/* SPDX-License-Identifier: LGPL-2.1-or-later */

#include <errno.h>
#include <getopt.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>

#include "sd-bus.h"

#include "alloc-util.h"
#include "build.h"
#include "bus-error.h"
#include "bus-util.h"
#include "cgroup-show.h"
#include "cgroup-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "hashmap.h"
#include "main-func.h"
#include "missing_sched.h"
#include "parse-argument.h"
#include "parse-util.h"
#include "path-util.h"
#include "pretty-print.h"
#include "process-util.h"
#include "procfs-util.h"
#include "sort-util.h"
#include "stdio-util.h"
#include "strv.h"
#include "terminal-util.h"
#include "unit-name.h"
#include "virt.h"
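
/* One line of cgtop output: the most recently sampled counters for a single cgroup, plus the
 * previous iteration's values and timestamps that are needed to turn CPU time and I/O bytes into
 * rates. */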
typedef struct Group {
        char *path;

        bool n_tasks_valid;
        bool cpu_valid;
        bool memory_valid;
        bool io_valid;

        uint64_t n_tasks;

        unsigned cpu_iteration;
        nsec_t cpu_usage;
        nsec_t cpu_timestamp;
        double cpu_fraction;

        uint64_t memory;

        unsigned io_iteration;
        uint64_t io_input, io_output;
        nsec_t io_timestamp;
        uint64_t io_input_bps, io_output_bps;
} Group;

/* Counted objects, enum order matters */
typedef enum PidsCount {
        COUNT_USERSPACE_PROCESSES,  /* least */
        COUNT_ALL_PROCESSES,
        COUNT_PIDS,                 /* most, requires pids controller */
} PidsCount;

static unsigned arg_depth = 3;
static unsigned arg_iterations = UINT_MAX;
static bool arg_batch = false;
static bool arg_raw = false;
static usec_t arg_delay = 1*USEC_PER_SEC;
static char* arg_machine = NULL;
static char* arg_root = NULL;
static bool arg_recursive = true;
static bool arg_recursive_unset = false;

static PidsCount arg_count = COUNT_PIDS;

static enum {
        ORDER_PATH,
        ORDER_TASKS,
        ORDER_CPU,
        ORDER_MEMORY,
        ORDER_IO,
} arg_order = ORDER_CPU;

static enum {
        CPU_PERCENT,
        CPU_TIME,
} arg_cpu_type = CPU_PERCENT;
static Group *group_free(Group *g) {
        if (!g)
                return NULL;

        free(g->path);
        return mfree(g);
}

DEFINE_PRIVATE_HASH_OPS_WITH_VALUE_DESTRUCTOR(group_hash_ops, char, path_hash_func, path_compare, Group, group_free);

static const char *maybe_format_timespan(char *buf, size_t l, usec_t t, usec_t accuracy) {
        if (arg_raw) {
                (void) snprintf(buf, l, USEC_FMT, t);
                return buf;
        }
        return format_timespan(buf, l, t, accuracy);
}
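
/* The MAYBE_FORMAT_* macros below pass a scratch buffer as a compound literal, so the returned
 * string has automatic storage and is intended for immediate consumption, e.g. as a printf()
 * argument. In --raw mode plain integers are printed instead of human-readable units. */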
#define BUFSIZE1 CONST_MAX(FORMAT_TIMESPAN_MAX, DECIMAL_STR_MAX(usec_t))
#define MAYBE_FORMAT_TIMESPAN(t, accuracy) \
        maybe_format_timespan((char[BUFSIZE1]){}, BUFSIZE1, t, accuracy)

static const char *maybe_format_bytes(char *buf, size_t l, bool is_valid, uint64_t t) {
        if (!is_valid)
                return "-";
        if (arg_raw) {
                (void) snprintf(buf, l, "%" PRIu64, t);
                return buf;
        }
        return format_bytes(buf, l, t);
}

#define BUFSIZE2 CONST_MAX(FORMAT_BYTES_MAX, DECIMAL_STR_MAX(uint64_t))
#define MAYBE_FORMAT_BYTES(is_valid, t) \
        maybe_format_bytes((char[BUFSIZE2]){}, BUFSIZE2, is_valid, t)
static bool is_root_cgroup(const char *path) {

        /* Returns true if the specified path belongs to the root cgroup. The root cgroup is special on
         * cgroup v2 as it carries only very few attributes in order not to export multiple truth about
         * system state as most information is available elsewhere in /proc anyway. We need to be able to
         * deal with that, and need to get our data from different sources in that case.
         *
         * There's one extra complication in all of this, though 😣: if the path to the cgroup indicates we
         * are in the root cgroup this might actually not be the case, because cgroup namespacing might be
         * in effect (CLONE_NEWCGROUP). Since there's no nice way to distinguish a real cgroup root from a
         * fake namespaced one we do an explicit container check here, under the assumption that
         * CLONE_NEWCGROUP is generally used when container managers are used too.
         *
         * Note that checking for a container environment is kinda ugly, since in theory people could use
         * cgtop from inside a container where cgroup namespacing is turned off to watch the host system.
         * However, that's mostly a theoretic use case, and if people actually try all they'll lose is
         * accounting for the top-level cgroup. Which isn't too bad. */

        if (detect_container() > 0)
                return false;

        return empty_or_root(path);
}
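
/* Sample one cgroup under one controller: look up or create its Group entry (moving it over from the
 * previous iteration's hashmap if it exists there), then update the counters this controller is
 * responsible for. Rate-style values (CPU fraction, I/O bytes per second) are computed against the
 * sample remembered from the previous iteration. */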
static int process(
                const char *controller,
                const char *path,
                Hashmap *a,
                Hashmap *b,
                unsigned iteration,
                Group **ret) {

        Group *g;
        int r, all_unified;

        assert(controller);
        assert(path);
        assert(a);

        all_unified = cg_all_unified();
        if (all_unified < 0)
                return all_unified;

        g = hashmap_get(a, path);
        if (!g) {
                g = hashmap_get(b, path);
                if (!g) {
                        g = new0(Group, 1);
                        if (!g)
                                return -ENOMEM;

                        g->path = strdup(path);
                        if (!g->path) {
                                group_free(g);
                                return -ENOMEM;
                        }

                        r = hashmap_put(a, g->path, g);
                        if (r < 0) {
                                group_free(g);
                                return r;
                        }
                } else {
                        r = hashmap_move_one(a, b, path);
                        if (r < 0)
                                return r;

                        g->cpu_valid = g->memory_valid = g->io_valid = g->n_tasks_valid = false;
                }
        }
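
        /* Each controller feeds a different column: the systemd hierarchy or the pids controller
         * provides the task/process count, "memory" the memory usage, "io"/"blkio" the I/O rates and
         * "cpu"/"cpuacct" the CPU time. */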
        if (streq(controller, SYSTEMD_CGROUP_CONTROLLER) &&
            IN_SET(arg_count, COUNT_ALL_PROCESSES, COUNT_USERSPACE_PROCESSES)) {
                _cleanup_fclose_ FILE *f = NULL;
                pid_t pid;

                r = cg_enumerate_processes(controller, path, &f);
                if (r == -ENOENT)
                        return 0;
                if (r < 0)
                        return r;

                g->n_tasks = 0;
                while (cg_read_pid(f, &pid, CGROUP_DONT_SKIP_UNMAPPED) > 0) {

                        if (arg_count == COUNT_USERSPACE_PROCESSES && pid_is_kernel_thread(pid) > 0)
                                continue;

                        g->n_tasks++;
                }

                if (g->n_tasks > 0)
                        g->n_tasks_valid = true;

        } else if (streq(controller, "pids") && arg_count == COUNT_PIDS) {

                if (is_root_cgroup(path)) {
                        r = procfs_tasks_get_current(&g->n_tasks);
                        if (r < 0)
                                return r;
                } else {
                        _cleanup_free_ char *p = NULL, *v = NULL;

                        r = cg_get_path(controller, path, "pids.current", &p);
                        if (r < 0)
                                return r;

                        r = read_one_line_file(p, &v);
                        if (r == -ENOENT)
                                return 0;
                        if (r < 0)
                                return r;

                        r = safe_atou64(v, &g->n_tasks);
                        if (r < 0)
                                return r;
                }

                if (g->n_tasks > 0)
                        g->n_tasks_valid = true;

        } else if (streq(controller, "memory")) {

                if (is_root_cgroup(path)) {
                        r = procfs_memory_get_used(&g->memory);
                        if (r < 0)
                                return r;
                } else {
                        _cleanup_free_ char *p = NULL, *v = NULL;

                        if (all_unified)
                                r = cg_get_path(controller, path, "memory.current", &p);
                        else
                                r = cg_get_path(controller, path, "memory.usage_in_bytes", &p);
                        if (r < 0)
                                return r;

                        r = read_one_line_file(p, &v);
                        if (r == -ENOENT)
                                return 0;
                        if (r < 0)
                                return r;

                        r = safe_atou64(v, &g->memory);
                        if (r < 0)
                                return r;
                }

                if (g->memory > 0)
                        g->memory_valid = true;

        } else if ((streq(controller, "io") && all_unified) ||
                   (streq(controller, "blkio") && !all_unified)) {
                _cleanup_fclose_ FILE *f = NULL;
                _cleanup_free_ char *p = NULL;
                uint64_t wr = 0, rd = 0;
                nsec_t timestamp;

                r = cg_get_path(controller, path, all_unified ? "io.stat" : "blkio.io_service_bytes", &p);
                if (r < 0)
                        return r;

                f = fopen(p, "re");
                if (!f) {
                        if (errno == ENOENT)
                                return 0;
                        return -errno;
                }

                for (;;) {
                        _cleanup_free_ char *line = NULL;
                        uint64_t k, *q;
                        char *l;

                        r = read_stripped_line(f, LONG_LINE_MAX, &line);
                        if (r < 0)
                                return r;
                        if (r == 0)
                                break;

                        /* Skip the device */
                        l = line + strcspn(line, WHITESPACE);
                        l += strspn(l, WHITESPACE);

                        if (all_unified) {
                                while (!isempty(l)) {
                                        if (sscanf(l, "rbytes=%" SCNu64, &k) == 1)
                                                rd += k;
                                        else if (sscanf(l, "wbytes=%" SCNu64, &k) == 1)
                                                wr += k;

                                        l += strcspn(l, WHITESPACE);
                                        l += strspn(l, WHITESPACE);
                                }
                        } else {
                                if (first_word(l, "Read")) {
                                        l += 4;
                                        q = &rd;
                                } else if (first_word(l, "Write")) {
                                        l += 5;
                                        q = &wr;
                                } else
                                        continue;

                                l += strspn(l, WHITESPACE);
                                r = safe_atou64(l, &k);
                                if (r < 0)
                                        continue;

                                *q += k;
                        }
                }

                timestamp = now_nsec(CLOCK_MONOTONIC);
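
                /* Convert the growth since the previous sample into bytes per second: byte delta
                 * divided by the elapsed monotonic time in ns, scaled by 10^9 (NSEC_PER_SEC). */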
                if (g->io_iteration == iteration - 1) {
                        uint64_t x, yr, yw;

                        x = (uint64_t) (timestamp - g->io_timestamp);
                        if (x < 1)
                                x = 1;

                        if (rd > g->io_input)
                                yr = rd - g->io_input;
                        else
                                yr = 0;

                        if (wr > g->io_output)
                                yw = wr - g->io_output;
                        else
                                yw = 0;

                        if (yr > 0 || yw > 0) {
                                g->io_input_bps = (yr * 1000000000ULL) / x;
                                g->io_output_bps = (yw * 1000000000ULL) / x;
                                g->io_valid = true;
                        }
                }

                g->io_input = rd;
                g->io_output = wr;
                g->io_timestamp = timestamp;
                g->io_iteration = iteration;

        } else if (STR_IN_SET(controller, "cpu", "cpuacct") || cpu_accounting_is_cheap()) {
                _cleanup_free_ char *p = NULL, *v = NULL;
                uint64_t new_usage;
                nsec_t timestamp;

                if (is_root_cgroup(path)) {
                        r = procfs_cpu_get_usage(&new_usage);
                        if (r < 0)
                                return r;
                } else if (all_unified) {
                        _cleanup_free_ char *val = NULL;

                        if (!streq(controller, "cpu"))
                                return 0;

                        r = cg_get_keyed_attribute("cpu", path, "cpu.stat", STRV_MAKE("usage_usec"), &val);
                        if (IN_SET(r, -ENOENT, -ENXIO))
                                return 0;
                        if (r < 0)
                                return r;

                        r = safe_atou64(val, &new_usage);
                        if (r < 0)
                                return r;

                        new_usage *= NSEC_PER_USEC;
                } else {
                        if (!streq(controller, "cpuacct"))
                                return 0;

                        r = cg_get_path(controller, path, "cpuacct.usage", &p);
                        if (r < 0)
                                return r;

                        r = read_one_line_file(p, &v);
                        if (r == -ENOENT)
                                return 0;
                        if (r < 0)
                                return r;

                        r = safe_atou64(v, &new_usage);
                        if (r < 0)
                                return r;
                }

                timestamp = now_nsec(CLOCK_MONOTONIC);
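
                /* cpu_fraction is CPU time consumed per unit of wall-clock time since the previous
                 * sample, i.e. 1.0 corresponds to one fully busy CPU. */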
                if (g->cpu_iteration == iteration - 1 &&
                    (nsec_t) new_usage > g->cpu_usage) {

                        nsec_t x, y;

                        x = timestamp - g->cpu_timestamp;
                        if (x < 1)
                                x = 1;

                        y = (nsec_t) new_usage - g->cpu_usage;
                        g->cpu_fraction = (double) y / (double) x;
                        g->cpu_valid = true;
                }

                g->cpu_usage = (nsec_t) new_usage;
                g->cpu_timestamp = timestamp;
                g->cpu_iteration = iteration;
        }

        if (ret)
                *ret = g;

        return 0;
}
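
/* Sample the given cgroup, then recurse into its subgroups up to arg_depth. When counting processes
 * recursively on the systemd hierarchy, the children's counts are folded into their parent. */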
static int refresh_one(
                const char *controller,
                const char *path,
                Hashmap *a,
                Hashmap *b,
                unsigned iteration,
                unsigned depth,
                Group **ret) {

        _cleanup_closedir_ DIR *d = NULL;
        Group *ours = NULL;
        int r;

        assert(controller);
        assert(path);
        assert(a);

        if (depth > arg_depth)
                return 0;

        r = process(controller, path, a, b, iteration, &ours);
        if (r < 0)
                return r;

        r = cg_enumerate_subgroups(controller, path, &d);
        if (r == -ENOENT)
                return 0;
        if (r < 0)
                return r;

        for (;;) {
                _cleanup_free_ char *fn = NULL, *p = NULL;
                Group *child = NULL;

                r = cg_read_subgroup(d, &fn);
                if (r < 0)
                        return r;
                if (r == 0)
                        break;

                p = path_join(path, fn);
                if (!p)
                        return -ENOMEM;

                path_simplify(p);

                r = refresh_one(controller, p, a, b, iteration, depth + 1, &child);
                if (r < 0)
                        return r;

                if (arg_recursive &&
                    IN_SET(arg_count, COUNT_ALL_PROCESSES, COUNT_USERSPACE_PROCESSES) &&
                    child &&
                    child->n_tasks_valid &&
                    streq(controller, SYSTEMD_CGROUP_CONTROLLER)) {

                        /* Recursively sum up processes */

                        if (ours->n_tasks_valid)
                                ours->n_tasks += child->n_tasks;
                        else {
                                ours->n_tasks = child->n_tasks;
                                ours->n_tasks_valid = true;
                        }
                }
        }

        if (ret)
                *ret = ours;

        return 1;
}
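
/* One full refresh pass: walk the tree once for every controller we know how to read. */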
static int refresh(const char *root, Hashmap *a, Hashmap *b, unsigned iteration) {
        int r;

        FOREACH_STRING(c, SYSTEMD_CGROUP_CONTROLLER, "cpu", "cpuacct", "memory", "io", "blkio", "pids") {
                r = refresh_one(c, root, a, b, iteration, 0, NULL);
                if (r < 0)
                        return r;
        }

        return 0;
}
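
/* Comparator for typesafe_qsort(): keep parents before their children (except when ordering by
 * non-recursive task counts), then sort by the selected column in descending order, falling back to
 * plain path comparison. */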
static int group_compare(Group * const *a, Group * const *b) {
        const Group *x = *a, *y = *b;
        int r;

        if (arg_order != ORDER_TASKS || arg_recursive) {
                /* Let's make sure that the parent is always before
                 * the child. Except when ordering by tasks and
                 * recursive summing is off, since that is actually
                 * not accumulative for all children. */

                if (path_startswith(empty_to_root(y->path), empty_to_root(x->path)))
                        return -1;
                if (path_startswith(empty_to_root(x->path), empty_to_root(y->path)))
                        return 1;
        }

        switch (arg_order) {

        case ORDER_PATH:
                break;

        case ORDER_CPU:
                if (arg_cpu_type == CPU_PERCENT) {
                        if (x->cpu_valid && y->cpu_valid) {
                                r = CMP(y->cpu_fraction, x->cpu_fraction);
                                if (r != 0)
                                        return r;
                        } else if (x->cpu_valid)
                                return -1;
                        else if (y->cpu_valid)
                                return 1;
                } else {
                        r = CMP(y->cpu_usage, x->cpu_usage);
                        if (r != 0)
                                return r;
                }

                break;

        case ORDER_TASKS:
                if (x->n_tasks_valid && y->n_tasks_valid) {
                        r = CMP(y->n_tasks, x->n_tasks);
                        if (r != 0)
                                return r;
                } else if (x->n_tasks_valid)
                        return -1;
                else if (y->n_tasks_valid)
                        return 1;

                break;

        case ORDER_MEMORY:
                if (x->memory_valid && y->memory_valid) {
                        r = CMP(y->memory, x->memory);
                        if (r != 0)
                                return r;
                } else if (x->memory_valid)
                        return -1;
                else if (y->memory_valid)
                        return 1;

                break;

        case ORDER_IO:
                if (x->io_valid && y->io_valid) {
                        r = CMP(y->io_input_bps + y->io_output_bps, x->io_input_bps + x->io_output_bps);
                        if (r != 0)
                                return r;
                } else if (x->io_valid)
                        return -1;
                else if (y->io_valid)
                        return 1;
        }

        return path_compare(x->path, y->path);
}
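
/* Render one table: sort the groups that have at least one valid counter, size the columns, print a
 * highlighted header when on a TTY, and emit one row per cgroup until the screen is full. */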
static void display(Hashmap *a) {
        Group *g;
        Group **array;
        signed path_columns;
        unsigned rows, n = 0, maxtcpu = 0, maxtpath = 3; /* 3 for ellipsize() to work properly */

        assert(a);

        if (!terminal_is_dumb())
                fputs(ANSI_HOME_CLEAR, stdout);

        array = newa(Group*, hashmap_size(a));

        HASHMAP_FOREACH(g, a)
                if (g->n_tasks_valid || g->cpu_valid || g->memory_valid || g->io_valid)
                        array[n++] = g;

        typesafe_qsort(array, n, group_compare);

        /* Find the longest names in one run */
        for (unsigned j = 0; j < n; j++) {
                maxtcpu = MAX(maxtcpu,
                              strlen(MAYBE_FORMAT_TIMESPAN((usec_t) (array[j]->cpu_usage / NSEC_PER_USEC), 0)));
                maxtpath = MAX(maxtpath,
                               strlen(array[j]->path));
        }

        rows = lines();
        if (rows <= 10)
                rows = 10;

        if (on_tty()) {
                const char *on, *off;
                int cpu_len = arg_cpu_type == CPU_PERCENT ? 6 : maxtcpu;

                path_columns = columns() - 36 - cpu_len;
                if (path_columns < 10)
                        path_columns = 10;

                on = ansi_highlight_underline();
                off = ansi_underline();

                printf("%s%s%-*s%s %s%7s%s %s%*s%s %s%8s%s %s%8s%s %s%8s%s%s\n",
                       ansi_underline(),
                       arg_order == ORDER_PATH ? on : "", path_columns, "CGroup",
                       arg_order == ORDER_PATH ? off : "",
                       arg_order == ORDER_TASKS ? on : "",
                       arg_count == COUNT_PIDS ? "Tasks" : arg_count == COUNT_USERSPACE_PROCESSES ? "Procs" : "Proc+",
                       arg_order == ORDER_TASKS ? off : "",
                       arg_order == ORDER_CPU ? on : "",
                       cpu_len,
                       arg_cpu_type == CPU_PERCENT ? "%CPU" : "CPU Time",
                       arg_order == ORDER_CPU ? off : "",
                       arg_order == ORDER_MEMORY ? on : "", "Memory",
                       arg_order == ORDER_MEMORY ? off : "",
                       arg_order == ORDER_IO ? on : "", "Input/s",
                       arg_order == ORDER_IO ? off : "",
                       arg_order == ORDER_IO ? on : "", "Output/s",
                       arg_order == ORDER_IO ? off : "",
                       ansi_normal());
        } else
                path_columns = maxtpath;

        for (unsigned j = 0; j < n; j++) {
                _cleanup_free_ char *ellipsized = NULL;
                const char *path;

                if (on_tty() && j + 6 > rows)
                        break;

                g = array[j];

                path = empty_to_root(g->path);
                ellipsized = ellipsize(path, path_columns, 33);
                printf("%-*s", path_columns, ellipsized ?: path);

                if (g->n_tasks_valid)
                        printf(" %7" PRIu64, g->n_tasks);
                else
                        fputs("       -", stdout);

                if (arg_cpu_type == CPU_PERCENT) {
                        if (g->cpu_valid)
                                printf(" %6.1f", g->cpu_fraction*100);
                        else
                                fputs("      -", stdout);
                } else
                        printf(" %*s",
                               (int) maxtcpu,
                               MAYBE_FORMAT_TIMESPAN((usec_t) (g->cpu_usage / NSEC_PER_USEC), 0));

                printf(" %8s", MAYBE_FORMAT_BYTES(g->memory_valid, g->memory));
                printf(" %8s", MAYBE_FORMAT_BYTES(g->io_valid, g->io_input_bps));
                printf(" %8s", MAYBE_FORMAT_BYTES(g->io_valid, g->io_output_bps));

                putchar('\n');
        }
}
static int help(void) {
        _cleanup_free_ char *link = NULL;
        int r;

        r = terminal_urlify_man("systemd-cgtop", "1", &link);
        if (r < 0)
                return log_oom();

        printf("%s [OPTIONS...] [CGROUP]\n\n"
               "Show top control groups by their resource usage.\n\n"
               "  -h --help           Show this help\n"
               "     --version        Show package version\n"
               "  -p --order=path     Order by path\n"
               "  -t --order=tasks    Order by number of tasks/processes\n"
               "  -c --order=cpu      Order by CPU load (default)\n"
               "  -m --order=memory   Order by memory load\n"
               "  -i --order=io       Order by IO load\n"
               "  -r --raw            Provide raw (not human-readable) numbers\n"
               "     --cpu=percentage Show CPU usage as percentage (default)\n"
               "     --cpu=time       Show CPU usage as time\n"
               "  -P                  Count userspace processes instead of tasks (excl. kernel)\n"
               "  -k                  Count all processes instead of tasks (incl. kernel)\n"
               "     --recursive=BOOL Sum up process count recursively\n"
               "  -d --delay=DELAY    Delay between updates\n"
               "  -n --iterations=N   Run for N iterations before exiting\n"
               "  -1                  Shortcut for --iterations=1\n"
               "  -b --batch          Run in batch mode, accepting no input\n"
               "     --depth=DEPTH    Maximum traversal depth (default: %u)\n"
               "  -M --machine=       Show container\n"
               "\nSee the %s for details.\n",
               program_invocation_short_name,
               arg_depth,
               link);

        return 0;
}
static int parse_argv(int argc, char *argv[]) {
        enum {
                ARG_VERSION = 0x100,
                ARG_DEPTH,
                ARG_CPU_TYPE,
                ARG_ORDER,
                ARG_RECURSIVE,
        };

        static const struct option options[] = {
                { "help",       no_argument,       NULL, 'h'           },
                { "version",    no_argument,       NULL, ARG_VERSION   },
                { "delay",      required_argument, NULL, 'd'           },
                { "iterations", required_argument, NULL, 'n'           },
                { "batch",      no_argument,       NULL, 'b'           },
                { "raw",        no_argument,       NULL, 'r'           },
                { "depth",      required_argument, NULL, ARG_DEPTH     },
                { "cpu",        optional_argument, NULL, ARG_CPU_TYPE  },
                { "order",      required_argument, NULL, ARG_ORDER     },
                { "recursive",  required_argument, NULL, ARG_RECURSIVE },
                { "machine",    required_argument, NULL, 'M'           },
                {}
        };

        int c, r;

        assert(argc >= 1);
        assert(argv);

        while ((c = getopt_long(argc, argv, "hptcmin:brd:kPM:1", options, NULL)) >= 0)

                switch (c) {

                case 'h':
                        return help();

                case ARG_VERSION:
                        return version();

                case ARG_CPU_TYPE:
                        if (optarg) {
                                if (streq(optarg, "time"))
                                        arg_cpu_type = CPU_TIME;
                                else if (streq(optarg, "percentage"))
                                        arg_cpu_type = CPU_PERCENT;
                                else
                                        return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
                                                               "Unknown argument to --cpu=: %s",
                                                               optarg);
                        } else
                                arg_cpu_type = CPU_TIME;

                        break;

                case ARG_DEPTH:
                        r = safe_atou(optarg, &arg_depth);
                        if (r < 0)
                                return log_error_errno(r, "Failed to parse depth parameter '%s': %m", optarg);

                        break;

                case 'd':
                        r = parse_sec(optarg, &arg_delay);
                        if (r < 0)
                                return log_error_errno(r, "Failed to parse delay parameter '%s': %m", optarg);
                        if (arg_delay <= 0)
                                return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
                                                       "Invalid delay parameter '%s'",
                                                       optarg);

                        break;

                case 'n':
                        r = safe_atou(optarg, &arg_iterations);
                        if (r < 0)
                                return log_error_errno(r, "Failed to parse iterations parameter '%s': %m", optarg);

                        break;

                case '1':
                        arg_iterations = 1;
                        break;

                case 'b':
                        arg_batch = true;
                        break;

                case 'r':
                        arg_raw = true;
                        break;

                case 'p':
                        arg_order = ORDER_PATH;
                        break;

                case 't':
                        arg_order = ORDER_TASKS;
                        break;

                case 'c':
                        arg_order = ORDER_CPU;
                        break;

                case 'm':
                        arg_order = ORDER_MEMORY;
                        break;

                case 'i':
                        arg_order = ORDER_IO;
                        break;

                case ARG_ORDER:
                        if (streq(optarg, "path"))
                                arg_order = ORDER_PATH;
                        else if (streq(optarg, "tasks"))
                                arg_order = ORDER_TASKS;
                        else if (streq(optarg, "cpu"))
                                arg_order = ORDER_CPU;
                        else if (streq(optarg, "memory"))
                                arg_order = ORDER_MEMORY;
                        else if (streq(optarg, "io"))
                                arg_order = ORDER_IO;
                        else
                                return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
                                                       "Invalid argument to --order=: %s",
                                                       optarg);
                        break;

                case 'k':
                        arg_count = COUNT_ALL_PROCESSES;
                        break;

                case 'P':
                        arg_count = COUNT_USERSPACE_PROCESSES;
                        break;

                case ARG_RECURSIVE:
                        r = parse_boolean_argument("--recursive=", optarg, &arg_recursive);
                        if (r < 0)
                                return r;

                        arg_recursive_unset = !r;
                        break;

                case 'M':
                        arg_machine = optarg;
                        break;

                case '?':
                        return -EINVAL;

                default:
                        assert_not_reached();
                }

        if (optind == argc - 1)
                arg_root = argv[optind];
        else if (optind < argc)
                return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
                                       "Too many arguments.");

        return 1;
}
static const char* counting_what(void) {
        if (arg_count == COUNT_PIDS)
                return "tasks";
        else if (arg_count == COUNT_ALL_PROCESSES)
                return "all processes (incl. kernel)";
        else
                return "userspace processes (excl. kernel)";
}
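
/* Interactive main loop. Two hashmaps are alternated so that every refresh can compute rates against
 * the previous iteration's samples; after refreshing, the maps are swapped and the fresh one is
 * displayed. Between refreshes we either sleep (batch mode) or wait for a single keystroke and
 * dispatch it. */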
static int loop(const char *root) {
        _cleanup_hashmap_free_ Hashmap *a = NULL, *b = NULL;
        unsigned iteration = 0;
        usec_t last_refresh = 0;
        bool immediate_refresh = false;
        int r;

        a = hashmap_new(&group_hash_ops);
        b = hashmap_new(&group_hash_ops);
        if (!a || !b)
                return log_oom();

        for (;;) {
                usec_t t;
                char key;

                t = now(CLOCK_MONOTONIC);

                if (t >= usec_add(last_refresh, arg_delay) || immediate_refresh) {

                        r = refresh(root, a, b, iteration++);
                        if (r < 0)
                                return log_error_errno(r, "Failed to refresh: %m");

                        hashmap_clear(b);
                        SWAP_TWO(a, b);

                        last_refresh = t;
                        immediate_refresh = false;
                }

                display(b);

                if (arg_iterations && iteration >= arg_iterations)
                        return 0;

                if (!on_tty()) /* non-TTY: Empty newline as delimiter between polls */
                        fputs("\n", stdout);
                fflush(stdout);

                if (arg_batch)
                        (void) usleep_safe(usec_add(usec_sub_unsigned(last_refresh, t), arg_delay));
                else {
                        r = read_one_char(stdin, &key, usec_add(usec_sub_unsigned(last_refresh, t), arg_delay), NULL);
                        if (r == -ETIMEDOUT)
                                continue;
                        if (r < 0)
                                return log_error_errno(r, "Couldn't read key: %m");
                }

                if (on_tty()) { /* TTY: Clear any user keystroke */
                        fputs("\r \r", stdout);
                        fflush(stdout);
                }

                if (arg_batch)
                        continue;

                switch (key) {

                case ' ':
                        immediate_refresh = true;
                        break;

                case 'q':
                        return 0;

                case 'p':
                        arg_order = ORDER_PATH;
                        break;

                case 't':
                        arg_order = ORDER_TASKS;
                        break;

                case 'c':
                        arg_order = ORDER_CPU;
                        break;

                case 'm':
                        arg_order = ORDER_MEMORY;
                        break;

                case 'i':
                        arg_order = ORDER_IO;
                        break;

                case '%':
                        arg_cpu_type = arg_cpu_type == CPU_TIME ? CPU_PERCENT : CPU_TIME;
                        break;

                case 'k':
                        arg_count = arg_count != COUNT_ALL_PROCESSES ? COUNT_ALL_PROCESSES : COUNT_PIDS;
                        fprintf(stdout, "\nCounting: %s.", counting_what());
                        fflush(stdout);
                        sleep(1);
                        break;

                case 'P':
                        arg_count = arg_count != COUNT_USERSPACE_PROCESSES ? COUNT_USERSPACE_PROCESSES : COUNT_PIDS;
                        fprintf(stdout, "\nCounting: %s.", counting_what());
                        fflush(stdout);
                        sleep(1);
                        break;

                case 'r':
                        if (arg_count == COUNT_PIDS)
                                fprintf(stdout, "\n\aCannot toggle recursive counting, not available in task counting mode.");
                        else {
                                arg_recursive = !arg_recursive;
                                fprintf(stdout, "\nRecursive process counting: %s", yes_no(arg_recursive));
                        }

                        fflush(stdout);
                        sleep(1);
                        break;

                case '+':
                        arg_delay = usec_add(arg_delay, arg_delay < USEC_PER_SEC ? USEC_PER_MSEC * 250 : USEC_PER_SEC);

                        fprintf(stdout, "\nIncreased delay to %s.", FORMAT_TIMESPAN(arg_delay, 0));
                        fflush(stdout);
                        sleep(1);
                        break;

                case '-':
                        if (arg_delay <= USEC_PER_MSEC*500)
                                arg_delay = USEC_PER_MSEC*250;
                        else
                                arg_delay = usec_sub_unsigned(arg_delay, arg_delay < USEC_PER_MSEC * 1250 ? USEC_PER_MSEC * 250 : USEC_PER_SEC);

                        fprintf(stdout, "\nDecreased delay to %s.", FORMAT_TIMESPAN(arg_delay, 0));
                        fflush(stdout);
                        sleep(1);
                        break;

                case '?':
                case 'h':

                        fprintf(stdout,
                                "\t<%1$sp%2$s> By path; <%1$st%2$s> By tasks/procs; <%1$sc%2$s> By CPU; <%1$sm%2$s> By memory; <%1$si%2$s> By I/O\n"
                                "\t<%1$s+%2$s> Inc. delay; <%1$s-%2$s> Dec. delay; <%1$s%%%2$s> Toggle time; <%1$sSPACE%2$s> Refresh\n"
                                "\t<%1$sP%2$s> Toggle count userspace processes; <%1$sk%2$s> Toggle count all processes\n"
                                "\t<%1$sr%2$s> Count processes recursively; <%1$sq%2$s> Quit",
                                ansi_highlight(), ansi_normal());
                        fflush(stdout);
                        sleep(3);
                        break;

                default:
                        if (key < ' ')
                                fprintf(stdout, "\nUnknown key '\\x%x'. Ignoring.", (unsigned) key);
                        else
                                fprintf(stdout, "\nUnknown key '%c'. Ignoring.", key);
                        fflush(stdout);
                        sleep(1);
                        break;
                }
        }
}
static int run(int argc, char *argv[]) {
        _cleanup_free_ char *root = NULL;
        CGroupMask mask;
        int r;

        log_setup();

        r = parse_argv(argc, argv);
        if (r <= 0)
                return r;

        r = cg_mask_supported(&mask);
        if (r < 0)
                return log_error_errno(r, "Failed to determine supported controllers: %m");

        /* honor user selection unless pids controller is unavailable */
        PidsCount possible_count = (mask & CGROUP_MASK_PIDS) ? COUNT_PIDS : COUNT_ALL_PROCESSES;
        arg_count = MIN(possible_count, arg_count);

        if (arg_recursive_unset && arg_count == COUNT_PIDS)
                return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
                                       "Non-recursive counting is only supported when counting processes, not tasks. Use -P or -k.");

        r = show_cgroup_get_path_and_warn(arg_machine, arg_root, &root);
        if (r < 0)
                return log_error_errno(r, "Failed to get root control group path: %m");
        log_debug("CGroup path: %s", root);

        signal(SIGWINCH, columns_lines_cache_reset);

        if (arg_iterations == UINT_MAX)
                arg_iterations = on_tty() ? 0 : 1;

        return loop(root);
}

DEFINE_MAIN_FUNCTION(run);