2012-01-22 18:21:15 +01:00
|
|
|
/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/
|
|
|
|
|
|
|
|
/***
|
|
|
|
This file is part of systemd.
|
|
|
|
|
|
|
|
Copyright 2012 Lennart Poettering
|
|
|
|
|
|
|
|
systemd is free software; you can redistribute it and/or modify it
|
2012-04-12 00:20:58 +02:00
|
|
|
under the terms of the GNU Lesser General Public License as published by
|
|
|
|
the Free Software Foundation; either version 2.1 of the License, or
|
2012-01-22 18:21:15 +01:00
|
|
|
(at your option) any later version.
|
|
|
|
|
|
|
|
systemd is distributed in the hope that it will be useful, but
|
|
|
|
WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
2012-04-12 00:20:58 +02:00
|
|
|
Lesser General Public License for more details.
|
2012-01-22 18:21:15 +01:00
|
|
|
|
2012-04-12 00:20:58 +02:00
|
|
|
You should have received a copy of the GNU Lesser General Public License
|
2012-01-22 18:21:15 +01:00
|
|
|
along with systemd; If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
***/
|
|
|
|
|
|
|
|
#include <alloca.h>
#include <errno.h>
#include <getopt.h>
#include <inttypes.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "sd-bus.h"

#include "alloc-util.h"
#include "bus-error.h"
#include "bus-util.h"
#include "cgroup-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "hashmap.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "stdio-util.h"
#include "terminal-util.h"
#include "unit-name.h"
#include "util.h"
|
2012-01-22 18:21:15 +01:00
|
|
|
|
|
|
|
/* Per-cgroup accounting state. Objects are kept across refresh iterations
 * (moved between the "current" and "previous" hashmaps) so that rates
 * (CPU fraction, IO bytes/s) can be computed from deltas between samples. */
typedef struct Group {
        char *path;       /* cgroup path; owned by this object and used as the hashmap key */

        /* Validity flags: the corresponding fields below are only meaningful
         * when the flag is set; all are cleared when an entry is recycled
         * from the previous iteration's map. */
        bool n_tasks_valid:1;
        bool cpu_valid:1;
        bool memory_valid:1;
        bool io_valid:1;

        uint64_t n_tasks;          /* task/process count, possibly summed recursively over children */

        unsigned cpu_iteration;    /* iteration at which cpu_usage was last sampled */
        nsec_t cpu_usage;          /* cumulative CPU time (ns), as read from cpuacct.usage */
        nsec_t cpu_timestamp;      /* CLOCK_MONOTONIC time of the last CPU sample */
        double cpu_fraction;       /* CPU-time delta divided by wall-clock delta */

        uint64_t memory;           /* memory usage in bytes */

        unsigned io_iteration;     /* iteration at which the IO counters were last sampled */
        uint64_t io_input, io_output;   /* cumulative read/write byte counters (blkio) */
        nsec_t io_timestamp;       /* CLOCK_MONOTONIC time of the last IO sample */
        uint64_t io_input_bps, io_output_bps;   /* computed rates, bytes per second */
} Group;
|
|
|
|
|
2012-05-22 01:48:40 +02:00
|
|
|
/* Command-line configuration, filled in by parse_argv(). */
static unsigned arg_depth = 3;                   /* maximum cgroup tree depth to traverse */
static unsigned arg_iterations = (unsigned) -1;  /* (unsigned) -1 == run until interrupted */
static bool arg_batch = false;                   /* batch mode: accept no keyboard input */
static bool arg_raw = false;                     /* print raw numbers instead of human-readable sizes */
static usec_t arg_delay = 1*USEC_PER_SEC;        /* delay between refreshes */
static char* arg_machine = NULL;                 /* if set, show this container's cgroup tree */

/* What the tasks column counts. */
enum {
        COUNT_PIDS,                 /* entries of the "pids" controller (default) */
        COUNT_USERSPACE_PROCESSES,  /* processes, excluding kernel threads */
        COUNT_ALL_PROCESSES,        /* processes, including kernel threads */
} arg_count = COUNT_PIDS;
static bool arg_recursive = true;   /* sum process counts over child cgroups */

/* Sort order of the displayed table. */
static enum {
        ORDER_PATH,
        ORDER_TASKS,
        ORDER_CPU,
        ORDER_MEMORY,
        ORDER_IO,
} arg_order = ORDER_CPU;

/* How CPU usage is displayed. */
static enum {
        CPU_PERCENT,
        CPU_TIME,
} arg_cpu_type = CPU_PERCENT;
|
|
|
|
|
2012-01-22 18:21:15 +01:00
|
|
|
/* Release a Group object together with the path string it owns. */
static void group_free(Group *group) {
        assert(group);

        free(group->path);
        free(group);
}
|
|
|
|
|
|
|
|
/* Remove and free every Group stored in the hashmap, leaving it empty. */
static void group_hashmap_clear(Hashmap *h) {
        Group *g;

        for (;;) {
                g = hashmap_steal_first(h);
                if (!g)
                        break;

                group_free(g);
        }
}
|
|
|
|
|
|
|
|
/* Free all Group entries, then the hashmap itself. */
static void group_hashmap_free(Hashmap *map) {
        group_hashmap_clear(map);
        hashmap_free(map);
}
|
|
|
|
|
2015-09-10 18:16:18 +02:00
|
|
|
static const char *maybe_format_bytes(char *buf, size_t l, bool is_valid, uint64_t t) {
|
2015-05-22 23:20:49 +02:00
|
|
|
if (!is_valid)
|
|
|
|
return "-";
|
|
|
|
if (arg_raw) {
|
|
|
|
snprintf(buf, l, "%jd", t);
|
|
|
|
return buf;
|
|
|
|
}
|
|
|
|
return format_bytes(buf, l, t);
|
|
|
|
}
|
|
|
|
|
2015-08-30 15:11:35 +02:00
|
|
|
/* Sample one controller's accounting data for the cgroup 'path' and store it
 * in the Group object kept in hashmap 'a'. An existing entry is reused; if
 * one is found in 'b' (the previous iteration's map) it is moved over and its
 * validity flags are reset. Returns 0 on success — also when the cgroup
 * vanished underneath us (-ENOENT is swallowed) — or a negative errno-style
 * code on failure. If 'ret' is non-NULL it receives the borrowed Group
 * pointer (owned by the hashmap). */
static int process(
                const char *controller,
                const char *path,
                Hashmap *a,
                Hashmap *b,
                unsigned iteration,
                Group **ret) {

        Group *g;
        int r;

        assert(controller);
        assert(path);
        assert(a);

        /* Find or create the Group entry for this path. */
        g = hashmap_get(a, path);
        if (!g) {
                g = hashmap_get(b, path);
                if (!g) {
                        g = new0(Group, 1);
                        if (!g)
                                return -ENOMEM;

                        g->path = strdup(path);
                        if (!g->path) {
                                group_free(g);
                                return -ENOMEM;
                        }

                        r = hashmap_put(a, g->path, g);
                        if (r < 0) {
                                group_free(g);
                                return r;
                        }
                } else {
                        /* Recycle last iteration's entry so that the previous
                         * counters are available for delta computation. */
                        r = hashmap_move_one(a, b, path);
                        if (r < 0)
                                return r;

                        g->cpu_valid = g->memory_valid = g->io_valid = g->n_tasks_valid = false;
                }
        }

        if (streq(controller, SYSTEMD_CGROUP_CONTROLLER) && IN_SET(arg_count, COUNT_ALL_PROCESSES, COUNT_USERSPACE_PROCESSES)) {
                _cleanup_fclose_ FILE *f = NULL;
                pid_t pid;

                /* Count processes by enumerating the cgroup's PIDs. */
                r = cg_enumerate_processes(controller, path, &f);
                if (r == -ENOENT)
                        return 0;
                if (r < 0)
                        return r;

                g->n_tasks = 0;
                while (cg_read_pid(f, &pid) > 0) {

                        /* In COUNT_USERSPACE_PROCESSES mode, skip kernel threads. */
                        if (arg_count == COUNT_USERSPACE_PROCESSES && is_kernel_thread(pid) > 0)
                                continue;

                        g->n_tasks++;
                }

                if (g->n_tasks > 0)
                        g->n_tasks_valid = true;

        } else if (streq(controller, "pids") && arg_count == COUNT_PIDS) {
                _cleanup_free_ char *p = NULL, *v = NULL;

                /* Default mode: read the kernel's own count from pids.current. */
                r = cg_get_path(controller, path, "pids.current", &p);
                if (r < 0)
                        return r;

                r = read_one_line_file(p, &v);
                if (r == -ENOENT)
                        return 0;
                if (r < 0)
                        return r;

                r = safe_atou64(v, &g->n_tasks);
                if (r < 0)
                        return r;

                if (g->n_tasks > 0)
                        g->n_tasks_valid = true;

        } else if (streq(controller, "cpuacct") && cg_unified() <= 0) {
                _cleanup_free_ char *p = NULL, *v = NULL;
                uint64_t new_usage;
                nsec_t timestamp;

                /* Legacy hierarchy only: sample cumulative CPU time. */
                r = cg_get_path(controller, path, "cpuacct.usage", &p);
                if (r < 0)
                        return r;

                r = read_one_line_file(p, &v);
                if (r == -ENOENT)
                        return 0;
                if (r < 0)
                        return r;

                r = safe_atou64(v, &new_usage);
                if (r < 0)
                        return r;

                timestamp = now_nsec(CLOCK_MONOTONIC);

                /* A fraction can only be computed against a sample taken in
                 * the directly preceding iteration, and only if the counter
                 * actually advanced. */
                if (g->cpu_iteration == iteration - 1 &&
                    (nsec_t) new_usage > g->cpu_usage) {

                        nsec_t x, y;

                        /* Clamp the wall-clock delta to avoid division by zero. */
                        x = timestamp - g->cpu_timestamp;
                        if (x < 1)
                                x = 1;

                        y = (nsec_t) new_usage - g->cpu_usage;
                        g->cpu_fraction = (double) y / (double) x;
                        g->cpu_valid = true;
                }

                g->cpu_usage = (nsec_t) new_usage;
                g->cpu_timestamp = timestamp;
                g->cpu_iteration = iteration;

        } else if (streq(controller, "memory")) {
                _cleanup_free_ char *p = NULL, *v = NULL;

                /* The attribute name differs between legacy and unified hierarchies. */
                if (cg_unified() <= 0)
                        r = cg_get_path(controller, path, "memory.usage_in_bytes", &p);
                else
                        r = cg_get_path(controller, path, "memory.current", &p);
                if (r < 0)
                        return r;

                r = read_one_line_file(p, &v);
                if (r == -ENOENT)
                        return 0;
                if (r < 0)
                        return r;

                r = safe_atou64(v, &g->memory);
                if (r < 0)
                        return r;

                if (g->memory > 0)
                        g->memory_valid = true;

        } else if (streq(controller, "blkio") && cg_unified() <= 0) {
                _cleanup_fclose_ FILE *f = NULL;
                _cleanup_free_ char *p = NULL;
                uint64_t wr = 0, rd = 0;
                nsec_t timestamp;

                /* Legacy hierarchy only: sum up per-device Read/Write byte counters. */
                r = cg_get_path(controller, path, "blkio.io_service_bytes", &p);
                if (r < 0)
                        return r;

                f = fopen(p, "re");
                if (!f) {
                        if (errno == ENOENT)
                                return 0;
                        return -errno;
                }

                for (;;) {
                        char line[LINE_MAX], *l;
                        uint64_t k, *q;

                        if (!fgets(line, sizeof(line), f))
                                break;

                        /* Lines have the form "<dev> <op> <bytes>": skip the
                         * first field, then dispatch on the operation name. */
                        l = strstrip(line);
                        l += strcspn(l, WHITESPACE);
                        l += strspn(l, WHITESPACE);

                        if (first_word(l, "Read")) {
                                l += 4;
                                q = &rd;
                        } else if (first_word(l, "Write")) {
                                l += 5;
                                q = &wr;
                        } else
                                continue;

                        l += strspn(l, WHITESPACE);
                        r = safe_atou64(l, &k);
                        if (r < 0)
                                continue;

                        *q += k;
                }

                timestamp = now_nsec(CLOCK_MONOTONIC);

                /* Rates need a sample from the directly preceding iteration. */
                if (g->io_iteration == iteration - 1) {
                        uint64_t x, yr, yw;

                        x = (uint64_t) (timestamp - g->io_timestamp);
                        if (x < 1)
                                x = 1;

                        /* Guard against counters going backwards (e.g. device removal). */
                        if (rd > g->io_input)
                                yr = rd - g->io_input;
                        else
                                yr = 0;

                        if (wr > g->io_output)
                                yw = wr - g->io_output;
                        else
                                yw = 0;

                        if (yr > 0 || yw > 0) {
                                /* Bytes per nanosecond-delta scaled to bytes per second. */
                                g->io_input_bps = (yr * 1000000000ULL) / x;
                                g->io_output_bps = (yw * 1000000000ULL) / x;
                                g->io_valid = true;
                        }
                }

                g->io_input = rd;
                g->io_output = wr;
                g->io_timestamp = timestamp;
                g->io_iteration = iteration;
        }

        if (ret)
                *ret = g;

        return 0;
}
|
|
|
|
|
|
|
|
/* Recursively sample one controller's data for 'path' and all its subgroups,
 * up to arg_depth levels deep. On success returns 1 and, via 'ret', the
 * (borrowed) Group for 'path' itself; returns 0 when the depth limit was hit
 * or the cgroup vanished (-ENOENT), negative errno on failure. */
static int refresh_one(
                const char *controller,
                const char *path,
                Hashmap *a,
                Hashmap *b,
                unsigned iteration,
                unsigned depth,
                Group **ret) {

        _cleanup_closedir_ DIR *d = NULL;
        Group *ours;
        int r;

        assert(controller);
        assert(path);
        assert(a);

        if (depth > arg_depth)
                return 0;

        /* Sample this cgroup first, so 'ours' exists before children are summed in. */
        r = process(controller, path, a, b, iteration, &ours);
        if (r < 0)
                return r;

        r = cg_enumerate_subgroups(controller, path, &d);
        if (r == -ENOENT)
                return 0;
        if (r < 0)
                return r;

        for (;;) {
                _cleanup_free_ char *fn = NULL, *p = NULL;
                Group *child = NULL;

                r = cg_read_subgroup(d, &fn);
                if (r < 0)
                        return r;
                if (r == 0)
                        break;

                p = strjoin(path, "/", fn, NULL);
                if (!p)
                        return -ENOMEM;

                path_kill_slashes(p);

                r = refresh_one(controller, p, a, b, iteration, depth + 1, &child);
                if (r < 0)
                        return r;

                /* Only in process-counting modes, and only in the name=systemd
                 * hierarchy, are child counts folded into the parent. Note that
                 * 'child' stays NULL if the recursion hit the depth limit. */
                if (arg_recursive &&
                    IN_SET(arg_count, COUNT_ALL_PROCESSES, COUNT_USERSPACE_PROCESSES) &&
                    child &&
                    child->n_tasks_valid &&
                    streq(controller, SYSTEMD_CGROUP_CONTROLLER)) {

                        /* Recursively sum up processes */

                        if (ours->n_tasks_valid)
                                ours->n_tasks += child->n_tasks;
                        else {
                                ours->n_tasks = child->n_tasks;
                                ours->n_tasks_valid = true;
                        }
                }
        }

        if (ret)
                *ret = ours;

        return 1;
}
|
|
|
|
|
2015-08-28 19:17:47 +02:00
|
|
|
/* Refresh accounting data for every controller we track, for the whole tree
 * below 'root'. Current data lands in 'a'; entries from 'b' (the previous
 * iteration's map) are recycled by the lower layers. */
static int refresh(const char *root, Hashmap *a, Hashmap *b, unsigned iteration) {

        static const char * const controllers[] = {
                SYSTEMD_CGROUP_CONTROLLER,
                "cpuacct",
                "memory",
                "blkio",
                "pids",
        };
        size_t c;
        int r;

        assert(a);

        for (c = 0; c < sizeof(controllers) / sizeof(controllers[0]); c++) {
                r = refresh_one(controllers[c], root, a, b, iteration, 0, NULL);
                if (r < 0)
                        return r;
        }

        return 0;
}
|
|
|
|
|
|
|
|
/* qsort() comparator for Group* elements: sorts by the user-selected column
 * (descending, with invalid values last), falling back to path order for
 * ties. Note the double indirection: 'a'/'b' point at Group* slots. */
static int group_compare(const void*a, const void *b) {
        const Group *x = *(Group**)a, *y = *(Group**)b;

        if (arg_order != ORDER_TASKS || arg_recursive) {
                /* Let's make sure that the parent is always before
                 * the child. Except when ordering by tasks and
                 * recursive summing is off, since that is actually
                 * not accumulative for all children. */

                if (path_startswith(y->path, x->path))
                        return -1;
                if (path_startswith(x->path, y->path))
                        return 1;
        }

        switch (arg_order) {

        case ORDER_PATH:
                /* Fall through to the final path comparison below. */
                break;

        case ORDER_CPU:
                if (arg_cpu_type == CPU_PERCENT) {
                        if (x->cpu_valid && y->cpu_valid) {
                                if (x->cpu_fraction > y->cpu_fraction)
                                        return -1;
                                else if (x->cpu_fraction < y->cpu_fraction)
                                        return 1;
                        } else if (x->cpu_valid)
                                return -1;
                        else if (y->cpu_valid)
                                return 1;
                } else {
                        /* CPU_TIME: compare raw cumulative usage, no validity flag needed. */
                        if (x->cpu_usage > y->cpu_usage)
                                return -1;
                        else if (x->cpu_usage < y->cpu_usage)
                                return 1;
                }

                break;

        case ORDER_TASKS:
                if (x->n_tasks_valid && y->n_tasks_valid) {
                        if (x->n_tasks > y->n_tasks)
                                return -1;
                        else if (x->n_tasks < y->n_tasks)
                                return 1;
                } else if (x->n_tasks_valid)
                        return -1;
                else if (y->n_tasks_valid)
                        return 1;

                break;

        case ORDER_MEMORY:
                if (x->memory_valid && y->memory_valid) {
                        if (x->memory > y->memory)
                                return -1;
                        else if (x->memory < y->memory)
                                return 1;
                } else if (x->memory_valid)
                        return -1;
                else if (y->memory_valid)
                        return 1;

                break;

        case ORDER_IO:
                /* Order by combined input+output rate. */
                if (x->io_valid && y->io_valid) {
                        if (x->io_input_bps + x->io_output_bps > y->io_input_bps + y->io_output_bps)
                                return -1;
                        else if (x->io_input_bps + x->io_output_bps < y->io_input_bps + y->io_output_bps)
                                return 1;
                } else if (x->io_valid)
                        return -1;
                else if (y->io_valid)
                        return 1;
        }

        /* Tie-breaker: stable, total order by path. */
        return path_compare(x->path, y->path);
}
|
|
|
|
|
2015-08-31 13:29:46 +02:00
|
|
|
/* Render one snapshot of the collected Group data as a table on stdout.
 * On a tty the screen is cleared first, a highlighted header row is printed
 * and output is truncated to the terminal height; otherwise all rows are
 * printed without decoration. */
static void display(Hashmap *a) {
        Iterator i;
        Group *g;
        Group **array;
        signed path_columns;
        unsigned rows, n = 0, j, maxtcpu = 0, maxtpath = 3; /* 3 for ellipsize() to work properly */
        char buffer[MAX3(21, FORMAT_BYTES_MAX, FORMAT_TIMESPAN_MAX)];

        assert(a);

        if (on_tty())
                fputs(ANSI_HOME_CLEAR, stdout);

        /* Stack-allocated pointer array; sized for the worst case (every entry shown). */
        array = alloca(sizeof(Group*) * hashmap_size(a));

        /* Only groups with at least one valid metric are displayed. */
        HASHMAP_FOREACH(g, a, i)
                if (g->n_tasks_valid || g->cpu_valid || g->memory_valid || g->io_valid)
                        array[n++] = g;

        qsort_safe(array, n, sizeof(Group*), group_compare);

        /* Find the longest names in one run */
        for (j = 0; j < n; j++) {
                unsigned cputlen, pathtlen;

                format_timespan(buffer, sizeof(buffer), (usec_t) (array[j]->cpu_usage / NSEC_PER_USEC), 0);
                cputlen = strlen(buffer);
                maxtcpu = MAX(maxtcpu, cputlen);

                pathtlen = strlen(array[j]->path);
                maxtpath = MAX(maxtpath, pathtlen);
        }

        /* Prepare the CPU column header; its width feeds into path_columns below. */
        if (arg_cpu_type == CPU_PERCENT)
                xsprintf(buffer, "%6s", "%CPU");
        else
                xsprintf(buffer, "%*s", maxtcpu, "CPU Time");

        rows = lines();
        if (rows <= 10)
                rows = 10;

        if (on_tty()) {
                const char *on, *off;

                /* 36 columns are taken by the fixed-width numeric columns. */
                path_columns = columns() - 36 - strlen(buffer);
                if (path_columns < 10)
                        path_columns = 10;

                /* Highlight the header of the column we are sorting by. */
                on = ansi_highlight_underline();
                off = ansi_underline();

                printf("%s%s%-*s%s %s%7s%s %s%s%s %s%8s%s %s%8s%s %s%8s%s%s\n",
                       ansi_underline(),
                       arg_order == ORDER_PATH ? on : "", path_columns, "Control Group",
                       arg_order == ORDER_PATH ? off : "",
                       arg_order == ORDER_TASKS ? on : "", arg_count == COUNT_PIDS ? "Tasks" : arg_count == COUNT_USERSPACE_PROCESSES ? "Procs" : "Proc+",
                       arg_order == ORDER_TASKS ? off : "",
                       arg_order == ORDER_CPU ? on : "", buffer,
                       arg_order == ORDER_CPU ? off : "",
                       arg_order == ORDER_MEMORY ? on : "", "Memory",
                       arg_order == ORDER_MEMORY ? off : "",
                       arg_order == ORDER_IO ? on : "", "Input/s",
                       arg_order == ORDER_IO ? off : "",
                       arg_order == ORDER_IO ? on : "", "Output/s",
                       arg_order == ORDER_IO ? off : "",
                       ansi_normal());
        } else
                path_columns = maxtpath;

        for (j = 0; j < n; j++) {
                _cleanup_free_ char *ellipsized = NULL;
                const char *path;

                /* On a tty, stop before running past the bottom of the screen. */
                if (on_tty() && j + 6 > rows)
                        break;

                g = array[j];

                path = isempty(g->path) ? "/" : g->path;
                ellipsized = ellipsize(path, path_columns, 33);
                printf("%-*s", path_columns, ellipsized ?: path);

                if (g->n_tasks_valid)
                        printf(" %7" PRIu64, g->n_tasks);
                else
                        fputs(" -", stdout);

                if (arg_cpu_type == CPU_PERCENT) {
                        if (g->cpu_valid)
                                printf(" %6.1f", g->cpu_fraction*100);
                        else
                                fputs(" -", stdout);
                } else
                        printf(" %*s", maxtcpu, format_timespan(buffer, sizeof(buffer), (usec_t) (g->cpu_usage / NSEC_PER_USEC), 0));

                printf(" %8s", maybe_format_bytes(buffer, sizeof(buffer), g->memory_valid, g->memory));
                printf(" %8s", maybe_format_bytes(buffer, sizeof(buffer), g->io_valid, g->io_input_bps));
                printf(" %8s", maybe_format_bytes(buffer, sizeof(buffer), g->io_valid, g->io_output_bps));

                putchar('\n');
        }
}
|
|
|
|
|
2014-08-02 17:12:21 +02:00
|
|
|
/* Print the command line usage summary for systemd-cgtop on stdout.
 * Interpolates the program name and the current default traversal depth
 * (arg_depth) into the text. */
static void help(void) {
        printf("%s [OPTIONS...]\n\n"
               "Show top control groups by their resource usage.\n\n"
               " -h --help Show this help\n"
               " --version Show package version\n"
               " -p --order=path Order by path\n"
               " -t --order=tasks Order by number of tasks/processes\n"
               " -c --order=cpu Order by CPU load (default)\n"
               " -m --order=memory Order by memory load\n"
               " -i --order=io Order by IO load\n"
               " -r --raw Provide raw (not human-readable) numbers\n"
               " --cpu=percentage Show CPU usage as percentage (default)\n"
               " --cpu=time Show CPU usage as time\n"
               " -P Count userspace processes instead of tasks (excl. kernel)\n"
               " -k Count all processes instead of tasks (incl. kernel)\n"
               " --recursive=BOOL Sum up process count recursively\n"
               " -d --delay=DELAY Delay between updates\n"
               " -n --iterations=N Run for N iterations before exiting\n"
               " -b --batch Run in batch mode, accepting no input\n"
               " --depth=DEPTH Maximum traversal depth (default: %u)\n"
               " -M --machine= Show container\n"
               /* %s above is the program name, %u is arg_depth */
               , program_invocation_short_name, arg_depth);
}
|
|
|
|
|
2012-01-22 18:21:15 +01:00
|
|
|
/* Parse command line arguments into the arg_* globals.
 *
 * Returns 0 when the program should exit successfully without running
 * (after --help/--version), 1 when execution should continue, and a
 * negative errno-style value on invalid arguments. */
static int parse_argv(int argc, char *argv[]) {

        enum {
                /* Long-only options start above the range of printable chars
                 * so they cannot collide with short option characters. */
                ARG_VERSION = 0x100,
                ARG_DEPTH,
                ARG_CPU_TYPE,
                ARG_ORDER,
                ARG_RECURSIVE,
        };

        static const struct option options[] = {
                { "help", no_argument, NULL, 'h' },
                { "version", no_argument, NULL, ARG_VERSION },
                { "delay", required_argument, NULL, 'd' },
                { "iterations", required_argument, NULL, 'n' },
                { "batch", no_argument, NULL, 'b' },
                { "raw", no_argument, NULL, 'r' },
                { "depth", required_argument, NULL, ARG_DEPTH },
                { "cpu", optional_argument, NULL, ARG_CPU_TYPE },
                { "order", required_argument, NULL, ARG_ORDER },
                { "recursive", required_argument, NULL, ARG_RECURSIVE },
                { "machine", required_argument, NULL, 'M' },
                {}
        };

        /* Tracks whether the user explicitly passed --recursive=no, which is
         * only valid in the process-counting modes (checked below). */
        bool recursive_unset = false;
        int c, r;

        assert(argc >= 1);
        assert(argv);

        while ((c = getopt_long(argc, argv, "hptcmin:brd:kPM:", options, NULL)) >= 0)

                switch (c) {

                case 'h':
                        help();
                        return 0;

                case ARG_VERSION:
                        return version();

                case ARG_CPU_TYPE:
                        /* --cpu takes an optional argument; a bare --cpu
                         * selects time display. */
                        if (optarg) {
                                if (streq(optarg, "time"))
                                        arg_cpu_type = CPU_TIME;
                                else if (streq(optarg, "percentage"))
                                        arg_cpu_type = CPU_PERCENT;
                                else {
                                        log_error("Unknown argument to --cpu=: %s", optarg);
                                        return -EINVAL;
                                }
                        } else
                                arg_cpu_type = CPU_TIME;

                        break;

                case ARG_DEPTH:
                        r = safe_atou(optarg, &arg_depth);
                        if (r < 0) {
                                log_error("Failed to parse depth parameter.");
                                return -EINVAL;
                        }

                        break;

                case 'd':
                        /* Refresh delay; zero or negative delays are rejected. */
                        r = parse_sec(optarg, &arg_delay);
                        if (r < 0 || arg_delay <= 0) {
                                log_error("Failed to parse delay parameter.");
                                return -EINVAL;
                        }

                        break;

                case 'n':
                        r = safe_atou(optarg, &arg_iterations);
                        if (r < 0) {
                                log_error("Failed to parse iterations parameter.");
                                return -EINVAL;
                        }

                        break;

                case 'b':
                        arg_batch = true;
                        break;

                case 'r':
                        arg_raw = true;
                        break;

                /* Short sort-order flags; mirrored by --order= below. */
                case 'p':
                        arg_order = ORDER_PATH;
                        break;

                case 't':
                        arg_order = ORDER_TASKS;
                        break;

                case 'c':
                        arg_order = ORDER_CPU;
                        break;

                case 'm':
                        arg_order = ORDER_MEMORY;
                        break;

                case 'i':
                        arg_order = ORDER_IO;
                        break;

                case ARG_ORDER:
                        if (streq(optarg, "path"))
                                arg_order = ORDER_PATH;
                        else if (streq(optarg, "tasks"))
                                arg_order = ORDER_TASKS;
                        else if (streq(optarg, "cpu"))
                                arg_order = ORDER_CPU;
                        else if (streq(optarg, "memory"))
                                arg_order = ORDER_MEMORY;
                        else if (streq(optarg, "io"))
                                arg_order = ORDER_IO;
                        else {
                                log_error("Invalid argument to --order=: %s", optarg);
                                return -EINVAL;
                        }
                        break;

                case 'k':
                        arg_count = COUNT_ALL_PROCESSES;
                        break;

                case 'P':
                        arg_count = COUNT_USERSPACE_PROCESSES;
                        break;

                case ARG_RECURSIVE:
                        r = parse_boolean(optarg);
                        if (r < 0) {
                                log_error("Failed to parse --recursive= argument: %s", optarg);
                                return r;
                        }

                        arg_recursive = r;
                        /* Remember an explicit "no" so we can reject it later
                         * if the counting mode doesn't support it. */
                        recursive_unset = r == 0;
                        break;

                case 'M':
                        arg_machine = optarg;
                        break;

                case '?':
                        /* getopt_long() already printed a diagnostic. */
                        return -EINVAL;

                default:
                        assert_not_reached("Unhandled option");
                }

        /* Positional arguments are not accepted. */
        if (optind < argc) {
                log_error("Too many arguments.");
                return -EINVAL;
        }

        /* --recursive=no only makes sense when counting processes, not when
         * counting PIDs/tasks via the "pids" controller. */
        if (recursive_unset && arg_count == COUNT_PIDS) {
                log_error("Non-recursive counting is only supported when counting processes, not tasks. Use -P or -k.");
                return -EINVAL;
        }

        return 1;
}
|
|
|
|
|
2015-09-10 12:32:16 +02:00
|
|
|
static const char* counting_what(void) {
|
|
|
|
if (arg_count == COUNT_PIDS)
|
|
|
|
return "tasks";
|
|
|
|
else if (arg_count == COUNT_ALL_PROCESSES)
|
|
|
|
return "all processes (incl. kernel)";
|
|
|
|
else
|
|
|
|
return "userspace processes (excl. kernel)";
|
|
|
|
}
|
|
|
|
|
2015-09-21 14:04:45 +02:00
|
|
|
static int get_cgroup_root(char **ret) {
|
tree-wide: expose "p"-suffix unref calls in public APIs to make gcc cleanup easy
GLIB has recently started to officially support the gcc cleanup
attribute in its public API, hence let's do the same for our APIs.
With this patch we'll define an xyz_unrefp() call for each public
xyz_unref() call, to make it easy to use inside a
__attribute__((cleanup())) expression. Then, all code is ported over to
make use of this.
The new calls are also documented in the man pages, with examples how to
use them (well, I only added docs where the _unref() call itself already
had docs, and the examples, only cover sd_bus_unrefp() and
sd_event_unrefp()).
This also renames sd_lldp_free() to sd_lldp_unref(), since that's how we
tend to call our destructors these days.
Note that this defines no public macro that wraps gcc's attribute and
makes it easier to use. While I think it's our duty in the library to
make our stuff easy to use, I figure it's not our duty to make gcc's own
features easy to use on its own. Most likely, client code which wants to
make use of this should define its own:
#define _cleanup_(function) __attribute__((cleanup(function)))
Or similar, to make the gcc feature easier to use.
Making this logic public has the benefit that we can remove three header
files whose only purpose was to define these functions internally.
See #2008.
2015-11-27 19:13:45 +01:00
|
|
|
_cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
|
|
|
|
_cleanup_(sd_bus_flush_close_unrefp) sd_bus *bus = NULL;
|
2015-09-21 14:04:45 +02:00
|
|
|
_cleanup_free_ char *unit = NULL, *path = NULL;
|
|
|
|
const char *m;
|
|
|
|
int r;
|
|
|
|
|
|
|
|
if (!arg_machine) {
|
|
|
|
r = cg_get_root_path(ret);
|
|
|
|
if (r < 0)
|
|
|
|
return log_error_errno(r, "Failed to get root control group path: %m");
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
m = strjoina("/run/systemd/machines/", arg_machine);
|
|
|
|
r = parse_env_file(m, NEWLINE, "SCOPE", &unit, NULL);
|
|
|
|
if (r < 0)
|
|
|
|
return log_error_errno(r, "Failed to load machine data: %m");
|
|
|
|
|
|
|
|
path = unit_dbus_path_from_name(unit);
|
|
|
|
if (!path)
|
|
|
|
return log_oom();
|
|
|
|
|
2015-09-24 13:30:10 +02:00
|
|
|
r = bus_connect_transport_systemd(BUS_TRANSPORT_LOCAL, NULL, false, &bus);
|
2015-09-21 14:04:45 +02:00
|
|
|
if (r < 0)
|
|
|
|
return log_error_errno(r, "Failed to create bus connection: %m");
|
|
|
|
|
|
|
|
r = sd_bus_get_property_string(
|
|
|
|
bus,
|
|
|
|
"org.freedesktop.systemd1",
|
|
|
|
path,
|
|
|
|
unit_dbus_interface_from_name(unit),
|
|
|
|
"ControlGroup",
|
|
|
|
&error,
|
|
|
|
ret);
|
|
|
|
if (r < 0)
|
|
|
|
return log_error_errno(r, "Failed to query unit control group path: %s", bus_error_message(&error, r));
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-01-22 18:21:15 +01:00
|
|
|
/* systemd-cgtop entry point: repeatedly sample control group resource usage
 * and render it top(1)-style, handling interactive single-key commands
 * between refreshes. */
int main(int argc, char *argv[]) {
        int r;
        /* Two sample hashmaps: "a" holds the previous iteration, "b" the
         * current one; they are swapped after every refresh so rates
         * (CPU %, IO/s) can be derived from consecutive samples. */
        Hashmap *a = NULL, *b = NULL;
        unsigned iteration = 0;
        usec_t last_refresh = 0;
        bool quit = false, immediate_refresh = false;
        _cleanup_free_ char *root = NULL;
        CGroupMask mask;

        log_parse_environment();
        log_open();

        r = cg_mask_supported(&mask);
        if (r < 0) {
                log_error_errno(r, "Failed to determine supported controllers: %m");
                goto finish;
        }

        /* Default counting mode depends on whether the "pids" controller is
         * available; parse_argv() may override this, so it must run after. */
        arg_count = (mask & CGROUP_MASK_PIDS) ? COUNT_PIDS : COUNT_USERSPACE_PROCESSES;

        r = parse_argv(argc, argv);
        if (r <= 0)
                goto finish;

        r = get_cgroup_root(&root);
        if (r < 0) {
                log_error_errno(r, "Failed to get root control group path: %m");
                goto finish;
        }

        a = hashmap_new(&string_hash_ops);
        b = hashmap_new(&string_hash_ops);
        if (!a || !b) {
                r = log_oom();
                goto finish;
        }

        /* Re-query terminal dimensions on window resize. */
        signal(SIGWINCH, columns_lines_cache_reset);

        /* (unsigned) -1 means "not set on the command line": loop forever on
         * a TTY, run a single iteration otherwise. */
        if (arg_iterations == (unsigned) -1)
                arg_iterations = on_tty() ? 0 : 1;

        while (!quit) {
                Hashmap *c;
                usec_t t;
                char key;
                char h[FORMAT_TIMESPAN_MAX];

                t = now(CLOCK_MONOTONIC);

                /* Take a new sample when the delay elapsed or the user
                 * requested an immediate refresh (SPACE). */
                if (t >= last_refresh + arg_delay || immediate_refresh) {

                        r = refresh(root, a, b, iteration++);
                        if (r < 0) {
                                log_error_errno(r, "Failed to refresh: %m");
                                goto finish;
                        }

                        group_hashmap_clear(b);

                        /* Swap sample maps: current becomes previous. */
                        c = a;
                        a = b;
                        b = c;

                        last_refresh = t;
                        immediate_refresh = false;
                }

                /* "b" now holds the most recent sample after the swap. */
                display(b);

                /* arg_iterations == 0 means run forever. */
                if (arg_iterations && iteration >= arg_iterations)
                        break;

                if (!on_tty()) /* non-TTY: Empty newline as delimiter between polls */
                        fputs("\n", stdout);
                fflush(stdout);

                if (arg_batch)
                        /* Batch mode: just sleep out the remainder of the
                         * refresh interval, no keyboard handling. */
                        (void) usleep(last_refresh + arg_delay - t);
                else {
                        /* Interactive: wait for a key, at most until the next
                         * scheduled refresh. */
                        r = read_one_char(stdin, &key, last_refresh + arg_delay - t, NULL);
                        if (r == -ETIMEDOUT)
                                continue;
                        if (r < 0) {
                                log_error_errno(r, "Couldn't read key: %m");
                                goto finish;
                        }
                }

                if (on_tty()) { /* TTY: Clear any user keystroke */
                        fputs("\r \r", stdout);
                        fflush(stdout);
                }

                if (arg_batch)
                        continue;

                /* Interactive key dispatch. */
                switch (key) {

                case ' ':
                        immediate_refresh = true;
                        break;

                case 'q':
                        quit = true;
                        break;

                /* Sort order keys mirror the command line options. */
                case 'p':
                        arg_order = ORDER_PATH;
                        break;

                case 't':
                        arg_order = ORDER_TASKS;
                        break;

                case 'c':
                        arg_order = ORDER_CPU;
                        break;

                case 'm':
                        arg_order = ORDER_MEMORY;
                        break;

                case 'i':
                        arg_order = ORDER_IO;
                        break;

                case '%':
                        /* Toggle CPU display between percentage and time. */
                        arg_cpu_type = arg_cpu_type == CPU_TIME ? CPU_PERCENT : CPU_TIME;
                        break;

                case 'k':
                        /* Toggle between counting all processes and tasks. */
                        arg_count = arg_count != COUNT_ALL_PROCESSES ? COUNT_ALL_PROCESSES : COUNT_PIDS;
                        fprintf(stdout, "\nCounting: %s.", counting_what());
                        fflush(stdout);
                        sleep(1); /* leave the notice visible briefly */
                        break;

                case 'P':
                        /* Toggle between counting userspace processes and tasks. */
                        arg_count = arg_count != COUNT_USERSPACE_PROCESSES ? COUNT_USERSPACE_PROCESSES : COUNT_PIDS;
                        fprintf(stdout, "\nCounting: %s.", counting_what());
                        fflush(stdout);
                        sleep(1);
                        break;

                case 'r':
                        /* Recursive counting only applies to process modes. */
                        if (arg_count == COUNT_PIDS)
                                fprintf(stdout, "\n\aCannot toggle recursive counting, not available in task counting mode.");
                        else {
                                arg_recursive = !arg_recursive;
                                fprintf(stdout, "\nRecursive process counting: %s", yes_no(arg_recursive));
                        }
                        fflush(stdout);
                        sleep(1);
                        break;

                case '+':
                        /* Increase refresh delay: fine-grained below 1s. */
                        if (arg_delay < USEC_PER_SEC)
                                arg_delay += USEC_PER_MSEC*250;
                        else
                                arg_delay += USEC_PER_SEC;

                        fprintf(stdout, "\nIncreased delay to %s.", format_timespan(h, sizeof(h), arg_delay, 0));
                        fflush(stdout);
                        sleep(1);
                        break;

                case '-':
                        /* Decrease refresh delay, clamped at 250ms. */
                        if (arg_delay <= USEC_PER_MSEC*500)
                                arg_delay = USEC_PER_MSEC*250;
                        else if (arg_delay < USEC_PER_MSEC*1250)
                                arg_delay -= USEC_PER_MSEC*250;
                        else
                                arg_delay -= USEC_PER_SEC;

                        fprintf(stdout, "\nDecreased delay to %s.", format_timespan(h, sizeof(h), arg_delay, 0));
                        fflush(stdout);
                        sleep(1);
                        break;

                case '?':
                case 'h':

#define ON ANSI_HIGHLIGHT
#define OFF ANSI_NORMAL

                        /* Short on-screen key help; shown for 3 seconds. */
                        fprintf(stdout,
                                "\t<" ON "p" OFF "> By path; <" ON "t" OFF "> By tasks/procs; <" ON "c" OFF "> By CPU; <" ON "m" OFF "> By memory; <" ON "i" OFF "> By I/O\n"
                                "\t<" ON "+" OFF "> Inc. delay; <" ON "-" OFF "> Dec. delay; <" ON "%%" OFF "> Toggle time; <" ON "SPACE" OFF "> Refresh\n"
                                "\t<" ON "P" OFF "> Toggle count userspace processes; <" ON "k" OFF "> Toggle count all processes\n"
                                "\t<" ON "r" OFF "> Count processes recursively; <" ON "q" OFF "> Quit");
                        fflush(stdout);
                        sleep(3);
                        break;

                default:
                        /* Show unprintable keys as hex escapes. */
                        if (key < ' ')
                                fprintf(stdout, "\nUnknown key '\\x%x'. Ignoring.", key);
                        else
                                fprintf(stdout, "\nUnknown key '%c'. Ignoring.", key);
                        fflush(stdout);
                        sleep(1);
                        break;
                }
        }

        r = 0;

finish:
        group_hashmap_free(a);
        group_hashmap_free(b);

        return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}
|