2017-11-18 17:09:20 +01:00
|
|
|
/* SPDX-License-Identifier: LGPL-2.1+ */
|
2012-07-18 19:07:51 +02:00
|
|
|
#pragma once
|
2009-11-18 00:42:52 +01:00
|
|
|
|
|
|
|
#include <stdbool.h>
|
2010-01-19 00:22:34 +01:00
|
|
|
#include <stdio.h>
|
2010-02-01 03:33:24 +01:00
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
#include "sd-bus.h"
|
2018-10-08 15:47:29 +02:00
|
|
|
#include "sd-device.h"
|
2013-11-19 21:12:59 +01:00
|
|
|
#include "sd-event.h"
|
2015-11-18 22:46:33 +01:00
|
|
|
|
2013-06-27 04:14:27 +02:00
|
|
|
#include "cgroup-util.h"
|
2019-11-05 13:50:28 +01:00
|
|
|
#include "cgroup.h"
|
2015-09-22 23:24:07 +02:00
|
|
|
#include "fdset.h"
|
2015-01-28 02:18:59 +01:00
|
|
|
#include "hashmap.h"
|
2016-11-11 19:59:19 +01:00
|
|
|
#include "ip-address-access.h"
|
2015-01-28 02:18:59 +01:00
|
|
|
#include "list.h"
|
2019-05-22 12:12:17 +02:00
|
|
|
#include "prioq.h"
|
2015-01-28 02:18:59 +01:00
|
|
|
#include "ratelimit.h"
|
2019-08-07 14:58:59 +02:00
|
|
|
#include "varlink.h"
|
2010-04-21 03:27:44 +02:00
|
|
|
|
2018-03-26 17:34:53 +02:00
|
|
|
struct libmnt_monitor;
|
2018-05-15 20:17:34 +02:00
|
|
|
typedef struct Unit Unit;
|
2018-03-26 17:34:53 +02:00
|
|
|
|
2010-04-22 02:56:42 +02:00
|
|
|
/* Enforce an upper limit on how many names we allow */
|
2010-11-24 23:36:40 +01:00
|
|
|
#define MANAGER_MAX_NAMES 131072 /* 128K */
|
2010-04-22 02:56:42 +02:00
|
|
|
|
2009-11-18 00:42:52 +01:00
|
|
|
typedef struct Manager Manager;
|
2010-01-27 04:31:52 +01:00
|
|
|
|
2018-10-09 15:32:35 +02:00
|
|
|
/* An externally visible state of the manager. We don't actually maintain this as a state variable, but
 * derive it on the fly from various fields when requested. */
typedef enum ManagerState {
        MANAGER_INITIALIZING   = 0,
        MANAGER_STARTING       = 1,
        MANAGER_RUNNING        = 2,
        MANAGER_DEGRADED       = 3,
        MANAGER_MAINTENANCE    = 4,
        MANAGER_STOPPING       = 5,
        _MANAGER_STATE_MAX     = 6,
        _MANAGER_STATE_INVALID = -1
} ManagerState;
|
|
|
|
|
2018-10-09 15:42:19 +02:00
|
|
|
/* What the manager is currently trying to achieve: keep going, or wind down in one of several ways. */
typedef enum ManagerObjective {
        MANAGER_OK                 = 0,
        MANAGER_EXIT               = 1,
        MANAGER_RELOAD             = 2,
        MANAGER_REEXECUTE          = 3,
        MANAGER_REBOOT             = 4,
        MANAGER_POWEROFF           = 5,
        MANAGER_HALT               = 6,
        MANAGER_KEXEC              = 7,
        MANAGER_SWITCH_ROOT        = 8,
        _MANAGER_OBJECTIVE_MAX     = 9,
        _MANAGER_OBJECTIVE_INVALID = -1
} ManagerObjective;
|
2010-04-21 03:27:44 +02:00
|
|
|
|
2014-10-28 04:02:54 +01:00
|
|
|
/* Classification of status messages. NB: deliberately has no _MAX/_INVALID markers, as it is never
 * serialized or iterated over. */
typedef enum StatusType {
        STATUS_TYPE_EPHEMERAL = 0,
        STATUS_TYPE_NORMAL    = 1,
        STATUS_TYPE_NOTICE    = 2,
        STATUS_TYPE_EMERGENCY = 3,
} StatusType;
|
|
|
|
|
2019-03-19 19:05:19 +01:00
|
|
|
/* What to do with a unit after the kernel's OOM killer has struck one of its processes. */
typedef enum OOMPolicy {
        /* Let the kernel's OOM kill stand; we take no further action. */
        OOM_CONTINUE        = 0,

        /* After the kernel killed its chosen process, we stop the unit. */
        OOM_STOP            = 1,

        /* After the kernel killed its chosen process, we kill all remaining processes of the unit and
         * stop it. */
        OOM_KILL            = 2,

        _OOM_POLICY_MAX     = 3,
        _OOM_POLICY_INVALID = -1
} OOMPolicy;
|
|
|
|
|
2018-08-06 09:16:21 +02:00
|
|
|
/* Notes:
 *
 * 1. MANAGER_TIMESTAMP_FIRMWARE, MANAGER_TIMESTAMP_LOADER, MANAGER_TIMESTAMP_KERNEL,
 *    MANAGER_TIMESTAMP_INITRD, MANAGER_TIMESTAMP_SECURITY_START, and MANAGER_TIMESTAMP_SECURITY_FINISH are
 *    set only when the manager is a system instance and is not running in a container environment.
 *
 * 2. The monotonic timestamp of MANAGER_TIMESTAMP_KERNEL is always zero.
 *
 * 3. The realtime timestamp of MANAGER_TIMESTAMP_KERNEL will be unset if the system does not have an RTC.
 *
 * 4. MANAGER_TIMESTAMP_FIRMWARE and MANAGER_TIMESTAMP_LOADER will be unset if the system does not have an
 *    RTC, or if systemd is built without EFI support.
 *
 * 5. The monotonic timestamps of MANAGER_TIMESTAMP_FIRMWARE and MANAGER_TIMESTAMP_LOADER are stored as the
 *    negative of the actual value.
 *
 * 6. MANAGER_TIMESTAMP_USERSPACE is the timestamp of when the manager itself was started.
 *
 * 7. The MANAGER_TIMESTAMP_INITRD_* timestamps are set only when the system is booted with an initrd.
 */
|
|
|
|
|
2017-11-20 21:01:13 +01:00
|
|
|
typedef enum ManagerTimestamp {
        /* Boot-phase timestamps, in boot order. */
        MANAGER_TIMESTAMP_FIRMWARE                 = 0,
        MANAGER_TIMESTAMP_LOADER                   = 1,
        MANAGER_TIMESTAMP_KERNEL                   = 2,
        MANAGER_TIMESTAMP_INITRD                   = 3,
        MANAGER_TIMESTAMP_USERSPACE                = 4,
        MANAGER_TIMESTAMP_FINISH                   = 5,

        /* Manager start-up phases. */
        MANAGER_TIMESTAMP_SECURITY_START           = 6,
        MANAGER_TIMESTAMP_SECURITY_FINISH          = 7,
        MANAGER_TIMESTAMP_GENERATORS_START         = 8,
        MANAGER_TIMESTAMP_GENERATORS_FINISH        = 9,
        MANAGER_TIMESTAMP_UNITS_LOAD_START         = 10,
        MANAGER_TIMESTAMP_UNITS_LOAD_FINISH        = 11,

        /* The same start-up phases, as they ran inside the initrd. */
        MANAGER_TIMESTAMP_INITRD_SECURITY_START    = 12,
        MANAGER_TIMESTAMP_INITRD_SECURITY_FINISH   = 13,
        MANAGER_TIMESTAMP_INITRD_GENERATORS_START  = 14,
        MANAGER_TIMESTAMP_INITRD_GENERATORS_FINISH = 15,
        MANAGER_TIMESTAMP_INITRD_UNITS_LOAD_START  = 16,
        MANAGER_TIMESTAMP_INITRD_UNITS_LOAD_FINISH = 17,

        _MANAGER_TIMESTAMP_MAX                     = 18,
        _MANAGER_TIMESTAMP_INVALID                 = -1,
} ManagerTimestamp;
|
|
|
|
|
2020-04-22 16:16:47 +02:00
|
|
|
/* The contexts a watchdog timeout applies to; used to index the watchdog[] and watchdog_overridden[]
 * arrays in struct Manager. */
typedef enum WatchdogType {
        WATCHDOG_RUNTIME   = 0,
        WATCHDOG_REBOOT    = 1,
        WATCHDOG_KEXEC     = 2,
        _WATCHDOG_TYPE_MAX = 3,
} WatchdogType;
|
|
|
|
|
2015-09-22 23:24:07 +02:00
|
|
|
#include "execute.h"
|
2009-11-18 00:42:52 +01:00
|
|
|
#include "job.h"
|
2010-06-15 14:45:15 +02:00
|
|
|
#include "path-lookup.h"
|
2014-03-03 21:23:12 +01:00
|
|
|
#include "show-status.h"
|
2015-09-22 23:24:07 +02:00
|
|
|
#include "unit-name.h"
|
2009-11-18 00:42:52 +01:00
|
|
|
|
2018-10-09 16:15:54 +02:00
|
|
|
/* Flags selecting how much of the manager is actually set up; used for running in test modes. */
typedef enum ManagerTestRunFlags {
        /* Run normally, no test mode. */
        MANAGER_TEST_NORMAL             = 0,

        /* Only create the basic data structures. */
        MANAGER_TEST_RUN_MINIMAL        = 1 << 0,

        /* Also interact with the environment. */
        MANAGER_TEST_RUN_BASIC          = 1 << 1,

        /* Additionally run the environment generators. */
        MANAGER_TEST_RUN_ENV_GENERATORS = 1 << 2,

        /* Additionally run the unit generators. */
        MANAGER_TEST_RUN_GENERATORS     = 1 << 3,

        MANAGER_TEST_FULL = MANAGER_TEST_RUN_BASIC | MANAGER_TEST_RUN_ENV_GENERATORS | MANAGER_TEST_RUN_GENERATORS,
} ManagerTestRunFlags;
|
|
|
|
|
2017-09-16 11:19:43 +02:00
|
|
|
/* test_run_flags is stored in an 8-bit wide bitfield in struct Manager, hence all test flags must fit
 * into a single byte. */
assert_cc((MANAGER_TEST_FULL & UINT8_MAX) == MANAGER_TEST_FULL);
|
|
|
|
|
2009-11-18 00:42:52 +01:00
|
|
|
struct Manager {
|
2010-01-26 21:39:06 +01:00
|
|
|
/* Note that the set of units we know of is allowed to be
|
2011-02-21 15:32:17 +01:00
|
|
|
* inconsistent. However the subset of it that is loaded may
|
2010-01-18 23:50:13 +01:00
|
|
|
* not, and the list of jobs may neither. */
|
|
|
|
|
2010-01-26 21:39:06 +01:00
|
|
|
/* Active jobs and units */
|
|
|
|
Hashmap *units; /* name string => Unit object n:1 */
|
2016-08-30 23:18:46 +02:00
|
|
|
Hashmap *units_by_invocation_id;
|
2009-11-18 00:42:52 +01:00
|
|
|
Hashmap *jobs; /* job id => Job object 1:1 */
|
|
|
|
|
2010-01-29 06:04:08 +01:00
|
|
|
/* To make it easy to iterate through the units of a specific
|
|
|
|
* type we maintain a per type linked list */
|
2012-01-15 12:04:08 +01:00
|
|
|
LIST_HEAD(Unit, units_by_type[_UNIT_TYPE_MAX]);
|
2010-01-29 06:04:08 +01:00
|
|
|
|
2010-01-26 21:39:06 +01:00
|
|
|
/* Units that need to be loaded */
|
2012-01-15 12:04:08 +01:00
|
|
|
LIST_HEAD(Unit, load_queue); /* this is actually more a stack than a queue, but uh. */
|
2009-11-18 00:42:52 +01:00
|
|
|
|
2010-01-26 04:18:44 +01:00
|
|
|
/* Jobs that need to be run */
|
2019-05-22 12:12:17 +02:00
|
|
|
struct Prioq *run_queue;
|
2010-01-26 04:18:44 +01:00
|
|
|
|
2010-02-05 00:38:41 +01:00
|
|
|
/* Units and jobs that have not yet been announced via
|
|
|
|
* D-Bus. When something about a job changes it is added here
|
|
|
|
* if it is not in there yet. This allows easy coalescing of
|
|
|
|
* D-Bus change signals. */
|
2012-01-15 12:04:08 +01:00
|
|
|
LIST_HEAD(Unit, dbus_unit_queue);
|
2010-02-05 00:38:41 +01:00
|
|
|
LIST_HEAD(Job, dbus_job_queue);
|
|
|
|
|
2010-04-21 06:01:13 +02:00
|
|
|
/* Units to remove */
|
2012-01-15 12:04:08 +01:00
|
|
|
LIST_HEAD(Unit, cleanup_queue);
|
2010-04-06 02:43:58 +02:00
|
|
|
|
2016-11-15 19:32:50 +01:00
|
|
|
/* Units and jobs to check when doing GC */
|
|
|
|
LIST_HEAD(Unit, gc_unit_queue);
|
|
|
|
LIST_HEAD(Job, gc_job_queue);
|
2010-04-21 06:01:13 +02:00
|
|
|
|
2013-06-27 04:14:27 +02:00
|
|
|
/* Units that should be realized */
|
2017-09-26 22:15:02 +02:00
|
|
|
LIST_HEAD(Unit, cgroup_realize_queue);
|
2013-06-27 04:14:27 +02:00
|
|
|
|
2017-09-26 22:43:08 +02:00
|
|
|
/* Units whose cgroup ran empty */
|
|
|
|
LIST_HEAD(Unit, cgroup_empty_queue);
|
|
|
|
|
2019-03-19 19:05:19 +01:00
|
|
|
/* Units whose memory.event fired */
|
|
|
|
LIST_HEAD(Unit, cgroup_oom_queue);
|
|
|
|
|
2018-03-23 15:28:06 +01:00
|
|
|
/* Target units whose default target dependencies haven't been set yet */
|
|
|
|
LIST_HEAD(Unit, target_deps_queue);
|
|
|
|
|
2018-08-09 16:26:27 +02:00
|
|
|
/* Units that might be subject to StopWhenUnneeded= clean-up */
|
|
|
|
LIST_HEAD(Unit, stop_when_unneeded_queue);
|
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
sd_event *event;
|
|
|
|
|
core: rework how we track which PIDs to watch for a unit
Previously, we'd maintain two hashmaps keyed by PIDs, pointing to Unit
interested in SIGCHLD events for them. This scheme allowed a specific
PID to be watched by exactly 0, 1 or 2 units.
With this rework this is replaced by a single hashmap which is primarily
keyed by the PID and points to a Unit interested in it. However, it
optionally also keyed by the negated PID, in which case it points to a
NULL terminated array of additional Unit objects also interested. This
scheme means arbitrary numbers of Units may now watch the same PID.
Runtime and memory behaviour should not be impact by this change, as for
the common case (i.e. each PID only watched by a single unit) behaviour
stays the same, but for the uncommon case (a PID watched by more than
one unit) we only pay with a single additional memory allocation for the
array.
Why this all? Primarily, because allowing exactly two units to watch a
specific PID is not sufficient for some niche cases, as processes can
belong to more than one unit these days:
1. sd_notify() with MAINPID= can be used to attach a process from a
different cgroup to multiple units.
2. Similar, the PIDFile= setting in unit files can be used for similar
setups,
3. By creating a scope unit a main process of a service may join a
different unit, too.
4. On cgroupsv1 we frequently end up watching all processes remaining in
a scope, and if a process opens lots of scopes one after the other it
might thus end up being watch by many of them.
This patch hence removes the 2-unit-per-PID limit. It also makes a
couple of other changes, some of them quite relevant:
- manager_get_unit_by_pid() (and the bus call wrapping it) when there's
ambiguity will prefer returning the Unit the process belongs to based on
cgroup membership, and only check the watch-pids hashmap if that
fails. This change in logic is probably more in line with what people
expect and makes things more stable as each process can belong to
exactly one cgroup only.
- Every SIGCHLD event is now dispatched to all units interested in its
PID. Previously, there was some magic conditionalization: the SIGCHLD
would only be dispatched to the unit if it was only interested in a
single PID only, or the PID belonged to the control or main PID or we
didn't dispatch a signle SIGCHLD to the unit in the current event loop
iteration yet. These rules were quite arbitrary and also redundant as
the the per-unit handlers would filter the PIDs anyway a second time.
With this change we'll hence relax the rules: all we do now is
dispatch every SIGCHLD event exactly once to each unit interested in
it, and it's up to the unit to then use or ignore this. We use a
generation counter in the unit to ensure that we only invoke the unit
handler once for each event, protecting us from confusion if a unit is
both associated with a specific PID through cgroup membership and
through the "watch_pids" logic. It also protects us from being
confused if the "watch_pids" hashmap is altered while we are
dispatching to it (which is a very likely case).
- sd_notify() message dispatching has been reworked to be very similar
to SIGCHLD handling now. A generation counter is used for dispatching
as well.
This also adds a new test that validates that "watch_pid" registration
and unregstration works correctly.
2018-01-12 13:41:05 +01:00
|
|
|
/* This maps PIDs we care about to the units that are interested in them. We allow multiple units to be
 * interested in the same PID and multiple PIDs to be relevant to the same unit. Since in most cases only a
 * single unit will be interested in the same PID we use a somewhat special encoding here: the first unit
 * interested in a PID is stored directly in the hashmap, keyed by the PID unmodified. If there are other
 * units interested too, they'll be stored in a NULL-terminated array, keyed by the negative PID. This is
 * safe as pid_t is signed and negative PIDs are not used for regular processes but for process groups,
 * which we don't care about in this context, but this allows us to use the negative range for our own
 * purposes. */
|
|
|
|
Hashmap *watch_pids; /* pid => unit as well as -pid => array of units */
|
2010-01-24 00:39:29 +01:00
|
|
|
|
2014-05-15 17:09:34 +02:00
|
|
|
/* A set contains all units which cgroup should be refreshed after startup */
|
|
|
|
Set *startup_units;
|
|
|
|
|
2014-03-12 20:55:13 +01:00
|
|
|
/* A set which contains all currently failed units */
|
|
|
|
Set *failed_units;
|
|
|
|
|
2013-11-25 15:22:41 +01:00
|
|
|
sd_event_source *run_queue_event_source;
|
|
|
|
|
2010-06-18 23:12:48 +02:00
|
|
|
char *notify_socket;
|
2013-11-19 21:12:59 +01:00
|
|
|
int notify_fd;
|
|
|
|
sd_event_source *notify_event_source;
|
|
|
|
|
2016-05-04 20:43:23 +02:00
|
|
|
int cgroups_agent_fd;
|
|
|
|
sd_event_source *cgroups_agent_event_source;
|
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
int signal_fd;
|
|
|
|
sd_event_source *signal_event_source;
|
2010-06-18 23:12:48 +02:00
|
|
|
|
2018-01-23 18:18:13 +01:00
|
|
|
sd_event_source *sigchld_event_source;
|
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
int time_change_fd;
|
|
|
|
sd_event_source *time_change_event_source;
|
2010-04-21 04:01:24 +02:00
|
|
|
|
2018-05-28 21:33:10 +02:00
|
|
|
sd_event_source *timezone_change_event_source;
|
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
sd_event_source *jobs_in_progress_event_source;
|
2010-01-27 04:31:52 +01:00
|
|
|
|
2016-08-01 19:24:40 +02:00
|
|
|
int user_lookup_fds[2];
|
|
|
|
sd_event_source *user_lookup_event_source;
|
|
|
|
|
2016-02-24 21:24:23 +01:00
|
|
|
UnitFileScope unit_file_scope;
|
2010-06-15 14:45:15 +02:00
|
|
|
LookupPaths lookup_paths;
|
2019-07-18 13:11:28 +02:00
|
|
|
Hashmap *unit_id_map;
|
|
|
|
Hashmap *unit_name_map;
|
2010-07-11 00:52:00 +02:00
|
|
|
Set *unit_path_cache;
|
2019-07-10 18:01:13 +02:00
|
|
|
usec_t unit_cache_mtime;
|
2010-02-13 01:07:02 +01:00
|
|
|
|
2018-10-31 15:49:19 +01:00
|
|
|
char **transient_environment; /* The environment, as determined from config files, kernel cmdline and environment generators */
|
|
|
|
char **client_environment; /* Environment variables created by clients through the bus API */
|
2010-05-09 23:53:52 +02:00
|
|
|
|
2020-04-22 16:16:47 +02:00
|
|
|
usec_t watchdog[_WATCHDOG_TYPE_MAX];
|
|
|
|
usec_t watchdog_overridden[_WATCHDOG_TYPE_MAX];
|
2012-04-05 22:08:10 +02:00
|
|
|
|
2017-11-20 21:01:13 +01:00
|
|
|
dual_timestamp timestamps[_MANAGER_TIMESTAMP_MAX];
|
2010-04-16 23:24:39 +02:00
|
|
|
|
2013-11-25 21:08:39 +01:00
|
|
|
/* Data specific to the device subsystem */
|
2018-10-08 15:47:29 +02:00
|
|
|
sd_device_monitor *device_monitor;
|
2010-07-20 20:33:19 +02:00
|
|
|
Hashmap *devices_by_sysfs;
|
2010-01-29 06:04:08 +01:00
|
|
|
|
|
|
|
/* Data specific to the mount subsystem */
|
2015-06-01 13:48:01 +02:00
|
|
|
struct libmnt_monitor *mount_monitor;
|
2013-11-19 21:12:59 +01:00
|
|
|
sd_event_source *mount_event_source;
|
2010-02-01 03:33:24 +01:00
|
|
|
|
2010-05-09 18:44:11 +02:00
|
|
|
/* Data specific to the swap filesystem */
|
|
|
|
FILE *proc_swaps;
|
2013-11-19 21:12:59 +01:00
|
|
|
sd_event_source *swap_event_source;
|
2013-11-25 21:08:39 +01:00
|
|
|
Hashmap *swaps_by_devnode;
|
2010-05-09 18:44:11 +02:00
|
|
|
|
2010-02-01 03:33:24 +01:00
|
|
|
/* Data specific to the D-Bus subsystem */
|
2013-11-19 21:12:59 +01:00
|
|
|
sd_bus *api_bus, *system_bus;
|
|
|
|
Set *private_buses;
|
|
|
|
int private_listen_fd;
|
|
|
|
sd_event_source *private_listen_event_source;
|
2014-03-03 01:33:45 +01:00
|
|
|
|
|
|
|
/* Contains all the clients that are subscribed to signals via the API bus. Note that private bus
 * connections are always considered subscribed, since they last only for a very short time, and it
 * is much simpler that way. */
|
|
|
|
sd_bus_track *subscribed;
|
|
|
|
char **deserialized_subscribed;
|
2010-06-19 03:04:04 +02:00
|
|
|
|
2015-04-29 19:02:08 +02:00
|
|
|
/* This is used during reloading: before the reload we queue
|
|
|
|
* the reply message here, and afterwards we send it */
|
2018-11-13 11:59:06 +01:00
|
|
|
sd_bus_message *pending_reload_message;
|
2010-03-31 16:29:55 +02:00
|
|
|
|
2010-04-15 23:16:16 +02:00
|
|
|
Hashmap *watch_bus; /* D-Bus names => Unit object n:1 */
|
|
|
|
|
2013-07-10 21:10:53 +02:00
|
|
|
bool send_reloading_done;
|
|
|
|
|
2010-08-11 22:37:10 +02:00
|
|
|
uint32_t current_job_id;
|
2012-02-02 12:39:33 +01:00
|
|
|
uint32_t default_unit_job_id;
|
2010-08-11 22:37:10 +02:00
|
|
|
|
2010-04-21 04:01:24 +02:00
|
|
|
/* Data specific to the Automount subsystem */
|
|
|
|
int dev_autofs_fd;
|
|
|
|
|
2010-03-31 16:29:55 +02:00
|
|
|
/* Data specific to the cgroup subsystem */
|
2013-06-27 04:14:27 +02:00
|
|
|
Hashmap *cgroup_unit;
|
core: unified cgroup hierarchy support
This patch set adds full support the new unified cgroup hierarchy logic
of modern kernels.
A new kernel command line option "systemd.unified_cgroup_hierarchy=1" is
added. If specified the unified hierarchy is mounted to /sys/fs/cgroup
instead of a tmpfs. No further hierarchies are mounted. The kernel
command line option defaults to off. We can turn it on by default as
soon as the kernel's APIs regarding this are stabilized (but even then
downstream distros might want to turn this off, as this will break any
tools that access cgroupfs directly).
It is possibly to choose for each boot individually whether the unified
or the legacy hierarchy is used. nspawn will by default provide the
legacy hierarchy to containers if the host is using it, and the unified
otherwise. However it is possible to run containers with the unified
hierarchy on a legacy host and vice versa, by setting the
$UNIFIED_CGROUP_HIERARCHY environment variable for nspawn to 1 or 0,
respectively.
The unified hierarchy provides reliable cgroup empty notifications for
the first time, via inotify. To make use of this we maintain one
manager-wide inotify fd, and each cgroup to it.
This patch also removes cg_delete() which is unused now.
On kernel 4.2 only the "memory" controller is compatible with the
unified hierarchy, hence that's the only controller systemd exposes when
booted in unified heirarchy mode.
This introduces a new enum for enumerating supported controllers, plus a
related enum for the mask bits mapping to it. The core is changed to
make use of this everywhere.
This moves PID 1 into a new "init.scope" implicit scope unit in the root
slice. This is necessary since on the unified hierarchy cgroups may
either contain subgroups or processes but not both. PID 1 hence has to
move out of the root cgroup (strictly speaking the root cgroup is the
only one where processes and subgroups are still allowed, but in order
to support containers nicey, we move PID 1 into the new scope in all
cases.) This new unit is also used on legacy hierarchy setups. It's
actually pretty useful on all systems, as it can then be used to filter
journal messages coming from PID 1, and so on.
The root slice ("-.slice") is now implicitly created and started (and
does not require a unit file on disk anymore), since
that's where "init.scope" is located and the slice needs to be started
before the scope can.
To check whether we are in unified or legacy hierarchy mode we use
statfs() on /sys/fs/cgroup. If the .f_type field reports tmpfs we are in
legacy mode, if it reports cgroupfs we are in unified mode.
This patch set carefuly makes sure that cgls and cgtop continue to work
as desired.
When invoking nspawn as a service it will implicitly create two
subcgroups in the cgroup it is using, one to move the nspawn process
into, the other to move the actual container processes into. This is
done because of the requirement that cgroups may either contain
processes or other subgroups.
2015-09-01 19:22:36 +02:00
|
|
|
CGroupMask cgroup_supported;
|
2013-06-20 03:45:08 +02:00
|
|
|
char *cgroup_root;
|
2010-04-10 17:53:17 +02:00
|
|
|
|
2017-09-26 22:43:08 +02:00
|
|
|
/* Notifications from cgroups, when the unified hierarchy is used is done via inotify. */
|
core: unified cgroup hierarchy support
This patch set adds full support the new unified cgroup hierarchy logic
of modern kernels.
A new kernel command line option "systemd.unified_cgroup_hierarchy=1" is
added. If specified the unified hierarchy is mounted to /sys/fs/cgroup
instead of a tmpfs. No further hierarchies are mounted. The kernel
command line option defaults to off. We can turn it on by default as
soon as the kernel's APIs regarding this are stabilized (but even then
downstream distros might want to turn this off, as this will break any
tools that access cgroupfs directly).
It is possibly to choose for each boot individually whether the unified
or the legacy hierarchy is used. nspawn will by default provide the
legacy hierarchy to containers if the host is using it, and the unified
otherwise. However it is possible to run containers with the unified
hierarchy on a legacy host and vice versa, by setting the
$UNIFIED_CGROUP_HIERARCHY environment variable for nspawn to 1 or 0,
respectively.
The unified hierarchy provides reliable cgroup empty notifications for
the first time, via inotify. To make use of this we maintain one
manager-wide inotify fd, and each cgroup to it.
This patch also removes cg_delete() which is unused now.
On kernel 4.2 only the "memory" controller is compatible with the
unified hierarchy, hence that's the only controller systemd exposes when
booted in unified heirarchy mode.
This introduces a new enum for enumerating supported controllers, plus a
related enum for the mask bits mapping to it. The core is changed to
make use of this everywhere.
This moves PID 1 into a new "init.scope" implicit scope unit in the root
slice. This is necessary since on the unified hierarchy cgroups may
either contain subgroups or processes but not both. PID 1 hence has to
move out of the root cgroup (strictly speaking the root cgroup is the
only one where processes and subgroups are still allowed, but in order
to support containers nicey, we move PID 1 into the new scope in all
cases.) This new unit is also used on legacy hierarchy setups. It's
actually pretty useful on all systems, as it can then be used to filter
journal messages coming from PID 1, and so on.
The root slice ("-.slice") is now implicitly created and started (and
does not require a unit file on disk anymore), since
that's where "init.scope" is located and the slice needs to be started
before the scope can.
To check whether we are in unified or legacy hierarchy mode we use
statfs() on /sys/fs/cgroup. If the .f_type field reports tmpfs we are in
legacy mode, if it reports cgroupfs we are in unified mode.
This patch set carefuly makes sure that cgls and cgtop continue to work
as desired.
When invoking nspawn as a service it will implicitly create two
subcgroups in the cgroup it is using, one to move the nspawn process
into, the other to move the actual container processes into. This is
done because of the requirement that cgroups may either contain
processes or other subgroups.
2015-09-01 19:22:36 +02:00
|
|
|
int cgroup_inotify_fd;
|
|
|
|
sd_event_source *cgroup_inotify_event_source;
|
2019-03-19 19:05:19 +01:00
|
|
|
|
|
|
|
/* Maps for finding the unit for each inotify watch descriptor for the cgroup.events and
|
|
|
|
* memory.events cgroupv2 attributes. */
|
2019-03-19 17:17:31 +01:00
|
|
|
Hashmap *cgroup_control_inotify_wd_unit;
|
2019-03-19 19:05:19 +01:00
|
|
|
Hashmap *cgroup_memory_inotify_wd_unit;
|
2010-04-21 06:01:13 +02:00
|
|
|
|
2017-09-26 22:43:08 +02:00
|
|
|
/* A defer event for handling cgroup empty events and processing them after SIGCHLD in all cases. */
|
|
|
|
sd_event_source *cgroup_empty_event_source;
|
2019-03-19 19:05:19 +01:00
|
|
|
sd_event_source *cgroup_oom_event_source;
|
2017-09-26 22:43:08 +02:00
|
|
|
|
2011-02-21 15:32:17 +01:00
|
|
|
/* Make sure the user cannot accidentally unmount our cgroup
|
2010-06-18 20:15:34 +02:00
|
|
|
* file system */
|
|
|
|
int pin_cgroupfs_fd;
|
|
|
|
|
2017-10-05 15:04:19 +02:00
|
|
|
unsigned gc_marker;
|
core: unified cgroup hierarchy support
This patch set adds full support the new unified cgroup hierarchy logic
of modern kernels.
A new kernel command line option "systemd.unified_cgroup_hierarchy=1" is
added. If specified the unified hierarchy is mounted to /sys/fs/cgroup
instead of a tmpfs. No further hierarchies are mounted. The kernel
command line option defaults to off. We can turn it on by default as
soon as the kernel's APIs regarding this are stabilized (but even then
downstream distros might want to turn this off, as this will break any
tools that access cgroupfs directly).
It is possibly to choose for each boot individually whether the unified
or the legacy hierarchy is used. nspawn will by default provide the
legacy hierarchy to containers if the host is using it, and the unified
otherwise. However it is possible to run containers with the unified
hierarchy on a legacy host and vice versa, by setting the
$UNIFIED_CGROUP_HIERARCHY environment variable for nspawn to 1 or 0,
respectively.
The unified hierarchy provides reliable cgroup empty notifications for
the first time, via inotify. To make use of this we maintain one
manager-wide inotify fd, and each cgroup to it.
This patch also removes cg_delete() which is unused now.
On kernel 4.2 only the "memory" controller is compatible with the
unified hierarchy, hence that's the only controller systemd exposes when
booted in unified heirarchy mode.
This introduces a new enum for enumerating supported controllers, plus a
related enum for the mask bits mapping to it. The core is changed to
make use of this everywhere.
This moves PID 1 into a new "init.scope" implicit scope unit in the root
slice. This is necessary since on the unified hierarchy cgroups may
either contain subgroups or processes but not both. PID 1 hence has to
move out of the root cgroup (strictly speaking the root cgroup is the
only one where processes and subgroups are still allowed, but in order
to support containers nicey, we move PID 1 into the new scope in all
cases.) This new unit is also used on legacy hierarchy setups. It's
actually pretty useful on all systems, as it can then be used to filter
journal messages coming from PID 1, and so on.
The root slice ("-.slice") is now implicitly created and started (and
does not require a unit file on disk anymore), since
that's where "init.scope" is located and the slice needs to be started
before the scope can.
To check whether we are in unified or legacy hierarchy mode we use
statfs() on /sys/fs/cgroup. If the .f_type field reports tmpfs we are in
legacy mode, if it reports cgroupfs we are in unified mode.
This patch set carefuly makes sure that cgls and cgtop continue to work
as desired.
When invoking nspawn as a service it will implicitly create two
subcgroups in the cgroup it is using, one to move the nspawn process
into, the other to move the actual container processes into. This is
done because of the requirement that cgroups may either contain
processes or other subgroups.
2015-09-01 19:22:36 +02:00
|
|
|
|
2018-05-28 21:33:10 +02:00
|
|
|
/* The stat() data the last time we saw /etc/localtime */
|
|
|
|
usec_t etc_localtime_mtime;
|
|
|
|
bool etc_localtime_accessible:1;
|
|
|
|
|
2018-10-09 15:42:19 +02:00
|
|
|
ManagerObjective objective:5;
|
2010-04-18 03:07:42 +02:00
|
|
|
|
2018-10-09 15:42:19 +02:00
|
|
|
/* Flags */
|
2010-04-21 04:01:24 +02:00
|
|
|
bool dispatching_load_queue:1;
|
|
|
|
|
2011-03-30 00:47:50 +02:00
|
|
|
bool taint_usr:1;
|
2017-09-16 11:19:43 +02:00
|
|
|
|
2018-01-21 13:17:54 +01:00
|
|
|
/* Have we already sent out the READY=1 notification? */
|
2017-10-24 14:48:54 +02:00
|
|
|
bool ready_sent:1;
|
|
|
|
|
2018-01-21 13:17:54 +01:00
|
|
|
/* Have we already printed the taint line if necessary? */
|
|
|
|
bool taint_logged:1;
|
|
|
|
|
2018-01-17 18:50:27 +01:00
|
|
|
/* Have we ever changed the "kernel.pid_max" sysctl? */
|
|
|
|
bool sysctl_pid_max_changed:1;
|
|
|
|
|
2018-10-09 16:15:54 +02:00
|
|
|
ManagerTestRunFlags test_run_flags:8;
|
2014-01-07 14:41:24 +01:00
|
|
|
|
2015-09-18 13:37:34 +02:00
|
|
|
/* If non-zero, exit with the following value when the systemd process terminates. Useful for
 * containers: systemd-nspawn could get the return value. */
|
|
|
|
uint8_t return_value;
|
|
|
|
|
2014-01-28 04:27:07 +01:00
|
|
|
ShowStatus show_status;
|
2020-04-27 11:06:34 +02:00
|
|
|
ShowStatus show_status_overridden;
|
2019-06-06 19:22:20 +02:00
|
|
|
StatusUnitFormat status_unit_format;
|
2016-11-02 10:38:22 +01:00
|
|
|
char *confirm_spawn;
|
systemd: do not output status messages once gettys are running
Make Type=idle communication bidirectional: when bootup is finished,
the manager, as before, signals idling Type=idle jobs to continue.
However, if the boot takes too long, idling jobs signal the manager
that they have had enough, wait a tiny bit more, and continue, taking
ownership of the console. The manager, when signalled that Type=idle
jobs are done, makes a note and will not write to the console anymore.
This is a cosmetic issue, but quite noticable, so let's just fix it.
Based on Harald Hoyer's patch.
https://bugs.freedesktop.org/show_bug.cgi?id=54247
http://unix.stackexchange.com/questions/51805/systemd-messages-after-starting-login/
2013-07-16 03:34:57 +02:00
|
|
|
bool no_console_output;
|
2017-03-20 13:10:43 +01:00
|
|
|
bool service_watchdogs;
|
2010-08-25 03:11:26 +02:00
|
|
|
|
2011-02-15 11:52:29 +01:00
|
|
|
ExecOutput default_std_output, default_std_error;
|
|
|
|
|
2014-02-24 23:50:10 +01:00
|
|
|
usec_t default_restart_usec, default_timeout_start_usec, default_timeout_stop_usec;
|
2017-11-29 07:43:44 +01:00
|
|
|
usec_t default_timeout_abort_usec;
|
|
|
|
bool default_timeout_abort_set;
|
2013-11-04 17:47:43 +01:00
|
|
|
|
2013-11-08 16:01:22 +01:00
|
|
|
usec_t default_start_limit_interval;
|
|
|
|
unsigned default_start_limit_burst;
|
|
|
|
|
2014-02-24 23:50:10 +01:00
|
|
|
bool default_cpu_accounting;
|
|
|
|
bool default_memory_accounting;
|
2016-05-05 22:42:55 +02:00
|
|
|
bool default_io_accounting;
|
2014-02-24 23:50:10 +01:00
|
|
|
bool default_blockio_accounting;
|
2015-09-10 12:32:16 +02:00
|
|
|
bool default_tasks_accounting;
|
2017-09-01 16:04:50 +02:00
|
|
|
bool default_ip_accounting;
|
2014-02-24 23:50:10 +01:00
|
|
|
|
2019-11-05 13:50:28 +01:00
|
|
|
TasksMax default_tasks_max;
|
2014-03-24 16:22:34 +01:00
|
|
|
usec_t default_timer_accuracy_usec;
|
|
|
|
|
2019-03-19 19:05:19 +01:00
|
|
|
OOMPolicy default_oom_policy;
|
|
|
|
|
2018-05-30 17:57:23 +02:00
|
|
|
int original_log_level;
|
2018-06-01 18:21:03 +02:00
|
|
|
LogTarget original_log_target;
|
2018-05-30 17:57:23 +02:00
|
|
|
bool log_level_overridden:1;
|
2018-06-01 18:21:03 +02:00
|
|
|
bool log_target_overridden:1;
|
2018-05-30 17:57:23 +02:00
|
|
|
|
2014-03-05 02:29:58 +01:00
|
|
|
struct rlimit *rlimit[_RLIMIT_MAX];
|
2012-03-21 18:03:40 +01:00
|
|
|
|
2011-07-06 00:47:39 +02:00
|
|
|
/* non-zero if we are reloading or reexecuting */
|
|
|
|
int n_reloading;
|
2010-09-21 03:51:31 +02:00
|
|
|
|
|
|
|
unsigned n_installed_jobs;
|
2010-09-23 15:38:42 +02:00
|
|
|
unsigned n_failed_jobs;
|
2012-04-24 14:28:00 +02:00
|
|
|
|
2013-02-28 00:03:22 +01:00
|
|
|
/* Jobs in progress watching */
|
2013-02-26 12:09:41 +01:00
|
|
|
unsigned n_running_jobs;
|
2013-02-28 00:01:10 +01:00
|
|
|
unsigned n_on_console;
|
2013-02-28 00:03:22 +01:00
|
|
|
unsigned jobs_in_progress_iteration;
|
2013-02-26 12:09:41 +01:00
|
|
|
|
2014-10-26 02:30:51 +02:00
|
|
|
/* Do we have any outstanding password prompts? */
|
|
|
|
int have_ask_password;
|
|
|
|
int ask_password_inotify_fd;
|
|
|
|
sd_event_source *ask_password_event_source;
|
|
|
|
|
2012-04-24 14:28:00 +02:00
|
|
|
/* Type=idle pipes */
|
systemd: do not output status messages once gettys are running
Make Type=idle communication bidirectional: when bootup is finished,
the manager, as before, signals idling Type=idle jobs to continue.
However, if the boot takes too long, idling jobs signal the manager
that they have had enough, wait a tiny bit more, and continue, taking
ownership of the console. The manager, when signalled that Type=idle
jobs are done, makes a note and will not write to the console anymore.
This is a cosmetic issue, but quite noticeable, so let's just fix it.
Based on Harald Hoyer's patch.
https://bugs.freedesktop.org/show_bug.cgi?id=54247
http://unix.stackexchange.com/questions/51805/systemd-messages-after-starting-login/
2013-07-16 03:34:57 +02:00
|
|
|
int idle_pipe[4];
|
2013-11-19 21:12:59 +01:00
|
|
|
sd_event_source *idle_pipe_event_source;
|
2012-05-09 01:24:50 +02:00
|
|
|
|
|
|
|
char *switch_root;
|
|
|
|
char *switch_root_init;
|
2013-09-26 20:14:24 +02:00
|
|
|
|
|
|
|
/* This maps all possible path prefixes to the units needing
|
|
|
|
* them. It's a hashmap with a path string as key and a Set as
|
|
|
|
* value where Unit objects are contained. */
|
|
|
|
Hashmap *units_requiring_mounts_for;
|
2013-11-30 03:53:42 +01:00
|
|
|
|
2014-08-06 11:45:36 +02:00
|
|
|
/* Used for processing polkit authorization responses */
|
|
|
|
Hashmap *polkit_registry;
|
2015-01-28 02:18:59 +01:00
|
|
|
|
2016-07-14 12:37:28 +02:00
|
|
|
/* Dynamic users/groups, indexed by their name */
|
|
|
|
Hashmap *dynamic_users;
|
|
|
|
|
2016-08-01 19:24:40 +02:00
|
|
|
/* Keep track of all UIDs and GIDs any of our services currently use. This is useful for the RemoveIPC= logic. */
|
|
|
|
Hashmap *uid_refs;
|
|
|
|
Hashmap *gid_refs;
|
|
|
|
|
2018-02-06 08:00:34 +01:00
|
|
|
/* ExecRuntime, indexed by their owner unit id */
|
|
|
|
Hashmap *exec_runtime_by_id;
|
|
|
|
|
2016-10-07 03:08:21 +02:00
|
|
|
/* When the user hits C-A-D more than 7 times per 2s, do something immediately... */
|
2015-01-28 02:18:59 +01:00
|
|
|
RateLimit ctrl_alt_del_ratelimit;
|
2016-10-18 12:16:32 +02:00
|
|
|
EmergencyAction cad_burst_action;
|
core,network: major per-object logging rework
This changes log_unit_info() (and friends) to take a real Unit* object
insted of just a unit name as parameter. The call will now prefix all
logged messages with the unit name, thus allowing the unit name to be
dropped from the various passed romat strings, simplifying invocations
drastically, and unifying log output across messages. Also, UNIT= vs.
USER_UNIT= is now derived from the Manager object attached to the Unit
object, instead of getpid(). This has the benefit of correcting the
field for --test runs.
Also contains a couple of other logging improvements:
- Drops a couple of strerror() invocations in favour of using %m.
- Not only .mount units now warn if a symlinks exist for the mount
point already, .automount units do that too, now.
- A few invocations of log_struct() that didn't actually pass any
additional structured data have been replaced by simpler invocations
of log_unit_info() and friends.
- For structured data a new LOG_UNIT_MESSAGE() macro has been added,
that works like LOG_MESSAGE() but prefixes the message with the unit
name. Similar, there's now LOG_LINK_MESSAGE() and
LOG_NETDEV_MESSAGE().
- For structured data new LOG_UNIT_ID(), LOG_LINK_INTERFACE(),
LOG_NETDEV_INTERFACE() macros have been added that generate the
necessary per object fields. The old log_unit_struct() call has been
removed in favour of these new macros used in raw log_struct()
invocations. In addition to removing one more function call this
allows generated structured log messages that contain two object
fields, as necessary for example for network interfaces that are
joined into another network interface, and whose messages shall be
indexed by both.
- The LOG_ERRNO() macro has been removed, in favour of
log_struct_errno(). The latter has the benefit of ensuring that %m in
format strings is properly resolved to the specified error number.
- A number of logging messages have been converted to use
log_unit_info() instead of log_info()
- The client code in sysv-generator no longer #includes core code from
src/core/.
- log_unit_full_errno() has been removed, log_unit_full() instead takes
an errno now, too.
- log_unit_info(), log_link_info(), log_netdev_info() and friends, now
avoid double evaluation of their parameters
2015-05-11 20:38:21 +02:00
|
|
|
|
|
|
|
const char *unit_log_field;
|
|
|
|
const char *unit_log_format_string;
|
2015-09-01 02:34:19 +02:00
|
|
|
|
2016-08-30 23:18:46 +02:00
|
|
|
const char *invocation_log_field;
|
|
|
|
const char *invocation_log_format_string;
|
|
|
|
|
2016-02-24 21:24:23 +01:00
|
|
|
int first_boot; /* tri-state */
|
core: add {State,Cache,Log,Configuration}Directory= (#6384)
This introduces {State,Cache,Log,Configuration}Directory= those are
similar to RuntimeDirectory=. They create the directories under
/var/lib, /var/cache/, /var/log, or /etc, respectively, with the mode
specified in {State,Cache,Log,Configuration}DirectoryMode=.
This also fixes #6391.
2017-07-18 14:34:52 +02:00
|
|
|
|
core: rework how we track which PIDs to watch for a unit
Previously, we'd maintain two hashmaps keyed by PIDs, pointing to Unit
interested in SIGCHLD events for them. This scheme allowed a specific
PID to be watched by exactly 0, 1 or 2 units.
With this rework this is replaced by a single hashmap which is primarily
keyed by the PID and points to a Unit interested in it. However, it
optionally also keyed by the negated PID, in which case it points to a
NULL terminated array of additional Unit objects also interested. This
scheme means arbitrary numbers of Units may now watch the same PID.
Runtime and memory behaviour should not be impacted by this change, as for
the common case (i.e. each PID only watched by a single unit) behaviour
stays the same, but for the uncommon case (a PID watched by more than
one unit) we only pay with a single additional memory allocation for the
array.
Why this all? Primarily, because allowing exactly two units to watch a
specific PID is not sufficient for some niche cases, as processes can
belong to more than one unit these days:
1. sd_notify() with MAINPID= can be used to attach a process from a
different cgroup to multiple units.
2. Similar, the PIDFile= setting in unit files can be used for similar
setups,
3. By creating a scope unit a main process of a service may join a
different unit, too.
4. On cgroupsv1 we frequently end up watching all processes remaining in
a scope, and if a process opens lots of scopes one after the other it
might thus end up being watched by many of them.
This patch hence removes the 2-unit-per-PID limit. It also makes a
couple of other changes, some of them quite relevant:
- manager_get_unit_by_pid() (and the bus call wrapping it) when there's
ambiguity will prefer returning the Unit the process belongs to based on
cgroup membership, and only check the watch-pids hashmap if that
fails. This change in logic is probably more in line with what people
expect and makes things more stable as each process can belong to
exactly one cgroup only.
- Every SIGCHLD event is now dispatched to all units interested in its
PID. Previously, there was some magic conditionalization: the SIGCHLD
would only be dispatched to the unit if it was only interested in a
single PID only, or the PID belonged to the control or main PID or we
didn't dispatch a single SIGCHLD to the unit in the current event loop
iteration yet. These rules were quite arbitrary and also redundant as
the per-unit handlers would filter the PIDs anyway a second time.
With this change we'll hence relax the rules: all we do now is
dispatch every SIGCHLD event exactly once to each unit interested in
it, and it's up to the unit to then use or ignore this. We use a
generation counter in the unit to ensure that we only invoke the unit
handler once for each event, protecting us from confusion if a unit is
both associated with a specific PID through cgroup membership and
through the "watch_pids" logic. It also protects us from being
confused if the "watch_pids" hashmap is altered while we are
dispatching to it (which is a very likely case).
- sd_notify() message dispatching has been reworked to be very similar
to SIGCHLD handling now. A generation counter is used for dispatching
as well.
This also adds a new test that validates that "watch_pid" registration
and unregistration works correctly.
2018-01-12 13:41:05 +01:00
|
|
|
/* Prefixes of e.g. RuntimeDirectory= */
|
2017-09-28 16:58:43 +02:00
|
|
|
char *prefix[_EXEC_DIRECTORY_TYPE_MAX];
|
2020-07-23 08:49:52 +02:00
|
|
|
char *received_credentials;
|
core: rework how we track which PIDs to watch for a unit
Previously, we'd maintain two hashmaps keyed by PIDs, pointing to Unit
interested in SIGCHLD events for them. This scheme allowed a specific
PID to be watched by exactly 0, 1 or 2 units.
With this rework this is replaced by a single hashmap which is primarily
keyed by the PID and points to a Unit interested in it. However, it
optionally also keyed by the negated PID, in which case it points to a
NULL terminated array of additional Unit objects also interested. This
scheme means arbitrary numbers of Units may now watch the same PID.
Runtime and memory behaviour should not be impacted by this change, as for
the common case (i.e. each PID only watched by a single unit) behaviour
stays the same, but for the uncommon case (a PID watched by more than
one unit) we only pay with a single additional memory allocation for the
array.
Why this all? Primarily, because allowing exactly two units to watch a
specific PID is not sufficient for some niche cases, as processes can
belong to more than one unit these days:
1. sd_notify() with MAINPID= can be used to attach a process from a
different cgroup to multiple units.
2. Similar, the PIDFile= setting in unit files can be used for similar
setups,
3. By creating a scope unit a main process of a service may join a
different unit, too.
4. On cgroupsv1 we frequently end up watching all processes remaining in
a scope, and if a process opens lots of scopes one after the other it
might thus end up being watched by many of them.
This patch hence removes the 2-unit-per-PID limit. It also makes a
couple of other changes, some of them quite relevant:
- manager_get_unit_by_pid() (and the bus call wrapping it) when there's
ambiguity will prefer returning the Unit the process belongs to based on
cgroup membership, and only check the watch-pids hashmap if that
fails. This change in logic is probably more in line with what people
expect and makes things more stable as each process can belong to
exactly one cgroup only.
- Every SIGCHLD event is now dispatched to all units interested in its
PID. Previously, there was some magic conditionalization: the SIGCHLD
would only be dispatched to the unit if it was only interested in a
single PID only, or the PID belonged to the control or main PID or we
didn't dispatch a single SIGCHLD to the unit in the current event loop
iteration yet. These rules were quite arbitrary and also redundant as
the per-unit handlers would filter the PIDs anyway a second time.
With this change we'll hence relax the rules: all we do now is
dispatch every SIGCHLD event exactly once to each unit interested in
it, and it's up to the unit to then use or ignore this. We use a
generation counter in the unit to ensure that we only invoke the unit
handler once for each event, protecting us from confusion if a unit is
both associated with a specific PID through cgroup membership and
through the "watch_pids" logic. It also protects us from being
confused if the "watch_pids" hashmap is altered while we are
dispatching to it (which is a very likely case).
- sd_notify() message dispatching has been reworked to be very similar
to SIGCHLD handling now. A generation counter is used for dispatching
as well.
This also adds a new test that validates that "watch_pid" registration
and unregistration works correctly.
2018-01-12 13:41:05 +01:00
|
|
|
|
|
|
|
/* Used in the SIGCHLD and sd_notify() message invocation logic to avoid that we dispatch the same event
|
|
|
|
* multiple times on the same unit. */
|
|
|
|
unsigned sigchldgen;
|
|
|
|
unsigned notifygen;
|
2019-03-15 11:37:43 +01:00
|
|
|
|
|
|
|
bool honor_device_enumeration;
|
2019-08-07 14:58:59 +02:00
|
|
|
|
|
|
|
VarlinkServer *varlink_server;
|
2009-11-18 00:42:52 +01:00
|
|
|
};
|
|
|
|
|
2017-11-29 07:43:44 +01:00
|
|
|
static inline usec_t manager_default_timeout_abort_usec(Manager *m) {
|
2019-04-14 13:46:24 +02:00
|
|
|
assert(m);
|
2017-11-29 07:43:44 +01:00
|
|
|
return m->default_timeout_abort_set ? m->default_timeout_abort_usec : m->default_timeout_stop_usec;
|
|
|
|
}
|
|
|
|
|
2016-02-24 21:24:23 +01:00
|
|
|
/* True if this manager instance runs in system scope (i.e. as PID 1's service manager). */
#define MANAGER_IS_SYSTEM(m) ((m)->unit_file_scope == UNIT_FILE_SYSTEM)
|
|
|
|
/* True for any non-system instance, i.e. a per-user service manager. */
#define MANAGER_IS_USER(m) ((m)->unit_file_scope != UNIT_FILE_SYSTEM)
|
|
|
|
|
2016-02-24 21:36:09 +01:00
|
|
|
/* True while a reload/reexec is in progress; n_reloading acts as a nesting counter. */
#define MANAGER_IS_RELOADING(m) ((m)->n_reloading > 0)
|
|
|
|
|
2017-11-20 21:24:59 +01:00
|
|
|
/* True once startup completed, i.e. the MANAGER_TIMESTAMP_FINISH timestamp was taken. */
#define MANAGER_IS_FINISHED(m) (dual_timestamp_is_set((m)->timestamps + MANAGER_TIMESTAMP_FINISH))
|
|
|
|
|
2018-10-09 15:42:19 +02:00
|
|
|
/* The objective is set to OK as soon as we enter the main loop, and set otherwise as soon as we are done with it */
|
|
|
|
#define MANAGER_IS_RUNNING(m) ((m)->objective == MANAGER_OK)
|
2018-01-23 16:43:56 +01:00
|
|
|
|
2018-10-09 16:15:54 +02:00
|
|
|
/* True when the manager was created with any test-run flag set (no real system changes). */
#define MANAGER_IS_TEST_RUN(m) ((m)->test_run_flags != 0)
|
|
|
|
|
|
|
|
int manager_new(UnitFileScope scope, ManagerTestRunFlags test_run_flags, Manager **m);
|
2014-11-08 16:06:12 +01:00
|
|
|
Manager* manager_free(Manager *m);
|
2018-03-09 21:55:55 +01:00
|
|
|
DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
|
2009-11-18 00:42:52 +01:00
|
|
|
|
2010-04-21 03:27:44 +02:00
|
|
|
int manager_startup(Manager *m, FILE *serialization, FDSet *fds);
|
2010-01-29 03:18:09 +01:00
|
|
|
|
2009-11-18 00:42:52 +01:00
|
|
|
Job *manager_get_job(Manager *m, uint32_t id);
|
2010-01-26 21:39:06 +01:00
|
|
|
Unit *manager_get_unit(Manager *m, const char *name);
|
2009-11-18 00:42:52 +01:00
|
|
|
|
2010-02-02 12:42:08 +01:00
|
|
|
int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j);
|
2010-02-01 03:33:24 +01:00
|
|
|
|
2020-07-03 19:45:19 +02:00
|
|
|
bool manager_unit_file_maybe_loadable_from_cache(Unit *u);
|
2013-11-19 21:12:59 +01:00
|
|
|
int manager_load_unit_prepare(Manager *m, const char *name, const char *path, sd_bus_error *e, Unit **_ret);
|
|
|
|
int manager_load_unit(Manager *m, const char *name, const char *path, sd_bus_error *e, Unit **_ret);
|
2018-04-12 15:13:14 +02:00
|
|
|
int manager_load_startable_unit_or_warn(Manager *m, const char *name, const char *path, Unit **ret);
|
2013-11-19 21:12:59 +01:00
|
|
|
int manager_load_unit_from_dbus_path(Manager *m, const char *s, sd_bus_error *e, Unit **_u);
|
2010-04-13 01:59:06 +02:00
|
|
|
|
2019-03-22 20:57:30 +01:00
|
|
|
int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, Set *affected_jobs, sd_bus_error *e, Job **_ret);
|
|
|
|
int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, Set *affected_jobs, sd_bus_error *e, Job **_ret);
|
|
|
|
int manager_add_job_by_name_and_warn(Manager *m, JobType type, const char *name, JobMode mode, Set *affected_jobs, Job **ret);
|
2017-08-07 11:27:24 +02:00
|
|
|
int manager_propagate_reload(Manager *m, Unit *unit, JobMode mode, sd_bus_error *e);
|
2009-11-18 00:42:52 +01:00
|
|
|
|
2010-01-26 21:39:06 +01:00
|
|
|
void manager_dump_units(Manager *s, FILE *f, const char *prefix);
|
2010-01-20 04:02:39 +01:00
|
|
|
void manager_dump_jobs(Manager *s, FILE *f, const char *prefix);
|
2017-11-20 21:11:32 +01:00
|
|
|
void manager_dump(Manager *s, FILE *f, const char *prefix);
|
2017-11-20 21:20:44 +01:00
|
|
|
int manager_get_dump_string(Manager *m, char **ret);
|
2010-01-19 00:22:34 +01:00
|
|
|
|
2010-01-20 05:03:52 +01:00
|
|
|
void manager_clear_jobs(Manager *m);
|
|
|
|
|
2019-03-18 20:59:36 +01:00
|
|
|
void manager_unwatch_pid(Manager *m, pid_t pid);
|
|
|
|
|
2010-02-05 00:38:41 +01:00
|
|
|
unsigned manager_dispatch_load_queue(Manager *m);
|
2010-01-29 03:18:09 +01:00
|
|
|
|
2018-11-19 12:23:13 +01:00
|
|
|
int manager_default_environment(Manager *m);
|
2018-10-31 15:49:19 +01:00
|
|
|
int manager_transient_environment_add(Manager *m, char **plus);
|
|
|
|
int manager_client_environment_modify(Manager *m, char **minus, char **plus);
|
|
|
|
int manager_get_effective_environment(Manager *m, char ***ret);
|
|
|
|
|
2012-03-21 18:03:40 +01:00
|
|
|
int manager_set_default_rlimits(Manager *m, struct rlimit **default_rlimit);
|
2010-08-30 21:31:40 +02:00
|
|
|
|
2010-01-24 00:39:29 +01:00
|
|
|
int manager_loop(Manager *m);
|
2010-01-23 22:56:47 +01:00
|
|
|
|
2010-07-20 20:54:33 +02:00
|
|
|
int manager_open_serialization(Manager *m, FILE **_f);
|
2010-04-21 03:27:44 +02:00
|
|
|
|
2013-04-08 14:05:24 +02:00
|
|
|
int manager_serialize(Manager *m, FILE *f, FDSet *fds, bool switching_root);
|
2010-04-21 03:27:44 +02:00
|
|
|
int manager_deserialize(Manager *m, FILE *f, FDSet *fds);
|
|
|
|
|
|
|
|
int manager_reload(Manager *m);
|
|
|
|
|
2010-08-31 00:23:34 +02:00
|
|
|
void manager_reset_failed(Manager *m);
|
2010-07-18 04:58:01 +02:00
|
|
|
|
2010-08-11 01:43:23 +02:00
|
|
|
void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success);
|
2010-10-06 03:55:49 +02:00
|
|
|
void manager_send_unit_plymouth(Manager *m, Unit *u);
|
2010-08-11 01:43:23 +02:00
|
|
|
|
2013-04-26 02:57:41 +02:00
|
|
|
bool manager_unit_inactive_or_pending(Manager *m, const char *name);
|
2010-09-01 03:30:59 +02:00
|
|
|
|
2010-09-21 04:14:38 +02:00
|
|
|
void manager_check_finished(Manager *m);
|
|
|
|
|
2019-09-18 21:02:07 +02:00
|
|
|
void disable_printk_ratelimit(void);
|
core: rework how we connect to the bus
This removes the current bus_init() call, as it had multiple problems:
it munged handling of the three bus connections we care about (private,
"api" and system) into one, even though the conditions when which was
ready are very different. It also added redundant logging, as the
individual calls it called all logged on their own anyway.
The three calls bus_init_api(), bus_init_private() and bus_init_system()
are now made public. A new call manager_dbus_is_running() is added that
works much like manager_journal_is_running() and is a lot more careful
when checking whether dbus is around. Optionally it checks the unit's
deserialized_state rather than state, in order to accommodate for cases
where we want to connect to the bus before deserializing the
"subscribed" list, before coldplugging the units.
manager_recheck_dbus() is added, that works a lot like
manager_recheck_journal() and is invoked in unit_notify(), i.e. when
units change state.
All in all this should make handling a bit more alike to journal
handling, and it also fixes one major bug: when running in user mode
we'll now connect to the system bus early on, without conditionalizing
this in anyway.
2018-02-07 14:52:22 +01:00
|
|
|
void manager_recheck_dbus(Manager *m);
|
2012-01-12 05:09:06 +01:00
|
|
|
void manager_recheck_journal(Manager *m);
|
2011-03-18 04:31:22 +01:00
|
|
|
|
2020-04-27 11:06:34 +02:00
|
|
|
bool manager_get_show_status_on(Manager *m);
|
2020-02-29 10:59:27 +01:00
|
|
|
void manager_set_show_status(Manager *m, ShowStatus mode, const char *reason);
|
2020-06-04 13:25:25 +02:00
|
|
|
void manager_override_show_status(Manager *m, ShowStatus mode, const char *reason);
|
2020-04-27 11:06:34 +02:00
|
|
|
|
2014-07-07 19:25:31 +02:00
|
|
|
void manager_set_first_boot(Manager *m, bool b);
|
|
|
|
|
2014-10-28 04:02:54 +01:00
|
|
|
void manager_status_printf(Manager *m, StatusType type, const char *status, const char *format, ...) _printf_(4,5);
|
2012-11-22 00:38:55 +01:00
|
|
|
|
2013-09-26 20:14:24 +02:00
|
|
|
Set *manager_get_units_requiring_mounts_for(Manager *m, const char *path);
|
2014-03-03 17:14:07 +01:00
|
|
|
|
2014-03-12 20:55:13 +01:00
|
|
|
ManagerState manager_state(Manager *m);
|
|
|
|
|
2015-09-11 17:25:35 +02:00
|
|
|
int manager_update_failed_units(Manager *m, Unit *u, bool failed);
|
2015-02-18 17:22:37 +01:00
|
|
|
|
2016-08-01 19:24:40 +02:00
|
|
|
void manager_unref_uid(Manager *m, uid_t uid, bool destroy_now);
|
|
|
|
int manager_ref_uid(Manager *m, uid_t uid, bool clean_ipc);
|
|
|
|
|
|
|
|
void manager_unref_gid(Manager *m, gid_t gid, bool destroy_now);
|
|
|
|
int manager_ref_gid(Manager *m, gid_t gid, bool destroy_now);
|
|
|
|
|
2017-12-07 11:27:07 +01:00
|
|
|
char *manager_taint_string(Manager *m);
|
|
|
|
|
2018-01-24 19:59:55 +01:00
|
|
|
void manager_ref_console(Manager *m);
|
|
|
|
void manager_unref_console(Manager *m);
|
|
|
|
|
2018-05-30 17:57:23 +02:00
|
|
|
void manager_override_log_level(Manager *m, int level);
|
|
|
|
void manager_restore_original_log_level(Manager *m);
|
|
|
|
|
2018-06-01 18:21:03 +02:00
|
|
|
void manager_override_log_target(Manager *m, LogTarget target);
|
|
|
|
void manager_restore_original_log_target(Manager *m);
|
|
|
|
|
2014-03-12 20:55:13 +01:00
|
|
|
const char *manager_state_to_string(ManagerState m) _const_;
|
|
|
|
ManagerState manager_state_from_string(const char *s) _pure_;
|
2016-11-02 10:38:22 +01:00
|
|
|
|
|
|
|
const char *manager_get_confirm_spawn(Manager *m);
|
2016-11-15 09:29:04 +01:00
|
|
|
bool manager_is_confirm_spawn_disabled(Manager *m);
|
|
|
|
void manager_disable_confirm_spawn(void);
|
2017-11-20 21:01:13 +01:00
|
|
|
|
|
|
|
const char *manager_timestamp_to_string(ManagerTimestamp m) _const_;
|
|
|
|
ManagerTimestamp manager_timestamp_from_string(const char *s) _pure_;
|
2018-07-22 06:41:44 +02:00
|
|
|
ManagerTimestamp manager_timestamp_initrd_mangle(ManagerTimestamp s);
|
2019-03-19 19:05:19 +01:00
|
|
|
|
2020-04-22 16:16:47 +02:00
|
|
|
usec_t manager_get_watchdog(Manager *m, WatchdogType t);
|
|
|
|
void manager_set_watchdog(Manager *m, WatchdogType t, usec_t timeout);
|
2020-06-04 13:25:25 +02:00
|
|
|
int manager_override_watchdog(Manager *m, WatchdogType t, usec_t timeout);
|
2020-04-22 16:16:47 +02:00
|
|
|
|
2019-03-19 19:05:19 +01:00
|
|
|
const char* oom_policy_to_string(OOMPolicy i) _const_;
|
|
|
|
OOMPolicy oom_policy_from_string(const char *s) _pure_;
|