2017-11-18 17:09:20 +01:00
|
|
|
/* SPDX-License-Identifier: LGPL-2.1+ */
|
2010-02-03 13:03:47 +01:00
|
|
|
/***
|
|
|
|
This file is part of systemd.
|
|
|
|
|
|
|
|
Copyright 2010 Lennart Poettering
|
|
|
|
***/
|
|
|
|
|
2009-11-18 00:42:52 +01:00
|
|
|
#include <errno.h>
|
2015-09-22 23:24:07 +02:00
|
|
|
#include <fcntl.h>
|
|
|
|
#include <linux/kd.h>
|
2010-01-24 00:39:29 +01:00
|
|
|
#include <signal.h>
|
2017-11-20 21:20:44 +01:00
|
|
|
#include <stdio_ext.h>
|
2015-09-22 23:24:07 +02:00
|
|
|
#include <string.h>
|
2014-10-26 02:30:51 +02:00
|
|
|
#include <sys/epoll.h>
|
2015-09-22 23:24:07 +02:00
|
|
|
#include <sys/inotify.h>
|
2010-02-14 22:39:40 +01:00
|
|
|
#include <sys/ioctl.h>
|
2015-09-22 23:24:07 +02:00
|
|
|
#include <sys/reboot.h>
|
2012-11-25 00:32:40 +01:00
|
|
|
#include <sys/timerfd.h>
|
2015-09-22 23:24:07 +02:00
|
|
|
#include <sys/wait.h>
|
|
|
|
#include <unistd.h>
|
2010-08-11 15:19:50 +02:00
|
|
|
|
2017-10-03 10:41:51 +02:00
|
|
|
#if HAVE_AUDIT
|
2010-08-11 01:43:23 +02:00
|
|
|
#include <libaudit.h>
|
2010-08-11 15:19:50 +02:00
|
|
|
#endif
|
2009-11-18 00:42:52 +01:00
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
#include "sd-daemon.h"
|
|
|
|
#include "sd-messages.h"
|
core: add {State,Cache,Log,Configuration}Directory= (#6384)
This introduces {State,Cache,Log,Configuration}Directory= those are
similar to RuntimeDirectory=. They create the directories under
/var/lib, /var/cache/, /var/log, or /etc, respectively, with the mode
specified in {State,Cache,Log,Configuration}DirectoryMode=.
This also fixes #6391.
2017-07-18 14:34:52 +02:00
|
|
|
#include "sd-path.h"
|
2012-01-05 16:01:58 +01:00
|
|
|
|
2015-10-27 03:01:06 +01:00
|
|
|
#include "alloc-util.h"
|
2018-05-15 20:17:34 +02:00
|
|
|
#include "all-units.h"
|
2015-09-22 23:24:07 +02:00
|
|
|
#include "audit-fd.h"
|
|
|
|
#include "boot-timestamps.h"
|
|
|
|
#include "bus-common-errors.h"
|
|
|
|
#include "bus-error.h"
|
|
|
|
#include "bus-kernel.h"
|
|
|
|
#include "bus-util.h"
|
2016-08-01 19:24:40 +02:00
|
|
|
#include "clean-ipc.h"
|
2017-12-07 11:27:07 +01:00
|
|
|
#include "clock-util.h"
|
2015-09-22 23:24:07 +02:00
|
|
|
#include "dbus-job.h"
|
|
|
|
#include "dbus-manager.h"
|
|
|
|
#include "dbus-unit.h"
|
|
|
|
#include "dbus.h"
|
2016-02-26 18:28:45 +01:00
|
|
|
#include "dirent-util.h"
|
2015-09-22 23:24:07 +02:00
|
|
|
#include "env-util.h"
|
2015-10-23 18:52:53 +02:00
|
|
|
#include "escape.h"
|
2017-01-22 18:35:08 +01:00
|
|
|
#include "exec-util.h"
|
core: implement /run/systemd/units/-based path for passing unit info from PID 1 to journald
And let's make use of it to implement two new unit settings with it:
1. LogLevelMax= is a new per-unit setting that may be used to configure
log priority filtering: set it to LogLevelMax=notice and only
messages of level "notice" and lower (i.e. more important) will be
processed, all others are dropped.
2. LogExtraFields= is a new per-unit setting for configuring per-unit
journal fields, that are implicitly included in every log record
generated by the unit's processes. It takes field/value pairs in the
form of FOO=BAR.
Also, related to this, one existing unit setting is ported to this new
facility:
3. The invocation ID is now pulled from /run/systemd/units/ instead of
cgroupfs xattrs. This substantially relaxes requirements of systemd
on the kernel version and the privileges it runs with (specifically,
cgroupfs xattrs are not available in containers, since they are
stored in kernel memory, and hence are unsafe to permit to lesser
privileged code).
/run/systemd/units/ is a new directory, which contains a number of files
and symlinks encoding the above information. PID 1 creates and manages
these files, and journald reads them from there.
Note that this is supposed to be a direct path between PID 1 and the
journal only, due to the special runtime environment the journal runs
in. Normally, today we shouldn't introduce new interfaces that (mis-)use
a file system as IPC framework, and instead just an IPC system, but this
is very hard to do between the journal and PID 1, as long as the IPC
system is a subject PID 1 manages, and itself a client to the journal.
This patch cleans up a couple of types used in journal code:
specifically we switch to size_t for a couple of memory-sizing values,
as size_t is the right choice for everything that is memory.
Fixes: #4089
Fixes: #3041
Fixes: #4441
2017-11-02 19:43:32 +01:00
|
|
|
#include "execute.h"
|
2015-09-22 23:24:07 +02:00
|
|
|
#include "exit-status.h"
|
2015-10-25 13:14:12 +01:00
|
|
|
#include "fd-util.h"
|
2015-10-26 18:05:03 +01:00
|
|
|
#include "fileio.h"
|
2015-10-26 21:16:26 +01:00
|
|
|
#include "fs-util.h"
|
2009-11-18 00:42:52 +01:00
|
|
|
#include "hashmap.h"
|
2015-10-25 14:08:25 +01:00
|
|
|
#include "io-util.h"
|
core: implement /run/systemd/units/-based path for passing unit info from PID 1 to journald
And let's make use of it to implement two new unit settings with it:
1. LogLevelMax= is a new per-unit setting that may be used to configure
log priority filtering: set it to LogLevelMax=notice and only
messages of level "notice" and lower (i.e. more important) will be
processed, all others are dropped.
2. LogExtraFields= is a new per-unit setting for configuring per-unit
journal fields, that are implicitly included in every log record
generated by the unit's processes. It takes field/value pairs in the
form of FOO=BAR.
Also, related to this, one existing unit setting is ported to this new
facility:
3. The invocation ID is now pulled from /run/systemd/units/ instead of
cgroupfs xattrs. This substantially relaxes requirements of systemd
on the kernel version and the privileges it runs with (specifically,
cgroupfs xattrs are not available in containers, since they are
stored in kernel memory, and hence are unsafe to permit to lesser
privileged code).
/run/systemd/units/ is a new directory, which contains a number of files
and symlinks encoding the above information. PID 1 creates and manages
these files, and journald reads them from there.
Note that this is supposed to be a direct path between PID 1 and the
journal only, due to the special runtime environment the journal runs
in. Normally, today we shouldn't introduce new interfaces that (mis-)use
a file system as IPC framework, and instead just an IPC system, but this
is very hard to do between the journal and PID 1, as long as the IPC
system is a subject PID 1 manages, and itself a client to the journal.
This patch cleans up a couple of types used in journal code:
specifically we switch to size_t for a couple of memory-sizing values,
as size_t is the right choice for everything that is memory.
Fixes: #4089
Fixes: #3041
Fixes: #4441
2017-11-02 19:43:32 +01:00
|
|
|
#include "label.h"
|
2015-09-22 23:24:07 +02:00
|
|
|
#include "locale-setup.h"
|
2010-01-20 19:19:53 +01:00
|
|
|
#include "log.h"
|
2015-09-22 23:24:07 +02:00
|
|
|
#include "macro.h"
|
2015-10-25 13:14:12 +01:00
|
|
|
#include "manager.h"
|
2015-09-22 23:24:07 +02:00
|
|
|
#include "missing.h"
|
2012-04-10 21:54:31 +02:00
|
|
|
#include "mkdir.h"
|
2015-10-26 16:18:16 +01:00
|
|
|
#include "parse-util.h"
|
2015-09-22 23:24:07 +02:00
|
|
|
#include "path-lookup.h"
|
|
|
|
#include "path-util.h"
|
|
|
|
#include "process-util.h"
|
2010-02-01 03:33:24 +01:00
|
|
|
#include "ratelimit.h"
|
2018-05-03 19:05:59 +02:00
|
|
|
#include "rlimit-util.h"
|
2015-04-04 11:52:57 +02:00
|
|
|
#include "rm-rf.h"
|
2015-09-22 23:24:07 +02:00
|
|
|
#include "signal-util.h"
|
2018-05-15 20:17:34 +02:00
|
|
|
#include "socket-util.h"
|
2010-06-18 04:22:59 +02:00
|
|
|
#include "special.h"
|
2015-10-26 22:01:44 +01:00
|
|
|
#include "stat-util.h"
|
2015-10-26 22:31:05 +01:00
|
|
|
#include "string-table.h"
|
2015-10-24 22:58:24 +02:00
|
|
|
#include "string-util.h"
|
2015-09-22 23:24:07 +02:00
|
|
|
#include "strv.h"
|
2017-12-25 05:08:23 +01:00
|
|
|
#include "strxcpyx.h"
|
2015-09-22 23:24:07 +02:00
|
|
|
#include "terminal-util.h"
|
|
|
|
#include "time-util.h"
|
|
|
|
#include "transaction.h"
|
2015-10-26 23:20:41 +01:00
|
|
|
#include "umask-util.h"
|
2015-09-22 23:24:07 +02:00
|
|
|
#include "unit-name.h"
|
2016-08-01 19:24:40 +02:00
|
|
|
#include "user-util.h"
|
2015-09-22 23:24:07 +02:00
|
|
|
#include "util.h"
|
2011-09-23 17:20:45 +02:00
|
|
|
#include "virt.h"
|
2012-04-05 22:08:10 +02:00
|
|
|
#include "watchdog.h"
|
2009-11-18 00:42:52 +01:00
|
|
|
|
2015-10-30 11:27:29 +01:00
|
|
|
#define NOTIFY_RCVBUF_SIZE (8*1024*1024)
|
2016-05-04 20:43:23 +02:00
|
|
|
#define CGROUPS_AGENT_RCVBUF_SIZE (8*1024*1024)
|
2015-10-30 11:27:29 +01:00
|
|
|
|
2013-02-28 00:03:22 +01:00
|
|
|
/* Initial delay and the interval for printing status messages about running jobs */
|
2014-01-27 07:15:27 +01:00
|
|
|
#define JOBS_IN_PROGRESS_WAIT_USEC (5*USEC_PER_SEC)
|
|
|
|
#define JOBS_IN_PROGRESS_PERIOD_USEC (USEC_PER_SEC / 3)
|
2013-02-28 00:03:22 +01:00
|
|
|
#define JOBS_IN_PROGRESS_PERIOD_DIVISOR 3
|
|
|
|
|
2018-02-13 18:30:34 +01:00
|
|
|
/* If there are more than 1K bus messages queue across our API and direct busses, then let's not add more on top until
|
|
|
|
* the queue gets more empty. */
|
|
|
|
#define MANAGER_BUS_BUSY_THRESHOLD 1024LU
|
|
|
|
|
|
|
|
/* How many units and jobs to process of the bus queue before returning to the event loop. */
|
|
|
|
#define MANAGER_BUS_MESSAGE_BUDGET 100U
|
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
static int manager_dispatch_notify_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
|
2016-05-04 20:43:23 +02:00
|
|
|
static int manager_dispatch_cgroups_agent_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
|
2013-11-19 21:12:59 +01:00
|
|
|
static int manager_dispatch_signal_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
|
|
|
|
static int manager_dispatch_time_change_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
|
|
|
|
static int manager_dispatch_idle_pipe_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
|
2016-08-01 19:24:40 +02:00
|
|
|
static int manager_dispatch_user_lookup_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata);
|
2013-11-19 21:12:59 +01:00
|
|
|
static int manager_dispatch_jobs_in_progress(sd_event_source *source, usec_t usec, void *userdata);
|
2013-11-25 15:22:41 +01:00
|
|
|
static int manager_dispatch_run_queue(sd_event_source *source, void *userdata);
|
2018-01-23 18:18:13 +01:00
|
|
|
static int manager_dispatch_sigchld(sd_event_source *source, void *userdata);
|
manager: run environment generators
Environment file generators are a lot like unit file generators, but not
exactly:
1. environment file generators are run for each manager instance, and their
output is (or at least can be) individualized.
The generators themselves are system-wide, the same for all users.
2. environment file generators are run sequentially, in priority order.
Thus, the lifetime of those files is tied to lifecycle of the manager
instance. Because generators are run sequentially, later generators can use or
modify the output of earlier generators.
Each generator is run with no arguments, and the whole state is stored in the
environment variables. The generator can echo a set of variable assignments to
standard output:
VAR_A=something
VAR_B=something else
This output is parsed, and the next and subsequent generators run with those
updated variables in the environment. After the last generator is done, the
environment that the manager itself exports is updated.
Each generator must return 0, otherwise the output is ignored.
The generators in */user-env-generator are for the user session managers,
including root, and the ones in */system-env-generator are for pid1.
2017-01-22 07:13:47 +01:00
|
|
|
static int manager_run_environment_generators(Manager *m);
|
Implement masking and overriding of generators
Sometimes it is necessary to stop a generator from running. Either
because of a bug, or for testing, or some other reason. The only way
to do that would be to rename or chmod the generator binary, which is
inconvenient and does not survive upgrades. Allow masking and
overriding generators similarly to units and other configuration
files.
For the systemd instance, masking would be more common, rather than
overriding generators. For the user instances, it may also be useful
for users to have generators in $XDG_CONFIG_HOME to augment or
override system-wide generators.
Directories are searched according to the usual scheme (/usr/lib,
/usr/local/lib, /run, /etc), and files with the same name in higher
priority directories override files with the same name in lower
priority directories. Empty files and links to /dev/null mask a given
name.
https://bugs.freedesktop.org/show_bug.cgi?id=87230
2015-01-09 02:47:25 +01:00
|
|
|
static int manager_run_generators(Manager *m);
|
2013-11-19 21:12:59 +01:00
|
|
|
|
2015-03-14 03:11:09 +01:00
|
|
|
/* Arm the one-shot timer that starts printing "job in progress" status
 * lines (the cylon animation) once jobs have been running for a while.
 * Best-effort: failures to set up the timer are silently ignored. */
static void manager_watch_jobs_in_progress(Manager *m) {
        usec_t next;
        int r;

        assert(m);

        /* We do not want to show the cylon animation if the user
         * needs to confirm service executions otherwise confirmation
         * messages will be screwed by the cylon animation. */
        if (!manager_is_confirm_spawn_disabled(m))
                return;

        /* Timer already armed? Nothing to do then. */
        if (m->jobs_in_progress_event_source)
                return;

        /* First status line appears after JOBS_IN_PROGRESS_WAIT_USEC. */
        next = now(CLOCK_MONOTONIC) + JOBS_IN_PROGRESS_WAIT_USEC;
        r = sd_event_add_time(
                        m->event,
                        &m->jobs_in_progress_event_source,
                        CLOCK_MONOTONIC,
                        next, 0,
                        manager_dispatch_jobs_in_progress, m);
        if (r < 0)
                return;

        (void) sd_event_source_set_description(m->jobs_in_progress_event_source, "manager-jobs-in-progress");
}
|
|
|
|
|
2017-12-14 19:02:29 +01:00
|
|
|
#define CYLON_BUFFER_EXTRA (2*STRLEN(ANSI_RED) + STRLEN(ANSI_HIGHLIGHT_RED) + 2*STRLEN(ANSI_NORMAL))
|
2013-02-28 00:03:22 +01:00
|
|
|
|
|
|
|
static void draw_cylon(char buffer[], size_t buflen, unsigned width, unsigned pos) {
|
|
|
|
char *p = buffer;
|
|
|
|
|
|
|
|
assert(buflen >= CYLON_BUFFER_EXTRA + width + 1);
|
|
|
|
assert(pos <= width+1); /* 0 or width+1 mean that the center light is behind the corner */
|
|
|
|
|
|
|
|
if (pos > 1) {
|
2013-03-05 15:52:44 +01:00
|
|
|
if (pos > 2)
|
|
|
|
p = mempset(p, ' ', pos-2);
|
2016-06-10 18:33:15 +02:00
|
|
|
if (log_get_show_color())
|
|
|
|
p = stpcpy(p, ANSI_RED);
|
2013-02-28 00:03:22 +01:00
|
|
|
*p++ = '*';
|
|
|
|
}
|
|
|
|
|
|
|
|
if (pos > 0 && pos <= width) {
|
2016-06-10 18:33:15 +02:00
|
|
|
if (log_get_show_color())
|
|
|
|
p = stpcpy(p, ANSI_HIGHLIGHT_RED);
|
2013-02-28 00:03:22 +01:00
|
|
|
*p++ = '*';
|
|
|
|
}
|
|
|
|
|
2016-06-10 18:33:15 +02:00
|
|
|
if (log_get_show_color())
|
|
|
|
p = stpcpy(p, ANSI_NORMAL);
|
2013-02-28 00:03:22 +01:00
|
|
|
|
|
|
|
if (pos < width) {
|
2016-06-10 18:33:15 +02:00
|
|
|
if (log_get_show_color())
|
|
|
|
p = stpcpy(p, ANSI_RED);
|
2013-02-28 00:03:22 +01:00
|
|
|
*p++ = '*';
|
2013-03-05 15:52:44 +01:00
|
|
|
if (pos < width-1)
|
|
|
|
p = mempset(p, ' ', width-1-pos);
|
2016-06-10 18:33:15 +02:00
|
|
|
if (log_get_show_color())
|
|
|
|
strcpy(p, ANSI_NORMAL);
|
2013-02-28 00:03:22 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-28 04:48:18 +01:00
|
|
|
/* Flip show_status between the AUTO and TEMPORARY states: enabling promotes
 * AUTO to TEMPORARY, disabling demotes TEMPORARY back to AUTO. All other
 * show_status values are left untouched. */
void manager_flip_auto_status(Manager *m, bool enable) {
        assert(m);

        if (enable && m->show_status == SHOW_STATUS_AUTO)
                manager_set_show_status(m, SHOW_STATUS_TEMPORARY);
        else if (!enable && m->show_status == SHOW_STATUS_TEMPORARY)
                manager_set_show_status(m, SHOW_STATUS_AUTO);
}
|
|
|
|
|
2013-02-28 00:03:22 +01:00
|
|
|
/* Print one ephemeral console status line about the currently running jobs,
 * rotating through them and animating the cylon spinner. Must only be called
 * while at least one job is running. */
static void manager_print_jobs_in_progress(Manager *m) {
        _cleanup_free_ char *job_of_n = NULL;
        Iterator i;
        Job *j;
        unsigned counter = 0, print_nr;
        char cylon[6 + CYLON_BUFFER_EXTRA + 1];
        unsigned cylon_pos;
        char time[FORMAT_TIMESPAN_MAX], limit[FORMAT_TIMESPAN_MAX] = "no limit";
        uint64_t x;

        assert(m);
        assert(m->n_running_jobs > 0);

        manager_flip_auto_status(m, true);

        /* Pick which running job to show this iteration; the divisor slows
         * down the rotation relative to the animation tick. */
        print_nr = (m->jobs_in_progress_iteration / JOBS_IN_PROGRESS_PERIOD_DIVISOR) % m->n_running_jobs;

        HASHMAP_FOREACH(j, m->jobs, i)
                if (j->state == JOB_RUNNING && counter++ == print_nr)
                        break;

        /* m->n_running_jobs must be consistent with the contents of m->jobs,
         * so the above loop must have succeeded in finding j. */
        assert(counter == print_nr + 1);
        assert(j);

        /* Map the free-running iteration counter onto a back-and-forth sweep
         * over positions 0..7 (period 14). */
        cylon_pos = m->jobs_in_progress_iteration % 14;
        if (cylon_pos >= 8)
                cylon_pos = 14 - cylon_pos;
        draw_cylon(cylon, sizeof(cylon), 6, cylon_pos);

        m->jobs_in_progress_iteration++;

        /* Only prefix "(x of y) " when more than one job is running; on
         * asprintf() failure just omit the prefix. */
        if (m->n_running_jobs > 1) {
                if (asprintf(&job_of_n, "(%u of %u) ", counter, m->n_running_jobs) < 0)
                        job_of_n = NULL;
        }

        format_timespan(time, sizeof(time), now(CLOCK_MONOTONIC) - j->begin_usec, 1*USEC_PER_SEC);
        /* Only show a limit if the job actually has a timeout configured. */
        if (job_get_timeout(j, &x) > 0)
                format_timespan(limit, sizeof(limit), x - j->begin_usec, 1*USEC_PER_SEC);

        manager_status_printf(m, STATUS_TYPE_EPHEMERAL, cylon,
                              "%sA %s job is running for %s (%s / %s)",
                              strempty(job_of_n),
                              job_type_to_string(j->type),
                              unit_description(j->unit),
                              time, limit);
}
|
|
|
|
|
2014-10-26 02:30:51 +02:00
|
|
|
static int have_ask_password(void) {
|
|
|
|
_cleanup_closedir_ DIR *dir;
|
2016-12-09 10:04:30 +01:00
|
|
|
struct dirent *de;
|
2014-10-26 02:30:51 +02:00
|
|
|
|
|
|
|
dir = opendir("/run/systemd/ask-password");
|
|
|
|
if (!dir) {
|
|
|
|
if (errno == ENOENT)
|
|
|
|
return false;
|
|
|
|
else
|
|
|
|
return -errno;
|
|
|
|
}
|
|
|
|
|
2016-12-09 10:04:30 +01:00
|
|
|
FOREACH_DIRENT_ALL(de, dir, return -errno) {
|
2014-10-26 02:30:51 +02:00
|
|
|
if (startswith(de->d_name, "ask."))
|
|
|
|
return true;
|
|
|
|
}
|
2016-12-09 10:04:30 +01:00
|
|
|
return false;
|
2014-10-26 02:30:51 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* inotify dispatch callback for /run/systemd/ask-password: drain the fd and
 * refresh the cached have_ask_password state. Always returns 0 so the event
 * source stays installed. */
static int manager_dispatch_ask_password_fd(sd_event_source *source,
                                            int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;

        assert(m);

        /* Drain the queued inotify events; their contents don't matter, we
         * rescan the whole directory below anyway. */
        (void) flush_fd(fd);

        m->have_ask_password = have_ask_password();
        if (m->have_ask_password < 0)
                /* Log error but continue. Negative have_ask_password
                 * is treated as unknown status. */
                log_error_errno(m->have_ask_password, "Failed to list /run/systemd/ask-password: %m");

        return 0;
}
|
|
|
|
|
|
|
|
/* Tear down the ask-password inotify watch and mark the pending-query state
 * as unknown. Safe to call when nothing is set up. */
static void manager_close_ask_password(Manager *m) {
        assert(m);

        /* Unregister the event source before closing the fd it watches. */
        m->ask_password_event_source = sd_event_source_unref(m->ask_password_event_source);
        m->ask_password_inotify_fd = safe_close(m->ask_password_inotify_fd);
        /* Negative value means "unknown" to consumers of have_ask_password. */
        m->have_ask_password = -EINVAL;
}
|
|
|
|
|
|
|
|
/* Lazily set up inotify watching of /run/systemd/ask-password, then return
 * the cached pending-password state (boolean, or negative when unknown).
 *
 * Fix: the sd_event_add_io() failure path logged 'errno' and returned
 * '-errno', but sd_event_add_io() reports errors via its (negative) return
 * value and does not set errno — log and propagate 'r' instead. On the
 * inotify_add_watch() path, save errno before cleanup may clobber it. */
static int manager_check_ask_password(Manager *m) {
        int r;

        assert(m);

        if (!m->ask_password_event_source) {
                assert(m->ask_password_inotify_fd < 0);

                mkdir_p_label("/run/systemd/ask-password", 0755);

                m->ask_password_inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
                if (m->ask_password_inotify_fd < 0)
                        return log_error_errno(errno, "inotify_init1() failed: %m");

                if (inotify_add_watch(m->ask_password_inotify_fd, "/run/systemd/ask-password", IN_CREATE|IN_DELETE|IN_MOVE) < 0) {
                        /* Capture the error before cleanup can clobber errno. */
                        r = -errno;
                        log_error_errno(r, "Failed to add watch on /run/systemd/ask-password: %m");
                        manager_close_ask_password(m);
                        return r;
                }

                r = sd_event_add_io(m->event, &m->ask_password_event_source,
                                    m->ask_password_inotify_fd, EPOLLIN,
                                    manager_dispatch_ask_password_fd, m);
                if (r < 0) {
                        /* sd_event_add_io() returns a negative errno-style
                         * code; 'errno' is not meaningful here. */
                        log_error_errno(r, "Failed to add event source for /run/systemd/ask-password: %m");
                        manager_close_ask_password(m);
                        return r;
                }

                (void) sd_event_source_set_description(m->ask_password_event_source, "manager-ask-password");

                /* Queries might have been added meanwhile... */
                manager_dispatch_ask_password_fd(m->ask_password_event_source,
                                                 m->ask_password_inotify_fd, EPOLLIN, m);
        }

        return m->have_ask_password;
}
|
|
|
|
|
systemd: do not output status messages once gettys are running
Make Type=idle communication bidirectional: when bootup is finished,
the manager, as before, signals idling Type=idle jobs to continue.
However, if the boot takes too long, idling jobs signal the manager
that they have had enough, wait a tiny bit more, and continue, taking
ownership of the console. The manager, when signalled that Type=idle
jobs are done, makes a note and will not write to the console anymore.
This is a cosmetic issue, but quite noticable, so let's just fix it.
Based on Harald Hoyer's patch.
https://bugs.freedesktop.org/show_bug.cgi?id=54247
http://unix.stackexchange.com/questions/51805/systemd-messages-after-starting-login/
2013-07-16 03:34:57 +02:00
|
|
|
/* Start watching the read end of the second idle-pipe pair, through which
 * Type=idle jobs signal that they are taking over the console (so the
 * manager stops writing status output). No-op if already watching or if the
 * pipe was never set up. */
static int manager_watch_idle_pipe(Manager *m) {
        int r;

        assert(m);

        /* Already watching? */
        if (m->idle_pipe_event_source)
                return 0;

        /* idle_pipe[2] is the read end of the second pipe pair; if it is not
         * open there is nothing to watch. */
        if (m->idle_pipe[2] < 0)
                return 0;

        r = sd_event_add_io(m->event, &m->idle_pipe_event_source, m->idle_pipe[2], EPOLLIN, manager_dispatch_idle_pipe_fd, m);
        if (r < 0)
                return log_error_errno(r, "Failed to watch idle pipe: %m");

        (void) sd_event_source_set_description(m->idle_pipe_event_source, "manager-idle-pipe");

        return 0;
}
|
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
/* Release the idle-pipe event source and close both pipe pairs used for the
 * bidirectional Type=idle handshake. Safe to call repeatedly. */
static void manager_close_idle_pipe(Manager *m) {
        assert(m);

        /* Unregister the event source before closing the fds it refers to. */
        m->idle_pipe_event_source = sd_event_source_unref(m->idle_pipe_event_source);

        /* idle_pipe is an array of four fds forming two pipe pairs. */
        safe_close_pair(m->idle_pipe);
        safe_close_pair(m->idle_pipe + 2);
}
|
|
|
|
|
2012-11-25 00:32:40 +01:00
|
|
|
/* Set up a timerfd that is cancelled (and hence becomes readable) whenever
 * CLOCK_REALTIME jumps relative to CLOCK_MONOTONIC, so the manager can react
 * to system clock changes. Missing kernel support is treated as non-fatal. */
static int manager_setup_time_change(Manager *m) {
        int r;

        /* We only care for the cancellation event, hence we set the
         * timeout to the latest possible value. */
        struct itimerspec its = {
                .it_value.tv_sec = TIME_T_MAX,
        };

        assert(m);
        assert_cc(sizeof(time_t) == sizeof(TIME_T_MAX));

        /* Skipped entirely in test runs. */
        if (m->test_run_flags)
                return 0;

        /* Uses TFD_TIMER_CANCEL_ON_SET to get notifications whenever
         * CLOCK_REALTIME makes a jump relative to CLOCK_MONOTONIC */

        m->time_change_fd = timerfd_create(CLOCK_REALTIME, TFD_NONBLOCK|TFD_CLOEXEC);
        if (m->time_change_fd < 0)
                return log_error_errno(errno, "Failed to create timerfd: %m");

        if (timerfd_settime(m->time_change_fd, TFD_TIMER_ABSTIME|TFD_TIMER_CANCEL_ON_SET, &its, NULL) < 0) {
                /* Old kernels may lack TFD_TIMER_CANCEL_ON_SET; degrade
                 * gracefully instead of failing manager setup. */
                log_debug_errno(errno, "Failed to set up TFD_TIMER_CANCEL_ON_SET, ignoring: %m");
                m->time_change_fd = safe_close(m->time_change_fd);
                return 0;
        }

        r = sd_event_add_io(m->event, &m->time_change_event_source, m->time_change_fd, EPOLLIN, manager_dispatch_time_change_fd, m);
        if (r < 0)
                return log_error_errno(r, "Failed to create time change event source: %m");

        (void) sd_event_source_set_description(m->time_change_event_source, "manager-time-change");

        log_debug("Set up TFD_TIMER_CANCEL_ON_SET timerfd.");

        return 0;
}
|
|
|
|
|
2010-04-13 02:06:27 +02:00
|
|
|
/* Enable delivery of SIGINT on ctrl-alt-del and SIGWINCH on the keyboard
 * "kbrequest" key on the first virtual console. All failures are logged but
 * non-fatal (the function always returns 0); skipped in test runs.
 *
 * Fix: open_terminal() returns a negative errno-style code (which the
 * 'fd != -ENOENT' check already relies on), so log 'fd' rather than the
 * unrelated 'errno' on failure. */
static int enable_special_signals(Manager *m) {
        _cleanup_close_ int fd = -1;

        assert(m);

        if (m->test_run_flags)
                return 0;

        /* Enable that we get SIGINT on control-alt-del. In containers
         * this will fail with EPERM (older) or EINVAL (newer), so
         * ignore that. */
        if (reboot(RB_DISABLE_CAD) < 0 && !IN_SET(errno, EPERM, EINVAL))
                log_warning_errno(errno, "Failed to enable ctrl-alt-del handling: %m");

        fd = open_terminal("/dev/tty0", O_RDWR|O_NOCTTY|O_CLOEXEC);
        if (fd < 0) {
                /* Support systems without virtual console */
                if (fd != -ENOENT)
                        /* open_terminal() reports errors through its return
                         * value, not errno. */
                        log_warning_errno(fd, "Failed to open /dev/tty0: %m");
        } else {
                /* Enable that we get SIGWINCH on kbrequest */
                if (ioctl(fd, KDSIGACCEPT, SIGWINCH) < 0)
                        log_warning_errno(errno, "Failed to enable kbrequest handling: %m");
        }

        return 0;
}
|
|
|
|
|
2018-03-09 09:32:03 +01:00
|
|
|
#define RTSIG_IF_AVAILABLE(signum) (signum <= SIGRTMAX ? signum : -1)
|
|
|
|
|
2010-01-28 02:01:15 +01:00
|
|
|
static int manager_setup_signals(Manager *m) {
|
2013-03-25 00:59:00 +01:00
|
|
|
struct sigaction sa = {
|
|
|
|
.sa_handler = SIG_DFL,
|
|
|
|
.sa_flags = SA_NOCLDSTOP|SA_RESTART,
|
|
|
|
};
|
2013-11-19 21:12:59 +01:00
|
|
|
sigset_t mask;
|
|
|
|
int r;
|
2009-11-18 00:42:52 +01:00
|
|
|
|
2010-01-28 02:01:15 +01:00
|
|
|
assert(m);
|
|
|
|
|
2010-04-13 02:36:56 +02:00
|
|
|
assert_se(sigaction(SIGCHLD, &sa, NULL) == 0);
|
|
|
|
|
2014-10-24 13:44:45 +02:00
|
|
|
/* We make liberal use of realtime signals here. On
|
|
|
|
* Linux/glibc we have 30 of them (with the exception of Linux
|
|
|
|
* on hppa, see below), between SIGRTMIN+0 ... SIGRTMIN+30
|
|
|
|
* (aka SIGRTMAX). */
|
2010-06-17 23:22:56 +02:00
|
|
|
|
2014-10-24 13:44:45 +02:00
|
|
|
assert_se(sigemptyset(&mask) == 0);
|
2010-06-17 23:22:56 +02:00
|
|
|
sigset_add_many(&mask,
|
|
|
|
SIGCHLD, /* Child died */
|
|
|
|
SIGTERM, /* Reexecute daemon */
|
|
|
|
SIGHUP, /* Reload configuration */
|
|
|
|
SIGUSR1, /* systemd/upstart: reconnect to D-Bus */
|
|
|
|
SIGUSR2, /* systemd: dump status */
|
|
|
|
SIGINT, /* Kernel sends us this on control-alt-del */
|
|
|
|
SIGWINCH, /* Kernel sends us this on kbrequest (alt-arrowup) */
|
|
|
|
SIGPWR, /* Some kernel drivers and upsd send us this on power failure */
|
2014-10-24 13:44:45 +02:00
|
|
|
|
2010-06-17 23:22:56 +02:00
|
|
|
SIGRTMIN+0, /* systemd: start default.target */
|
2010-10-14 00:54:48 +02:00
|
|
|
SIGRTMIN+1, /* systemd: isolate rescue.target */
|
2010-06-17 23:22:56 +02:00
|
|
|
SIGRTMIN+2, /* systemd: isolate emergency.target */
|
|
|
|
SIGRTMIN+3, /* systemd: start halt.target */
|
|
|
|
SIGRTMIN+4, /* systemd: start poweroff.target */
|
|
|
|
SIGRTMIN+5, /* systemd: start reboot.target */
|
2010-10-14 00:54:48 +02:00
|
|
|
SIGRTMIN+6, /* systemd: start kexec.target */
|
2014-10-24 13:44:45 +02:00
|
|
|
|
|
|
|
/* ... space for more special targets ... */
|
|
|
|
|
2010-10-14 00:54:48 +02:00
|
|
|
SIGRTMIN+13, /* systemd: Immediate halt */
|
|
|
|
SIGRTMIN+14, /* systemd: Immediate poweroff */
|
|
|
|
SIGRTMIN+15, /* systemd: Immediate reboot */
|
|
|
|
SIGRTMIN+16, /* systemd: Immediate kexec */
|
2014-10-24 13:44:45 +02:00
|
|
|
|
|
|
|
/* ... space for more immediate system state changes ... */
|
|
|
|
|
2011-02-09 12:12:30 +01:00
|
|
|
SIGRTMIN+20, /* systemd: enable status messages */
|
|
|
|
SIGRTMIN+21, /* systemd: disable status messages */
|
2011-07-23 04:15:38 +02:00
|
|
|
SIGRTMIN+22, /* systemd: set log level to LOG_DEBUG */
|
|
|
|
SIGRTMIN+23, /* systemd: set log level to LOG_INFO */
|
2012-10-18 01:19:35 +02:00
|
|
|
SIGRTMIN+24, /* systemd: Immediate exit (--user only) */
|
2014-10-24 13:44:45 +02:00
|
|
|
|
|
|
|
/* .. one free signal here ... */
|
|
|
|
|
2018-03-09 09:32:03 +01:00
|
|
|
/* Apparently Linux on hppa had fewer RT signals until v3.18,
|
|
|
|
* SIGRTMAX was SIGRTMIN+25, and then SIGRTMIN was lowered,
|
|
|
|
* see commit v3.17-7614-g1f25df2eff.
|
|
|
|
*
|
|
|
|
* We cannot unconditionally make use of those signals here,
|
|
|
|
* so let's use a runtime check. Since these commands are
|
|
|
|
* accessible by different means and only really a safety
|
|
|
|
* net, the missing functionality on hppa shouldn't matter.
|
|
|
|
*/
|
|
|
|
|
|
|
|
RTSIG_IF_AVAILABLE(SIGRTMIN+26), /* systemd: set log target to journal-or-kmsg */
|
|
|
|
RTSIG_IF_AVAILABLE(SIGRTMIN+27), /* systemd: set log target to console */
|
|
|
|
RTSIG_IF_AVAILABLE(SIGRTMIN+28), /* systemd: set log target to kmsg */
|
|
|
|
RTSIG_IF_AVAILABLE(SIGRTMIN+29), /* systemd: set log target to syslog-or-kmsg (obsolete) */
|
2014-10-24 13:44:45 +02:00
|
|
|
|
|
|
|
/* ... one free signal here SIGRTMIN+30 ... */
|
2010-06-17 23:22:56 +02:00
|
|
|
-1);
|
2010-01-28 02:01:15 +01:00
|
|
|
assert_se(sigprocmask(SIG_SETMASK, &mask, NULL) == 0);
|
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
m->signal_fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
|
|
|
|
if (m->signal_fd < 0)
|
2010-01-28 02:01:15 +01:00
|
|
|
return -errno;
|
|
|
|
|
2014-02-19 23:54:58 +01:00
|
|
|
r = sd_event_add_io(m->event, &m->signal_event_source, m->signal_fd, EPOLLIN, manager_dispatch_signal_fd, m);
|
2013-11-19 21:12:59 +01:00
|
|
|
if (r < 0)
|
|
|
|
return r;
|
2010-01-28 02:01:15 +01:00
|
|
|
|
2015-04-29 16:05:32 +02:00
|
|
|
(void) sd_event_source_set_description(m->signal_event_source, "manager-signal");
|
|
|
|
|
2016-05-04 20:43:23 +02:00
|
|
|
/* Process signals a bit earlier than the rest of things, but later than notify_fd processing, so that the
|
|
|
|
* notify processing can still figure out to which process/service a message belongs, before we reap the
|
|
|
|
* process. Also, process this before handling cgroup notifications, so that we always collect child exit
|
|
|
|
* status information before detecting that there's no process in a cgroup. */
|
|
|
|
r = sd_event_source_set_priority(m->signal_event_source, SD_EVENT_PRIORITY_NORMAL-6);
|
2013-11-25 15:35:10 +01:00
|
|
|
if (r < 0)
|
|
|
|
return r;
|
|
|
|
|
2016-02-24 21:24:23 +01:00
|
|
|
if (MANAGER_IS_SYSTEM(m))
|
2010-04-13 02:06:27 +02:00
|
|
|
return enable_special_signals(m);
|
2010-02-14 22:39:40 +01:00
|
|
|
|
2010-01-28 02:01:15 +01:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-01-10 18:28:42 +01:00
|
|
|
/* Strip from the manager's environment block the variables we use ourselves
 * to communicate with our clients, then sort it for stable, pretty output. */
static void manager_sanitize_environment(Manager *m) {
        assert(m);

        /* Let's remove some environment variables that we need ourselves to communicate with our clients */
        strv_env_unset_many(
                        m->environment,
                        "EXIT_CODE",
                        "EXIT_STATUS",
                        "INVOCATION_ID",
                        "JOURNAL_STREAM",
                        "LISTEN_FDNAMES",
                        "LISTEN_FDS",
                        "LISTEN_PID",
                        "MAINPID",
                        "MANAGERPID",
                        "NOTIFY_SOCKET",
                        "REMOTE_ADDR",
                        "REMOTE_PORT",
                        "SERVICE_RESULT",
                        "WATCHDOG_PID",
                        "WATCHDOG_USEC",
                        NULL);

        /* Let's order the environment alphabetically, just to make it pretty */
        strv_sort(m->environment);
}
|
|
|
|
|
2013-07-26 05:22:22 +02:00
|
|
|
/* Initialize m->environment with the default environment block for child
 * processes. Returns 0 on success, -ENOMEM on allocation failure. */
static int manager_default_environment(Manager *m) {
        assert(m);

        if (MANAGER_IS_SYSTEM(m)) {
                /* The system manager always starts with a clean
                 * environment for its children. It does not import
                 * the kernel's or the parents' exported variables.
                 *
                 * The initial passed environment is untouched to keep
                 * /proc/self/environ valid; it is used for tagging
                 * the init process inside containers. */
                m->environment = strv_new("PATH=" DEFAULT_PATH,
                                          NULL);

                /* Import locale variables LC_*= from configuration */
                locale_setup(&m->environment);
        } else
                /* The user manager passes its own environment
                 * along to its children. */
                m->environment = strv_copy(environ);

        if (!m->environment)
                return -ENOMEM;

        manager_sanitize_environment(m);

        return 0;
}
|
|
|
|
|
core: add {State,Cache,Log,Configuration}Directory= (#6384)
This introduces {State,Cache,Log,Configuration}Directory= those are
similar to RuntimeDirectory=. They create the directories under
/var/lib, /var/cache/, /var/log, or /etc, respectively, with the mode
specified in {State,Cache,Log,Configuration}DirectoryMode=.
This also fixes #6391.
2017-07-18 14:34:52 +02:00
|
|
|
/* Resolve the per-scope base directories (runtime/state/cache/logs/config)
 * used as prefixes for RuntimeDirectory=, StateDirectory= and friends, and
 * store them in m->prefix[]. Returns 0 on success, negative errno on failure. */
static int manager_setup_prefix(Manager *m) {
        struct table_entry {
                uint64_t type;          /* SD_PATH_* identifier to look up */
                const char *suffix;     /* optional subdirectory appended to it */
        };

        static const struct table_entry paths_system[_EXEC_DIRECTORY_TYPE_MAX] = {
                [EXEC_DIRECTORY_RUNTIME] = { SD_PATH_SYSTEM_RUNTIME, NULL },
                [EXEC_DIRECTORY_STATE] = { SD_PATH_SYSTEM_STATE_PRIVATE, NULL },
                [EXEC_DIRECTORY_CACHE] = { SD_PATH_SYSTEM_STATE_CACHE, NULL },
                [EXEC_DIRECTORY_LOGS] = { SD_PATH_SYSTEM_STATE_LOGS, NULL },
                [EXEC_DIRECTORY_CONFIGURATION] = { SD_PATH_SYSTEM_CONFIGURATION, NULL },
        };

        static const struct table_entry paths_user[_EXEC_DIRECTORY_TYPE_MAX] = {
                [EXEC_DIRECTORY_RUNTIME] = { SD_PATH_USER_RUNTIME, NULL },
                [EXEC_DIRECTORY_STATE] = { SD_PATH_USER_CONFIGURATION, NULL },
                [EXEC_DIRECTORY_CACHE] = { SD_PATH_USER_STATE_CACHE, NULL },
                [EXEC_DIRECTORY_LOGS] = { SD_PATH_USER_CONFIGURATION, "log" },
                [EXEC_DIRECTORY_CONFIGURATION] = { SD_PATH_USER_CONFIGURATION, NULL },
        };

        const struct table_entry *p;
        ExecDirectoryType i;
        int r;

        assert(m);

        if (MANAGER_IS_SYSTEM(m))
                p = paths_system;
        else
                p = paths_user;

        for (i = 0; i < _EXEC_DIRECTORY_TYPE_MAX; i++) {
                r = sd_path_home(p[i].type, p[i].suffix, &m->prefix[i]);
                if (r < 0)
                        return r;
        }

        return 0;
}
|
|
|
|
|
2017-12-06 23:24:00 +01:00
|
|
|
/* Install the deferred event source that drains the job run queue. It is
 * created disabled (enabled on demand when jobs are queued) and runs at idle
 * priority. Returns 0 on success, negative errno on failure. */
static int manager_setup_run_queue(Manager *m) {
        int r;

        assert(m);
        assert(!m->run_queue_event_source);

        r = sd_event_add_defer(m->event, &m->run_queue_event_source, manager_dispatch_run_queue, m);
        if (r < 0)
                return r;

        r = sd_event_source_set_priority(m->run_queue_event_source, SD_EVENT_PRIORITY_IDLE);
        if (r < 0)
                return r;

        r = sd_event_source_set_enabled(m->run_queue_event_source, SD_EVENT_OFF);
        if (r < 0)
                return r;

        (void) sd_event_source_set_description(m->run_queue_event_source, "manager-run-queue");

        return 0;
}
|
|
|
|
|
2018-01-23 18:18:13 +01:00
|
|
|
/* Install the deferred event source for SIGCHLD/child-reaping dispatch.
 * Created disabled and armed when children need reaping; runs at priority
 * NORMAL-7 (just before signal processing at NORMAL-6). Returns 0 on
 * success, negative errno on failure. */
static int manager_setup_sigchld_event_source(Manager *m) {
        int r;

        assert(m);
        assert(!m->sigchld_event_source);

        r = sd_event_add_defer(m->event, &m->sigchld_event_source, manager_dispatch_sigchld, m);
        if (r < 0)
                return r;

        r = sd_event_source_set_priority(m->sigchld_event_source, SD_EVENT_PRIORITY_NORMAL-7);
        if (r < 0)
                return r;

        r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_OFF);
        if (r < 0)
                return r;

        (void) sd_event_source_set_description(m->sigchld_event_source, "manager-sigchld");

        return 0;
}
|
|
|
|
|
2017-09-16 11:19:43 +02:00
|
|
|
/* Allocate and initialize a new Manager object for the given scope (system or
 * user). test_run_flags selects test-mode behavior; MANAGER_TEST_RUN_MINIMAL
 * skips most OS-facing setup. On success stores the new object in *_m and
 * returns 0; on failure returns a negative errno code (the partially
 * constructed manager is freed via the cleanup handler). */
int manager_new(UnitFileScope scope, unsigned test_run_flags, Manager **_m) {
        _cleanup_(manager_freep) Manager *m = NULL;
        int r;

        assert(_m);
        assert(IN_SET(scope, UNIT_FILE_SYSTEM, UNIT_FILE_USER));

        m = new0(Manager, 1);
        if (!m)
                return -ENOMEM;

        m->unit_file_scope = scope;
        m->exit_code = _MANAGER_EXIT_CODE_INVALID;
        m->default_timer_accuracy_usec = USEC_PER_MINUTE;
        m->default_memory_accounting = MEMORY_ACCOUNTING_DEFAULT;
        m->default_tasks_accounting = true;
        m->default_tasks_max = UINT64_MAX;
        m->default_timeout_start_usec = DEFAULT_TIMEOUT_USEC;
        m->default_timeout_stop_usec = DEFAULT_TIMEOUT_USEC;
        m->default_restart_usec = DEFAULT_RESTART_USEC;

#if ENABLE_EFI
        if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0)
                boot_timestamps(m->timestamps + MANAGER_TIMESTAMP_USERSPACE,
                                m->timestamps + MANAGER_TIMESTAMP_FIRMWARE,
                                m->timestamps + MANAGER_TIMESTAMP_LOADER);
#endif

        /* Prepare log fields we can use for structured logging */
        if (MANAGER_IS_SYSTEM(m)) {
                m->unit_log_field = "UNIT=";
                m->unit_log_format_string = "UNIT=%s";

                m->invocation_log_field = "INVOCATION_ID=";
                m->invocation_log_format_string = "INVOCATION_ID=%s";
        } else {
                m->unit_log_field = "USER_UNIT=";
                m->unit_log_format_string = "USER_UNIT=%s";

                m->invocation_log_field = "USER_INVOCATION_ID=";
                m->invocation_log_format_string = "USER_INVOCATION_ID=%s";
        }

        /* Mark all fd fields as "not yet open" */
        m->idle_pipe[0] = m->idle_pipe[1] = m->idle_pipe[2] = m->idle_pipe[3] = -1;

        m->pin_cgroupfs_fd = m->notify_fd = m->cgroups_agent_fd = m->signal_fd = m->time_change_fd =
                m->dev_autofs_fd = m->private_listen_fd = m->cgroup_inotify_fd =
                m->ask_password_inotify_fd = -1;

        m->user_lookup_fds[0] = m->user_lookup_fds[1] = -1;

        m->current_job_id = 1; /* start as id #1, so that we can leave #0 around as "null-like" value */

        m->have_ask_password = -EINVAL; /* we don't know */
        m->first_boot = -1;

        m->test_run_flags = test_run_flags;

        /* Reboot immediately if the user hits C-A-D more often than 7x per 2s */
        RATELIMIT_INIT(m->ctrl_alt_del_ratelimit, 2 * USEC_PER_SEC, 7);

        r = manager_default_environment(m);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&m->units, &string_hash_ops);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&m->jobs, NULL);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&m->cgroup_unit, &path_hash_ops);
        if (r < 0)
                return r;

        r = hashmap_ensure_allocated(&m->watch_bus, &string_hash_ops);
        if (r < 0)
                return r;

        r = manager_setup_prefix(m);
        if (r < 0)
                return r;

        m->udev = udev_new();
        if (!m->udev)
                return -ENOMEM;

        r = sd_event_default(&m->event);
        if (r < 0)
                return r;

        r = manager_setup_run_queue(m);
        if (r < 0)
                return r;

        if (test_run_flags == MANAGER_TEST_RUN_MINIMAL) {
                /* Minimal test mode: fake an empty cgroup root, skip OS setup */
                m->cgroup_root = strdup("");
                if (!m->cgroup_root)
                        return -ENOMEM;
        } else {
                r = manager_setup_signals(m);
                if (r < 0)
                        return r;

                r = manager_setup_cgroup(m);
                if (r < 0)
                        return r;

                r = manager_setup_time_change(m);
                if (r < 0)
                        return r;

                r = manager_setup_sigchld_event_source(m);
                if (r < 0)
                        return r;
        }

        if (MANAGER_IS_SYSTEM(m) && test_run_flags == 0) {
                r = mkdir_label("/run/systemd/units", 0755);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        m->taint_usr =
                !in_initrd() &&
                dir_is_empty("/usr") > 0;

        /* Note that we do not set up the notify fd here. We do that after deserialization,
         * since they might have gotten serialized across the reexec. */

        *_m = TAKE_PTR(m);

        return 0;
}
|
|
|
|
|
2013-12-21 00:19:37 +01:00
|
|
|
/* Lazily create the sd_notify() datagram socket (under the runtime prefix)
 * and hook it into the event loop. Safe to call repeatedly: the socket and
 * its event source are each only set up if not already present. Skipped
 * entirely in test runs. Returns 0 on success, negative errno on failure. */
static int manager_setup_notify(Manager *m) {
        int r;

        if (m->test_run_flags)
                return 0;

        if (m->notify_fd < 0) {
                _cleanup_close_ int fd = -1;
                union sockaddr_union sa = {
                        .sa.sa_family = AF_UNIX,
                };
                static const int one = 1;

                /* First free all secondary fields */
                m->notify_socket = mfree(m->notify_socket);
                m->notify_event_source = sd_event_source_unref(m->notify_event_source);

                fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
                if (fd < 0)
                        return log_error_errno(errno, "Failed to allocate notification socket: %m");

                fd_inc_rcvbuf(fd, NOTIFY_RCVBUF_SIZE);

                m->notify_socket = strappend(m->prefix[EXEC_DIRECTORY_RUNTIME], "/systemd/notify");
                if (!m->notify_socket)
                        return log_oom();

                (void) mkdir_parents_label(m->notify_socket, 0755);
                (void) unlink(m->notify_socket);

                strncpy(sa.un.sun_path, m->notify_socket, sizeof(sa.un.sun_path)-1);
                r = bind(fd, &sa.sa, SOCKADDR_UN_LEN(sa.un));
                if (r < 0)
                        return log_error_errno(errno, "bind(%s) failed: %m", sa.un.sun_path);

                r = setsockopt(fd, SOL_SOCKET, SO_PASSCRED, &one, sizeof(one));
                if (r < 0)
                        return log_error_errno(errno, "SO_PASSCRED failed: %m");

                m->notify_fd = TAKE_FD(fd);

                log_debug("Using notification socket %s", m->notify_socket);
        }

        if (!m->notify_event_source) {
                r = sd_event_add_io(m->event, &m->notify_event_source, m->notify_fd, EPOLLIN, manager_dispatch_notify_fd, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate notify event source: %m");

                /* Process notification messages a bit earlier than SIGCHLD, so that we can still identify to which
                 * service an exit message belongs. */
                r = sd_event_source_set_priority(m->notify_event_source, SD_EVENT_PRIORITY_NORMAL-8);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of notify event source: %m");

                (void) sd_event_source_set_description(m->notify_event_source, "manager-notify");
        }

        return 0;
}
|
|
|
|
|
2016-05-04 20:43:23 +02:00
|
|
|
static int manager_setup_cgroups_agent(Manager *m) {

        static const union sockaddr_union sa = {
                .un.sun_family = AF_UNIX,
                .un.sun_path = "/run/systemd/cgroups-agent",
        };
        int r;

        /* This creates a listening socket we receive cgroups agent messages on. We do not use D-Bus for delivering
         * these messages from the cgroups agent binary to PID 1, as the cgroups agent binary is very short-living, and
         * each instance of it needs a new D-Bus connection. Since D-Bus connections are SOCK_STREAM/AF_UNIX, on
         * overloaded systems the backlog of the D-Bus socket becomes relevant, as not more than the configured number
         * of D-Bus connections may be queued until the kernel will start dropping further incoming connections,
         * possibly resulting in lost cgroups agent messages. To avoid this, we'll use a private SOCK_DGRAM/AF_UNIX
         * socket, where no backlog is relevant as communication may take place without an actual connect() cycle, and
         * we thus won't lose messages.
         *
         * Note that PID 1 will forward the agent message to system bus, so that the user systemd instance may listen
         * to it. The system instance hence listens on this special socket, but the user instances listen on the system
         * bus for these messages. */

        if (m->test_run_flags)
                return 0;

        /* Only the system instance listens on this socket; user instances get the
         * messages forwarded via the system bus instead (see comment above). */
        if (!MANAGER_IS_SYSTEM(m))
                return 0;

        r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
        if (r < 0)
                return log_error_errno(r, "Failed to determine whether unified cgroups hierarchy is used: %m");
        if (r > 0) /* We don't need this anymore on the unified hierarchy */
                return 0;

        if (m->cgroups_agent_fd < 0) {
                _cleanup_close_ int fd = -1;

                /* First free all secondary fields */
                m->cgroups_agent_event_source = sd_event_source_unref(m->cgroups_agent_event_source);

                fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
                if (fd < 0)
                        return log_error_errno(errno, "Failed to allocate cgroups agent socket: %m");

                /* Best effort: a small receive buffer only risks dropped messages.
                 * Mark it (void) for consistency with the other setup functions. */
                (void) fd_inc_rcvbuf(fd, CGROUPS_AGENT_RCVBUF_SIZE);

                /* Remove any stale socket inode left over from a previous instance. */
                (void) unlink(sa.un.sun_path);

                /* Only allow root to connect to this socket */
                RUN_WITH_UMASK(0077)
                        r = bind(fd, &sa.sa, SOCKADDR_UN_LEN(sa.un));
                if (r < 0)
                        return log_error_errno(errno, "bind(%s) failed: %m", sa.un.sun_path);

                /* Use TAKE_FD() to transfer ownership out of the _cleanup_close_
                 * variable, matching how the notify socket setup does it. */
                m->cgroups_agent_fd = TAKE_FD(fd);
        }

        if (!m->cgroups_agent_event_source) {
                r = sd_event_add_io(m->event, &m->cgroups_agent_event_source, m->cgroups_agent_fd, EPOLLIN, manager_dispatch_cgroups_agent_fd, m);
                if (r < 0)
                        return log_error_errno(r, "Failed to allocate cgroups agent event source: %m");

                /* Process cgroups notifications early, but after having processed service notification messages or
                 * SIGCHLD signals, so that a cgroup running empty is always just the last safety net of notification,
                 * and we collected the metadata the notification and SIGCHLD stuff offers first. Also see handling of
                 * cgroup inotify for the unified cgroup stuff. */
                r = sd_event_source_set_priority(m->cgroups_agent_event_source, SD_EVENT_PRIORITY_NORMAL-4);
                if (r < 0)
                        return log_error_errno(r, "Failed to set priority of cgroups agent event source: %m");

                (void) sd_event_source_set_description(m->cgroups_agent_event_source, "manager-cgroups-agent");
        }

        return 0;
}
|
|
|
|
|
2016-08-01 19:24:40 +02:00
|
|
|
static int manager_setup_user_lookup_fd(Manager *m) {
        int r;

        assert(m);

        /* Set up the socket pair used for passing UID/GID resolution results from forked off processes to PID
         * 1. Background: we can't do name lookups (NSS) from PID 1, since it might involve IPC and thus activation,
         * and we might hence deadlock on ourselves. Hence we do all user/group lookups asynchronously from the forked
         * off processes right before executing the binaries to start. In order to be able to clean up any IPC objects
         * created by a unit (see RemoveIPC=) we need to know in PID 1 the used UID/GID of the executed processes,
         * hence we establish this communication channel so that forked off processes can pass their UID/GID
         * information back to PID 1. The forked off processes send their resolved UID/GID to PID 1 in a simple
         * datagram, along with their unit name, so that we can share one communication socket pair among all units for
         * this purpose.
         *
         * You might wonder why we need a communication channel for this that is independent of the usual notification
         * socket scheme (i.e. $NOTIFY_SOCKET). The primary difference is about trust: data sent via the $NOTIFY_SOCKET
         * channel is only accepted if it originates from the right unit and if reception was enabled for it. The user
         * lookup socket OTOH is only accessible by PID 1 and its children until they exec(), and always available.
         *
         * Note that this function is called under two circumstances: when we first initialize (in which case we
         * allocate both the socket pair and the event source to listen on it), and when we deserialize after a reload
         * (in which case the socket pair already exists but we still need to allocate the event source for it). */

        if (m->user_lookup_fds[0] < 0) {

                /* Free all secondary fields */
                safe_close_pair(m->user_lookup_fds);
                m->user_lookup_event_source = sd_event_source_unref(m->user_lookup_event_source);

                if (socketpair(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0, m->user_lookup_fds) < 0)
                        return log_error_errno(errno, "Failed to allocate user lookup socket: %m");

                /* Best effort: enlarge the receive buffer so bursts of lookups aren't dropped. */
                (void) fd_inc_rcvbuf(m->user_lookup_fds[0], NOTIFY_RCVBUF_SIZE);
        }

        if (!m->user_lookup_event_source) {
                r = sd_event_add_io(m->event, &m->user_lookup_event_source, m->user_lookup_fds[0], EPOLLIN, manager_dispatch_user_lookup_fd, m);
                if (r < 0)
                        /* Fixed: sd_event_add_io() reports its error as a negative return
                         * value, it does not set errno — log 'r', not 'errno'. */
                        return log_error_errno(r, "Failed to allocate user lookup event source: %m");

                /* Process even earlier than the notify event source, so that we always know first about valid UID/GID
                 * resolutions */
                r = sd_event_source_set_priority(m->user_lookup_event_source, SD_EVENT_PRIORITY_NORMAL-11);
                if (r < 0)
                        /* Fixed: same errno-vs-r confusion as above, plus "ot" → "of" typo. */
                        return log_error_errno(r, "Failed to set priority of user lookup event source: %m");

                (void) sd_event_source_set_description(m->user_lookup_event_source, "user-lookup");
        }

        return 0;
}
|
|
|
|
|
2010-04-06 02:43:58 +02:00
|
|
|
/* Drain the cleanup queue, freeing every unit queued on it. Returns the number
 * of units freed. unit_free() detaches the unit from m->cleanup_queue, so the
 * loop terminates once the queue head becomes NULL. */
static unsigned manager_dispatch_cleanup_queue(Manager *m) {
        unsigned freed = 0;
        Unit *u;

        assert(m);

        for (u = m->cleanup_queue; u; u = m->cleanup_queue) {
                assert(u->in_cleanup_queue);

                unit_free(u);
                freed++;
        }

        return freed;
}
|
|
|
|
|
2010-04-23 18:47:49 +02:00
|
|
|
/* Per-sweep GC states, encoded as offsets added to the Manager's current
 * gc_marker. A unit's state in the current sweep is u->gc_marker - gc_marker;
 * any other difference means the unit has not been visited this sweep yet
 * (see unit_gc_sweep() and manager_dispatch_gc_unit_queue()). */
enum {
        GC_OFFSET_IN_PATH, /* This one is on the path we were traveling */
        GC_OFFSET_UNSURE, /* No clue */
        GC_OFFSET_GOOD, /* We still need this unit */
        GC_OFFSET_BAD, /* We don't need this unit anymore */
        _GC_OFFSET_MAX
};
|
|
|
|
|
2016-08-01 19:24:40 +02:00
|
|
|
/* Mark 'u' as GOOD (still needed) for the current GC sweep and propagate that
 * mark recursively to every unit it references. Recursion only descends into
 * units currently marked UNSURE, so each unit is visited at most once per
 * sweep and the recursion terminates. */
static void unit_gc_mark_good(Unit *u, unsigned gc_marker) {
        Unit *other;
        Iterator i;
        void *v;

        u->gc_marker = gc_marker + GC_OFFSET_GOOD;

        /* Recursively mark referenced units as GOOD as well */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REFERENCES], i)
                if (other->gc_marker == gc_marker + GC_OFFSET_UNSURE)
                        unit_gc_mark_good(other, gc_marker);
}
|
|
|
|
|
2010-04-23 18:47:49 +02:00
|
|
|
/* Decide whether 'u' is still needed, marking it GOOD, BAD or UNSURE relative
 * to the current sweep's gc_marker. A unit is GOOD if anything that references
 * it (dependency or UnitRef) is GOOD; BAD if everything referencing it is BAD;
 * otherwise UNSURE and re-queued for a later sweep. BAD units go straight to
 * the cleanup queue. */
static void unit_gc_sweep(Unit *u, unsigned gc_marker) {
        Unit *other;
        bool is_bad;
        Iterator i;
        void *v;

        assert(u);

        /* Already visited in this sweep (any of the four per-sweep states)? Then
         * nothing to do. IN_PATH in particular breaks reference cycles. */
        if (IN_SET(u->gc_marker - gc_marker,
                   GC_OFFSET_GOOD, GC_OFFSET_BAD, GC_OFFSET_UNSURE, GC_OFFSET_IN_PATH))
                return;

        /* Already queued for cleanup — definitely not needed anymore. */
        if (u->in_cleanup_queue)
                goto bad;

        /* The unit itself says it must be kept (active, referenced, pinned, ...). */
        if (!unit_may_gc(u))
                goto good;

        /* Mark as "on the current traversal path" before recursing, so cycles
         * through this unit don't loop forever. */
        u->gc_marker = gc_marker + GC_OFFSET_IN_PATH;

        is_bad = true;

        /* Check everything that references us via dependencies: one GOOD referrer
         * makes us GOOD; any non-BAD referrer means we can't conclude BAD. */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_REFERENCED_BY], i) {
                unit_gc_sweep(other, gc_marker);

                if (other->gc_marker == gc_marker + GC_OFFSET_GOOD)
                        goto good;

                if (other->gc_marker != gc_marker + GC_OFFSET_BAD)
                        is_bad = false;
        }

        /* Same logic for explicit UnitRef references targeting us. */
        if (u->refs_by_target) {
                const UnitRef *ref;

                LIST_FOREACH(refs_by_target, ref, u->refs_by_target) {
                        unit_gc_sweep(ref->source, gc_marker);

                        if (ref->source->gc_marker == gc_marker + GC_OFFSET_GOOD)
                                goto good;

                        if (ref->source->gc_marker != gc_marker + GC_OFFSET_BAD)
                                is_bad = false;
                }
        }

        if (is_bad)
                goto bad;

        /* We were unable to find anything out about this entry, so
         * let's investigate it later */
        u->gc_marker = gc_marker + GC_OFFSET_UNSURE;
        unit_add_to_gc_queue(u);
        return;

bad:
        /* We definitely know that this one is not useful anymore, so
         * let's mark it for deletion */
        u->gc_marker = gc_marker + GC_OFFSET_BAD;
        unit_add_to_cleanup_queue(u);
        return;

good:
        unit_gc_mark_good(u, gc_marker);
}
|
|
|
|
|
2016-11-15 19:32:50 +01:00
|
|
|
/* Run one GC pass over all units queued for garbage collection. Returns the
 * number of units processed. Units determined to be BAD (or still UNSURE at
 * the end of the sweep) are demoted to BAD and moved to the cleanup queue. */
static unsigned manager_dispatch_gc_unit_queue(Manager *m) {
        unsigned n = 0, gc_marker;
        Unit *u;

        assert(m);

        /* log_debug("Running GC..."); */

        /* Advance the sweep marker. Each sweep owns a contiguous range of
         * _GC_OFFSET_MAX values; on unsigned wraparound restart at 1 so that the
         * range never collides with stale markers from a previous epoch. */
        m->gc_marker += _GC_OFFSET_MAX;
        if (m->gc_marker + _GC_OFFSET_MAX <= _GC_OFFSET_MAX)
                m->gc_marker = 1;

        gc_marker = m->gc_marker;

        while ((u = m->gc_unit_queue)) {
                assert(u->in_gc_queue);

                /* Classify this unit (and, recursively, its referrers) as
                 * GOOD/BAD/UNSURE for this sweep. */
                unit_gc_sweep(u, gc_marker);

                LIST_REMOVE(gc_queue, m->gc_unit_queue, u);
                u->in_gc_queue = false;

                n++;

                /* Anything not proven GOOD by the sweep gets collected. */
                if (IN_SET(u->gc_marker - gc_marker,
                           GC_OFFSET_BAD, GC_OFFSET_UNSURE)) {
                        if (u->id)
                                log_unit_debug(u, "Collecting.");
                        u->gc_marker = gc_marker + GC_OFFSET_BAD;
                        unit_add_to_cleanup_queue(u);
                }
        }

        return n;
}
|
|
|
|
|
2016-11-15 19:32:50 +01:00
|
|
|
/* Drain the job GC queue: every queued job is dequeued, and those that are
 * eligible for collection are finished with JOB_COLLECTED. Returns the number
 * of jobs examined (not the number collected). */
static unsigned manager_dispatch_gc_job_queue(Manager *m) {
        unsigned examined = 0;
        Job *j;

        assert(m);

        while ((j = m->gc_job_queue)) {
                assert(j->in_gc_queue);

                LIST_REMOVE(gc_queue, m->gc_job_queue, j);
                j->in_gc_queue = false;

                examined++;

                /* Only jobs nothing depends on anymore may be collected. */
                if (job_may_gc(j)) {
                        log_unit_debug(j->unit, "Collecting job.");
                        (void) job_finish_and_invalidate(j, JOB_COLLECTED, false, false);
                }
        }

        return examined;
}
|
|
|
|
|
2010-04-21 03:27:44 +02:00
|
|
|
/* Release every unit (and thereby every job) the manager tracks, then verify
 * that all of the manager's work queues and lookup tables really ended up
 * empty. Used both on shutdown and before re-populating state. */
static void manager_clear_jobs_and_units(Manager *m) {
        Unit *u;

        assert(m);

        /* unit_free() unregisters the unit from m->units, so repeatedly freeing
         * the "first" entry drains the whole hashmap. */
        while ((u = hashmap_first(m->units)))
                unit_free(u);

        /* Units may have been pushed to the cleanup queue above — flush it now. */
        manager_dispatch_cleanup_queue(m);

        /* Sanity checks: with all units gone, no queue may still hold entries. */
        assert(!m->load_queue);
        assert(!m->run_queue);
        assert(!m->dbus_unit_queue);
        assert(!m->dbus_job_queue);
        assert(!m->cleanup_queue);
        assert(!m->gc_unit_queue);
        assert(!m->gc_job_queue);

        assert(hashmap_isempty(m->jobs));
        assert(hashmap_isempty(m->units));

        /* Reset counters derived from the (now empty) unit/job sets. */
        m->n_on_console = 0;
        m->n_running_jobs = 0;
}
|
|
|
|
|
2014-11-08 16:06:12 +01:00
|
|
|
/* Destroy a Manager and release everything it owns. NULL-safe; always returns
 * NULL so callers can write "m = manager_free(m);". The teardown order matters:
 * units/jobs first, then per-type shutdown hooks, then the cgroup hierarchy,
 * buses, event sources, fds, and finally plain allocations. */
Manager* manager_free(Manager *m) {
        UnitType c;
        ExecDirectoryType dt;

        if (!m)
                return NULL;

        manager_clear_jobs_and_units(m);

        /* Give each unit type a chance to clean up type-global state. */
        for (c = 0; c < _UNIT_TYPE_MAX; c++)
                if (unit_vtable[c]->shutdown)
                        unit_vtable[c]->shutdown(m);

        /* If we reexecute ourselves, we keep the root cgroup around */
        manager_shutdown_cgroup(m, m->exit_code != MANAGER_REEXECUTE);

        lookup_paths_flush_generator(&m->lookup_paths);

        bus_done(m);

        /* Vacuum before freeing the containers, so no entries leak. */
        exec_runtime_vacuum(m);
        hashmap_free(m->exec_runtime_by_id);

        dynamic_user_vacuum(m, false);
        hashmap_free(m->dynamic_users);

        hashmap_free(m->units);
        hashmap_free(m->units_by_invocation_id);
        hashmap_free(m->jobs);
        hashmap_free(m->watch_pids);
        hashmap_free(m->watch_bus);

        set_free(m->startup_units);
        set_free(m->failed_units);

        /* Detach all event sources before the event loop itself goes away below. */
        sd_event_source_unref(m->signal_event_source);
        sd_event_source_unref(m->sigchld_event_source);
        sd_event_source_unref(m->notify_event_source);
        sd_event_source_unref(m->cgroups_agent_event_source);
        sd_event_source_unref(m->time_change_event_source);
        sd_event_source_unref(m->jobs_in_progress_event_source);
        sd_event_source_unref(m->run_queue_event_source);
        sd_event_source_unref(m->user_lookup_event_source);
        sd_event_source_unref(m->sync_bus_names_event_source);

        safe_close(m->signal_fd);
        safe_close(m->notify_fd);
        safe_close(m->cgroups_agent_fd);
        safe_close(m->time_change_fd);
        safe_close_pair(m->user_lookup_fds);

        manager_close_ask_password(m);

        manager_close_idle_pipe(m);

        udev_unref(m->udev);
        sd_event_unref(m->event);

        free(m->notify_socket);

        lookup_paths_free(&m->lookup_paths);
        strv_free(m->environment);

        hashmap_free(m->cgroup_unit);
        set_free_free(m->unit_path_cache);

        free(m->switch_root);
        free(m->switch_root_init);

        rlimit_free_all(m->rlimit);

        /* Must already be empty: entries are removed as their owning units die. */
        assert(hashmap_isempty(m->units_requiring_mounts_for));
        hashmap_free(m->units_requiring_mounts_for);

        hashmap_free(m->uid_refs);
        hashmap_free(m->gid_refs);

        for (dt = 0; dt < _EXEC_DIRECTORY_TYPE_MAX; dt++)
                m->prefix[dt] = mfree(m->prefix[dt]);

        /* mfree() frees and returns NULL, matching this function's contract. */
        return mfree(m);
}
|
|
|
|
|
2018-04-20 12:12:11 +02:00
|
|
|
/* Ask every supported unit type to enumerate the units it knows about from
 * disk and/or the kernel, then process the load queue the enumeration filled. */
static void manager_enumerate(Manager *m) {
        UnitType c;

        assert(m);

        for (c = 0; c < _UNIT_TYPE_MAX; c++) {
                if (unit_type_supported(c)) {
                        /* Not every type implements an enumerate hook. */
                        if (unit_vtable[c]->enumerate)
                                unit_vtable[c]->enumerate(m);
                } else
                        log_debug("Unit type .%s is not supported on this system.", unit_type_to_string(c));
        }

        /* Enumeration queues units for loading; drain that queue now. */
        manager_dispatch_load_queue(m);
}
|
|
|
|
|
2015-04-24 16:09:15 +02:00
|
|
|
/* Establish the initial runtime state of every loaded unit. Failures are
 * logged but non-fatal — we proceed with the remaining units. */
static void manager_coldplug(Manager *m) {
        Iterator i;
        Unit *u;
        char *k;
        int r;

        assert(m);

        /* Then, let's set up their initial state. */
        HASHMAP_FOREACH_KEY(u, k, m->units, i) {

                /* Aliases map several keys to the same Unit object; coldplug each
                 * unit exactly once, under its primary id. */
                if (u->id == k) {
                        r = unit_coldplug(u);
                        if (r < 0)
                                log_warning_errno(r, "We couldn't coldplug %s, proceeding anyway: %m", u->id);
                }
        }
}
|
|
|
|
|
2010-07-11 00:52:00 +02:00
|
|
|
/* Rebuild the cache of unit file paths known to exist in the search path, so
 * unit loading can skip stat()ing paths that are definitely absent. Failure is
 * non-fatal: on any error the cache is dropped entirely and lookups fall back
 * to hitting the disk. */
static void manager_build_unit_path_cache(Manager *m) {
        char **i;
        int r;

        assert(m);

        set_free_free(m->unit_path_cache);

        m->unit_path_cache = set_new(&path_hash_ops);
        if (!m->unit_path_cache) {
                r = -ENOMEM;
                goto fail;
        }

        /* This simply builds a list of files we know exist, so that
         * we don't always have to go to disk */

        STRV_FOREACH(i, m->lookup_paths.search_path) {
                _cleanup_closedir_ DIR *d = NULL;
                struct dirent *de;

                d = opendir(*i);
                if (!d) {
                        /* A missing search directory is normal; anything else is
                         * worth a warning, but never fatal for the whole cache. */
                        if (errno != ENOENT)
                                log_warning_errno(errno, "Failed to open directory %s, ignoring: %m", *i);
                        continue;
                }

                FOREACH_DIRENT(de, d, r = -errno; goto fail) {
                        char *p;

                        /* Avoid a double slash when the search path entry is "/". */
                        p = strjoin(streq(*i, "/") ? "" : *i, "/", de->d_name);
                        if (!p) {
                                r = -ENOMEM;
                                goto fail;
                        }

                        /* set_consume() takes ownership of p, freeing it on
                         * duplicates or on error. */
                        r = set_consume(m->unit_path_cache, p);
                        if (r < 0)
                                goto fail;
                }
        }

        return;

fail:
        log_warning_errno(r, "Failed to build unit path cache, proceeding without: %m");
        m->unit_path_cache = set_free_free(m->unit_path_cache);
}
|
|
|
|
|
2015-11-10 20:42:58 +01:00
|
|
|
/* Offers any file descriptors still left in 'fds' to units whose type
 * implements the distribute_fds hook. Stops early once the set runs empty. */
static void manager_distribute_fds(Manager *m, FDSet *fds) {
        Iterator it;
        Unit *u;

        assert(m);

        HASHMAP_FOREACH(u, m->units, it) {

                /* Nothing left to hand out? Then we are done. */
                if (fdset_size(fds) <= 0)
                        break;

                /* Only unit types providing the hook can take over fds. */
                if (UNIT_VTABLE(u)->distribute_fds)
                        UNIT_VTABLE(u)->distribute_fds(u, fds);
        }
}
|
|
|
|
|
core: rework how we connect to the bus
This removes the current bus_init() call, as it had multiple problems:
it munged handling of the three bus connections we care about (private,
"api" and system) into one, even though the conditions when which was
ready are very different. It also added redundant logging, as the
individual calls it called all logged on their own anyway.
The three calls bus_init_api(), bus_init_private() and bus_init_system()
are now made public. A new call manager_dbus_is_running() is added that
works much like manager_journal_is_running() and is a lot more careful
when checking whether dbus is around. Optionally it checks the unit's
deserialized_state rather than state, in order to accomodate for cases
where we cant to connect to the bus before deserializing the
"subscribed" list, before coldplugging the units.
manager_recheck_dbus() is added, that works a lot like
manager_recheck_journal() and is invoked in unit_notify(), i.e. when
units change state.
All in all this should make handling a bit more alike to journal
handling, and it also fixes one major bug: when running in user mode
we'll now connect to the system bus early on, without conditionalizing
this in anyway.
2018-02-07 14:52:22 +01:00
|
|
|
/* Checks whether the D-Bus instance we are supposed to expose our APIs on is
 * up, by inspecting both the D-Bus socket unit and the D-Bus service unit.
 * When 'deserialized' is true, the deserialized unit state is consulted
 * instead of the live one — useful before coldplug during re-execution. */
static bool manager_dbus_is_running(Manager *m, bool deserialized) {
        Unit *u;

        assert(m);

        /* In test mode we never talk to a real bus. */
        if (m->test_run_flags != 0)
                return false;

        /* If we are in the user instance, and the env var is already set for us, then this means D-Bus is ran
         * somewhere outside of our own logic. Let's use it */
        if (MANAGER_IS_USER(m) && getenv("DBUS_SESSION_BUS_ADDRESS"))
                return true;

        /* The socket unit must exist and be in the running state. */
        u = manager_get_unit(m, SPECIAL_DBUS_SOCKET);
        if (!u || (deserialized ? SOCKET(u)->deserialized_state : SOCKET(u)->state) != SOCKET_RUNNING)
                return false;

        /* The service unit must exist and be running (or reloading). */
        u = manager_get_unit(m, SPECIAL_DBUS_SERVICE);
        if (!u || !IN_SET(deserialized ? SERVICE(u)->deserialized_state : SERVICE(u)->state, SERVICE_RUNNING, SERVICE_RELOAD))
                return false;

        return true;
}
|
|
|
|
|
2010-04-21 03:27:44 +02:00
|
|
|
/* Performs the full startup sequence of the manager: runs generators, loads
 * and enumerates units, optionally deserializes prior state from
 * 'serialization', distributes left-over fds, connects to the bus and
 * coldplugs all units. The ordering of the steps below is significant.
 * Returns 0 on success, negative errno-style error on fatal failure. */
int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
        int r;

        assert(m);

        /* If we are running in test mode, we still want to run the generators,
         * but we should not touch the real generator directories. */
        r = lookup_paths_init(&m->lookup_paths, m->unit_file_scope,
                              m->test_run_flags ? LOOKUP_PATHS_TEMPORARY_GENERATED : 0,
                              NULL);
        if (r < 0)
                return r;

        /* Environment generators run first, so that unit generators already
         * see the amended environment. */
        r = manager_run_environment_generators(m);
        if (r < 0)
                return r;

        /* Timestamps bracket the generator phase for later boot analysis. */
        dual_timestamp_get(m->timestamps + MANAGER_TIMESTAMP_GENERATORS_START);
        r = manager_run_generators(m);
        dual_timestamp_get(m->timestamps + MANAGER_TIMESTAMP_GENERATORS_FINISH);
        if (r < 0)
                return r;

        /* If this is the first boot, and we are in the host system, then preset everything */
        if (m->first_boot > 0 &&
            MANAGER_IS_SYSTEM(m) &&
            !m->test_run_flags) {

                r = unit_file_preset_all(UNIT_FILE_SYSTEM, 0, NULL, UNIT_FILE_PRESET_ENABLE_ONLY, NULL, 0);
                if (r < 0)
                        log_full_errno(r == -EEXIST ? LOG_NOTICE : LOG_WARNING, r,
                                       "Failed to populate /etc with preset unit settings, ignoring: %m");
                else
                        log_info("Populated /etc with preset unit settings.");
        }

        lookup_paths_reduce(&m->lookup_paths);
        manager_build_unit_path_cache(m);

        /* If we will deserialize make sure that during enumeration
         * this is already known, so we increase the counter here
         * already */
        if (serialization)
                m->n_reloading++;

        /* First, enumerate what we can from all config files */
        dual_timestamp_get(m->timestamps + MANAGER_TIMESTAMP_UNITS_LOAD_START);
        manager_enumerate(m);
        dual_timestamp_get(m->timestamps + MANAGER_TIMESTAMP_UNITS_LOAD_FINISH);

        /* Second, deserialize if there is something to deserialize */
        if (serialization) {
                r = manager_deserialize(m, serialization, fds);
                if (r < 0)
                        return log_error_errno(r, "Deserialization failed: %m");
        }

        /* Any fds left? Find some unit which wants them. This is
         * useful to allow container managers to pass some file
         * descriptors to us pre-initialized. This enables
         * socket-based activation of entire containers. */
        manager_distribute_fds(m, fds);

        /* We might have deserialized the notify fd, but if we didn't
         * then let's create the bus now */
        r = manager_setup_notify(m);
        if (r < 0)
                /* No sense to continue without notifications, our children would fail anyway. */
                return r;

        r = manager_setup_cgroups_agent(m);
        if (r < 0)
                /* Likewise, no sense to continue without empty cgroup notifications. */
                return r;

        r = manager_setup_user_lookup_fd(m);
        if (r < 0)
                /* This shouldn't fail, except if things are really broken. */
                return r;

        /* Let's set up our private bus connection now, unconditionally */
        (void) bus_init_private(m);

        /* If we are in --user mode also connect to the system bus now */
        if (MANAGER_IS_USER(m))
                (void) bus_init_system(m);

        /* Let's connect to the bus now, but only if the unit is supposed to be up */
        if (manager_dbus_is_running(m, !!serialization)) {
                (void) bus_init_api(m);

                if (MANAGER_IS_SYSTEM(m))
                        (void) bus_init_system(m);
        }

        /* Now that we are connected to all possible busses, let's deserialize who is tracking us. */
        (void) bus_track_coldplug(m, &m->subscribed, false, m->deserialized_subscribed);
        m->deserialized_subscribed = strv_free(m->deserialized_subscribed);

        /* Third, fire things up! */
        manager_coldplug(m);

        /* Release any dynamic users no longer referenced */
        dynamic_user_vacuum(m, true);

        /* NOTE(review): presumably releases ExecRuntime objects no longer
         * referenced by any unit — confirm against exec_runtime_vacuum(). */
        exec_runtime_vacuum(m);

        /* Release any references to UIDs/GIDs no longer referenced, and destroy any IPC owned by them */
        manager_vacuum_uid_refs(m);
        manager_vacuum_gid_refs(m);

        if (serialization) {
                /* Balance the n_reloading increment done before enumeration. */
                assert(m->n_reloading > 0);
                m->n_reloading--;

                /* Let's wait for the UnitNew/JobNew messages being
                 * sent, before we notify that the reload is
                 * finished */
                m->send_reloading_done = true;
        }

        return 0;
}
|
|
|
|
|
2015-11-12 19:52:31 +01:00
|
|
|
/* Builds and activates a job transaction of the given type for 'unit'.
 * On success, the anchor job of the transaction is optionally returned via
 * '_ret' (borrowed — owned by the manager's job table, not the caller).
 * Returns 0 on success, a negative errno-style error (with 'e' set where a
 * bus error is available) on failure. */
int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, sd_bus_error *e, Job **_ret) {
        int r;
        Transaction *tr;

        assert(m);
        assert(type < _JOB_TYPE_MAX);
        assert(unit);
        assert(mode < _JOB_MODE_MAX);

        /* Isolation only makes sense as a start operation... */
        if (mode == JOB_ISOLATE && type != JOB_START)
                return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Isolate is only valid for start.");

        /* ...and only for units that explicitly opted in via AllowIsolate=. */
        if (mode == JOB_ISOLATE && !unit->allow_isolate)
                return sd_bus_error_setf(e, BUS_ERROR_NO_ISOLATION, "Operation refused, unit may not be isolated.");

        log_unit_debug(unit, "Trying to enqueue job %s/%s/%s", unit->id, job_type_to_string(type), job_mode_to_string(mode));

        /* Collapse state-dependent job types (e.g. try-restart) into a
         * concrete type based on the unit's current state. */
        type = job_type_collapse(type, unit);

        tr = transaction_new(mode == JOB_REPLACE_IRREVERSIBLY);
        if (!tr)
                return -ENOMEM;

        r = transaction_add_job_and_dependencies(tr, type, unit, NULL, true, false,
                                                 IN_SET(mode, JOB_IGNORE_DEPENDENCIES, JOB_IGNORE_REQUIREMENTS),
                                                 mode == JOB_IGNORE_DEPENDENCIES, e);
        if (r < 0)
                goto tr_abort;

        /* Isolation additionally enqueues stop jobs for everything else. */
        if (mode == JOB_ISOLATE) {
                r = transaction_add_isolate_jobs(tr, m);
                if (r < 0)
                        goto tr_abort;
        }

        /* Verify, minimize and install the transaction into the manager. */
        r = transaction_activate(tr, m, mode, e);
        if (r < 0)
                goto tr_abort;

        log_unit_debug(unit,
                       "Enqueued job %s/%s as %u", unit->id,
                       job_type_to_string(type), (unsigned) tr->anchor_job->id);

        if (_ret)
                *_ret = tr->anchor_job;

        /* The jobs now live in the manager; the transaction shell is done. */
        transaction_free(tr);
        return 0;

tr_abort:
        /* Roll back any partially-built job graph before freeing. */
        transaction_abort(tr);
        transaction_free(tr);
        return r;
}
|
2009-11-18 00:42:52 +01:00
|
|
|
|
2015-11-12 20:13:42 +01:00
|
|
|
/* Resolves 'name' to a Unit object (loading it if necessary) and enqueues a
 * job of the given type for it. See manager_add_job() for the queueing. */
int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, sd_bus_error *e, Job **ret) {
        Unit *u = NULL; /* silence gcc; manager_load_unit() always sets this on success */
        int r;

        assert(m);
        assert(type < _JOB_TYPE_MAX);
        assert(name);
        assert(mode < _JOB_MODE_MAX);

        r = manager_load_unit(m, name, NULL, NULL, &u);
        if (r < 0)
                return r;
        assert(u);

        return manager_add_job(m, type, u, mode, e, ret);
}
|
|
|
|
|
|
|
|
/* Like manager_add_job_by_name(), but logs a warning — including the bus
 * error message — when enqueueing fails, rather than failing silently. */
int manager_add_job_by_name_and_warn(Manager *m, JobType type, const char *name, JobMode mode, Job **ret) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        int r;

        assert(m);
        assert(type < _JOB_TYPE_MAX);
        assert(name);
        assert(mode < _JOB_MODE_MAX);

        r = manager_add_job_by_name(m, type, name, mode, &error, ret);
        if (r >= 0)
                return r;

        return log_warning_errno(r, "Failed to enqueue %s job for %s: %s", job_mode_to_string(mode), name, bus_error_message(&error, r));
}
|
|
|
|
|
2017-08-07 11:27:24 +02:00
|
|
|
/* Enqueues a transaction that propagates a reload of 'unit' to all units
 * configured to be reloaded along with it. A NOP job on the unit itself
 * serves as the transaction anchor; the actual reload jobs hang off it. */
int manager_propagate_reload(Manager *m, Unit *unit, JobMode mode, sd_bus_error *e) {
        Transaction *t;
        int r;

        assert(m);
        assert(unit);
        assert(mode < _JOB_MODE_MAX);
        assert(mode != JOB_ISOLATE); /* Isolate is only valid for start */

        t = transaction_new(mode == JOB_REPLACE_IRREVERSIBLY);
        if (!t)
                return -ENOMEM;

        /* We need an anchor job */
        r = transaction_add_job_and_dependencies(t, JOB_NOP, unit, NULL, false, false, true, true, e);
        if (r < 0)
                goto rollback;

        /* Failure in adding individual dependencies is ignored, so this always succeeds. */
        transaction_add_propagate_reload_jobs(t, unit, t->anchor_job, mode == JOB_IGNORE_DEPENDENCIES, e);

        r = transaction_activate(t, m, mode, e);
        if (r < 0)
                goto rollback;

        transaction_free(t);
        return 0;

rollback:
        transaction_abort(t);
        transaction_free(t);
        return r;
}
|
|
|
|
|
2009-11-18 00:42:52 +01:00
|
|
|
/* Looks up a job by its numeric id. Returns a borrowed pointer, or NULL if
 * no job with that id exists. */
Job *manager_get_job(Manager *m, uint32_t id) {
        assert(m);

        return hashmap_get(m->jobs, UINT32_TO_PTR(id));
}
|
|
|
|
|
2010-01-26 21:39:06 +01:00
|
|
|
/* Looks up an already-loaded unit by name (including aliases, since both map
 * into m->units). Returns a borrowed pointer, or NULL if not loaded. Does NOT
 * load the unit — use manager_load_unit() for that. */
Unit *manager_get_unit(Manager *m, const char *name) {
        assert(m);
        assert(name);

        return hashmap_get(m->units, name);
}
|
|
|
|
|
2018-03-23 15:28:06 +01:00
|
|
|
/* Works through m->target_deps_queue: for every queued unit, adds the default
 * dependencies towards the target units that pull it in (via RequiredBy=,
 * RequisiteOf=, WantedBy= and BoundBy=). Returns 0 on success or the first
 * error from unit_add_default_target_dependency(). */
static int manager_dispatch_target_deps_queue(Manager *m) {
        static const UnitDependency deps[] = {
                UNIT_REQUIRED_BY,
                UNIT_REQUISITE_OF,
                UNIT_WANTED_BY,
                UNIT_BOUND_BY
        };
        Unit *u;
        int r = 0;

        assert(m);

        while ((u = m->target_deps_queue)) {
                unsigned j;

                assert(u->in_target_deps_queue);

                /* Unlink the unit from the queue before processing it. */
                LIST_REMOVE(target_deps_queue, u->manager->target_deps_queue, u);
                u->in_target_deps_queue = false;

                for (j = 0; j < ELEMENTSOF(deps); j++) {
                        Unit *target;
                        Iterator i;
                        void *v;

                        HASHMAP_FOREACH_KEY(v, target, u->dependencies[deps[j]], i) {
                                r = unit_add_default_target_dependency(u, target);
                                if (r < 0)
                                        return r;
                        }
                }
        }

        return r;
}
|
|
|
|
|
2010-02-05 00:38:41 +01:00
|
|
|
/* Drains m->load_queue, calling unit_load() on each queued unit until the
 * queue is empty, then processes the target-deps queue. Returns the number of
 * units dispatched. Guarded against recursive invocation, since unit_load()
 * itself may cause this function to be entered again. */
unsigned manager_dispatch_load_queue(Manager *m) {
        unsigned dispatched = 0;
        Unit *u;

        assert(m);

        /* Make sure we are not run recursively */
        if (m->dispatching_load_queue)
                return 0;

        m->dispatching_load_queue = true;

        /* Pop units off the load queue and load their data until nothing is left. */
        while ((u = m->load_queue)) {
                assert(u->in_load_queue);

                unit_load(u);
                dispatched++;
        }

        m->dispatching_load_queue = false;

        /* Dispatch the units waiting for their target dependencies to be added now, as all targets that we know about
         * should be loaded and have aliases resolved */
        (void) manager_dispatch_target_deps_queue(m);

        return dispatched;
}
|
|
|
|
|
2013-06-28 04:12:58 +02:00
|
|
|
/* Prepares a unit for loading without reading anything from disk: validates the name,
 * returns an existing unit if one is already registered, or allocates a fresh one and
 * places it on the load/dbus/gc queues.
 *
 * Returns 1 if the unit already existed, 0 if a new unit was created and enqueued,
 * or a negative errno-style error (with 'e' populated for user-facing errors). */
int manager_load_unit_prepare(
                Manager *m,
                const char *name,
                const char *path,
                sd_bus_error *e,
                Unit **_ret) {

        /* cleanup_ret owns the newly allocated unit until we commit to it at the bottom;
         * on any error path the unit is freed automatically. */
        _cleanup_(unit_freep) Unit *cleanup_ret = NULL;
        Unit *ret;
        UnitType t;
        int r;

        assert(m);
        assert(name || path);
        assert(_ret);

        /* This will prepare the unit for loading, but not actually
         * load anything from disk. */

        if (path && !is_path(path))
                return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Path %s is not absolute.", path);

        /* If no name was given, derive it from the file name of the path. */
        if (!name)
                name = basename(path);

        t = unit_name_to_type(name);

        if (t == _UNIT_TYPE_INVALID || !unit_name_is_valid(name, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE)) {
                /* Distinguish a template name missing its instance from an outright invalid name. */
                if (unit_name_is_valid(name, UNIT_NAME_TEMPLATE))
                        return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is missing the instance name.", name);

                return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS, "Unit name %s is not valid.", name);
        }

        /* If the unit is already known, hand it back — return 1 to signal "already there". */
        ret = manager_get_unit(m, name);
        if (ret) {
                *_ret = ret;
                return 1;
        }

        ret = cleanup_ret = unit_new(m, unit_vtable[t]->object_size);
        if (!ret)
                return -ENOMEM;

        if (path) {
                ret->fragment_path = strdup(path);
                if (!ret->fragment_path)
                        return -ENOMEM;
        }

        r = unit_add_name(ret, name);
        if (r < 0)
                return r;

        unit_add_to_load_queue(ret);
        unit_add_to_dbus_queue(ret);
        unit_add_to_gc_queue(ret);

        /* Success: transfer ownership to the caller and disarm the cleanup pointer. */
        *_ret = ret;
        cleanup_ret = NULL;

        return 0;
}
|
|
|
|
|
2013-06-28 04:12:58 +02:00
|
|
|
/* Loads a unit: prepares it via manager_load_unit_prepare() and, if it was newly
 * created, drains the load queue so its configuration is actually read. Returns 0 or
 * a positive value on success, negative errno-style error otherwise. */
int manager_load_unit(
                Manager *m,
                const char *name,
                const char *path,
                sd_bus_error *e,
                Unit **_ret) {

        int r;

        assert(m);
        assert(_ret);

        /* This will load the service information files, but not actually
         * start any services or anything. */

        r = manager_load_unit_prepare(m, name, path, e, _ret);
        /* Non-zero means either an error (< 0) or the unit was already loaded (> 0). */
        if (r != 0)
                return r;

        manager_dispatch_load_queue(m);

        /* Loading may have merged the unit into another; follow the merge chain so the
         * caller gets the canonical unit object. */
        *_ret = unit_follow_merge(*_ret);
        return 0;
}
|
|
|
|
|
|
|
|
int manager_load_startable_unit_or_warn(
|
|
|
|
Manager *m,
|
|
|
|
const char *name,
|
|
|
|
const char *path,
|
|
|
|
Unit **ret) {
|
|
|
|
|
|
|
|
/* Load a unit, make sure it loaded fully and is not masked. */
|
|
|
|
|
|
|
|
_cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
|
|
|
|
Unit *unit;
|
|
|
|
int r;
|
|
|
|
|
|
|
|
r = manager_load_unit(m, name, path, &error, &unit);
|
|
|
|
if (r < 0)
|
|
|
|
return log_error_errno(r, "Failed to load %s %s: %s",
|
|
|
|
name ? "unit" : "file", name ?: path,
|
|
|
|
bus_error_message(&error, r));
|
|
|
|
else if (IN_SET(unit->load_state, UNIT_ERROR, UNIT_NOT_FOUND))
|
|
|
|
return log_error_errno(unit->load_error, "Failed to load %s %s: %m",
|
|
|
|
name ? "unit" : "file", name ?: path);
|
|
|
|
else if (unit->load_state == UNIT_MASKED) {
|
|
|
|
log_error("%s %s is masked.",
|
|
|
|
name ? "Unit" : "File", name ?: path);
|
|
|
|
return -ERFKILL;
|
|
|
|
}
|
2010-04-15 03:11:11 +02:00
|
|
|
|
2018-04-12 15:13:14 +02:00
|
|
|
*ret = unit;
|
2009-11-18 00:42:52 +01:00
|
|
|
return 0;
|
|
|
|
}
|
2010-01-19 00:22:34 +01:00
|
|
|
|
2010-01-20 04:02:39 +01:00
|
|
|
/* Writes a human-readable description of every active job to the given stream,
 * prefixing each output line with the supplied prefix string. */
void manager_dump_jobs(Manager *s, FILE *f, const char *prefix) {
        Job *j;
        Iterator i;

        assert(s);
        assert(f);

        HASHMAP_FOREACH(j, s->jobs, i) {
                job_dump(j, f, prefix);
        }
}
|
|
|
|
|
2010-01-26 21:39:06 +01:00
|
|
|
/* Writes a human-readable description of every unit to the given stream. */
void manager_dump_units(Manager *s, FILE *f, const char *prefix) {
        Iterator i;
        Unit *u;
        const char *t;

        assert(s);
        assert(f);

        /* The units hashmap maps every name of a unit to the same Unit object; the
         * pointer comparison below (key vs. u->id) makes sure each unit is dumped only
         * once, under the name stored as its id — presumably its primary name. */
        HASHMAP_FOREACH_KEY(u, t, s->units, i)
                if (u->id == t)
                        unit_dump(u, f, prefix);
}
|
2010-01-20 05:03:52 +01:00
|
|
|
|
2017-11-20 21:11:32 +01:00
|
|
|
/* Dumps the manager's overall state to the given stream: first every timestamp that
 * has been set, then all units and jobs. */
void manager_dump(Manager *m, FILE *f, const char *prefix) {
        ManagerTimestamp q;

        assert(m);
        assert(f);

        for (q = 0; q < _MANAGER_TIMESTAMP_MAX; q++) {
                char buf[FORMAT_TIMESTAMP_MAX];

                /* Skip timestamps that were never taken. */
                if (!dual_timestamp_is_set(m->timestamps + q))
                        continue;

                fprintf(f, "%sTimestamp %s: %s\n",
                        strempty(prefix),
                        manager_timestamp_to_string(q),
                        format_timestamp(buf, sizeof(buf), m->timestamps[q].realtime));
        }

        manager_dump_units(m, f, prefix);
        manager_dump_jobs(m, f, prefix);
}
|
|
|
|
|
2017-11-20 21:20:44 +01:00
|
|
|
/* Renders the full manager dump into a newly allocated string. On success returns 0
 * and transfers ownership of the string to the caller via *ret; returns a negative
 * errno-style error otherwise. */
int manager_get_dump_string(Manager *m, char **ret) {
        _cleanup_free_ char *dump = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        size_t size;
        int r;

        assert(m);
        assert(ret);

        /* Use an in-memory stream so the existing FILE*-based dump code can be reused. */
        f = open_memstream(&dump, &size);
        if (!f)
                return -errno;

        /* We are the only user of this stream; skip stdio's internal locking. */
        (void) __fsetlocking(f, FSETLOCKING_BYCALLER);

        manager_dump(m, f, NULL);

        r = fflush_and_check(f);
        if (r < 0)
                return r;

        /* Close the stream before handing out the buffer — per POSIX the memstream
         * buffer is only guaranteed up to date after a flush or close. */
        f = safe_fclose(f);

        *ret = TAKE_PTR(dump);

        return 0;
}
|
|
|
|
|
2010-01-20 05:03:52 +01:00
|
|
|
/* Cancels and removes every job the manager currently knows about. */
void manager_clear_jobs(Manager *m) {
        assert(m);

        for (;;) {
                Job *j = hashmap_first(m->jobs);
                if (!j)
                        break;

                /* No need to recurse. We're cancelling all jobs. */
                job_finish_and_invalidate(j, JOB_CANCELED, false, false);
        }
}
|
2010-01-23 22:56:47 +01:00
|
|
|
|
2013-11-25 15:22:41 +01:00
|
|
|
/* sd-event callback: drains the run queue, starting every installed job that is ready
 * to run. Always returns 1 (handled). */
static int manager_dispatch_run_queue(sd_event_source *source, void *userdata) {
        Manager *m = userdata;
        Job *j;

        assert(source);
        assert(m);

        while ((j = m->run_queue)) {
                assert(j->installed);
                assert(j->in_run_queue);

                /* This removes the job from the run queue (and possibly invalidates it). */
                job_run_and_invalidate(j);
        }

        /* If jobs are now in flight, arm the watcher that reports slow jobs on the console. */
        if (m->n_running_jobs > 0)
                manager_watch_jobs_in_progress(m);

        /* If gettys or similar occupy the console, watch the idle pipe so we stop writing
         * status output to the console — NOTE(review): see Type=idle handling; confirm. */
        if (m->n_on_console > 0)
                manager_watch_idle_pipe(m);

        return 1;
}
|
|
|
|
|
2013-11-08 18:11:09 +01:00
|
|
|
/* Drains the D-Bus signal queues: sends change signals for queued units and jobs, the
 * pending "reloading done" signal and any queued message — but only within a per-call
 * budget, so one iteration cannot monopolize the event loop. Returns the number of
 * messages generated. */
static unsigned manager_dispatch_dbus_queue(Manager *m) {
        unsigned n = 0, budget;
        Unit *u;
        Job *j;

        assert(m);

        /* Guard against recursive invocation. */
        if (m->dispatching_dbus_queue)
                return 0;

        /* Anything to do at all? */
        if (!m->dbus_unit_queue && !m->dbus_job_queue && !m->send_reloading_done && !m->queued_message)
                return 0;

        /* Do we have overly many messages queued at the moment? If so, let's not enqueue more on top, let's sit this
         * cycle out, and process things in a later cycle when the queues got a bit emptier. */
        if (manager_bus_n_queued_write(m) > MANAGER_BUS_BUSY_THRESHOLD)
                return 0;

        /* Only process a certain number of units/jobs per event loop iteration. Even if the bus queue wasn't overly
         * full before this call we shouldn't increase it in size too wildly in one step, and we shouldn't monopolize
         * CPU time with generating these messages. Note the difference in counting of this "budget" and the
         * "threshold" above: the "budget" is decreased only once per generated message, regardless how many
         * busses/direct connections it is enqueued on, while the "threshold" is applied to each queued instance of bus
         * message, i.e. if the same message is enqueued to five busses/direct connections it will be counted five
         * times. This difference in counting ("references" vs. "instances") is primarily a result of the fact that
         * it's easier to implement it this way, however it also reflects the thinking that the "threshold" should put
         * a limit on used queue memory, i.e. space, while the "budget" should put a limit on time. Also note that
         * the "threshold" is currently chosen much higher than the "budget". */
        budget = MANAGER_BUS_MESSAGE_BUDGET;

        m->dispatching_dbus_queue = true;

        /* Units first: each dequeues itself via bus_unit_send_change_signal(). */
        while (budget > 0 && (u = m->dbus_unit_queue)) {

                assert(u->in_dbus_queue);

                bus_unit_send_change_signal(u);
                n++, budget--;
        }

        while (budget > 0 && (j = m->dbus_job_queue)) {
                assert(j->in_dbus_queue);

                bus_job_send_change_signal(j);
                n++, budget--;
        }

        m->dispatching_dbus_queue = false;

        if (budget > 0 && m->send_reloading_done) {
                m->send_reloading_done = false;
                bus_manager_send_reloading(m, false);
                n++, budget--;
        }

        /* Last consumer of the budget, hence no further decrement needed. */
        if (budget > 0 && m->queued_message) {
                bus_send_queued_message(m);
                n++;
        }

        return n;
}
|
|
|
|
|
2016-05-04 20:43:23 +02:00
|
|
|
/* sd-event callback for the cgroups release-agent socket: receives a datagram whose
 * payload is the path of a cgroup that ran empty, validates it, and notifies the
 * cgroup machinery (and, where applicable, forwards the event over the bus). */
static int manager_dispatch_cgroups_agent_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;
        char buf[PATH_MAX+1];
        ssize_t n;

        n = recv(fd, buf, sizeof(buf), 0);
        if (n < 0) {
                /* Transient errors must not propagate out of this callback: returning a
                 * negative value from an sd-event I/O handler disables the event source,
                 * which would make us permanently deaf to agent notifications. */
                if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK)
                        return 0;

                return log_error_errno(errno, "Failed to read cgroups agent message: %m");
        }
        if (n == 0) {
                log_error("Got zero-length cgroups agent message, ignoring.");
                return 0;
        }
        /* Datagram filled the whole buffer: the path was truncated, cannot be trusted. */
        if ((size_t) n >= sizeof(buf)) {
                log_error("Got overly long cgroups agent message, ignoring.");
                return 0;
        }

        /* The payload is used as a C string below; an embedded NUL would silently
         * truncate it, so reject such messages outright. */
        if (memchr(buf, 0, n)) {
                log_error("Got cgroups agent message with embedded NUL byte, ignoring.");
                return 0;
        }
        buf[n] = 0;

        manager_notify_cgroup_empty(m, buf);
        /* Forwarding over the bus is best-effort. */
        (void) bus_forward_agent_released(m, buf);

        return 0;
}
|
|
|
|
|
2018-01-05 12:20:22 +01:00
|
|
|
/* Delivers one sd_notify() message to one unit. Each unit is invoked at most once per
 * notification-dispatch iteration, tracked by comparing the unit's generation counter
 * against the manager's. */
static void manager_invoke_notify_message(
                Manager *m,
                Unit *u,
                const struct ucred *ucred,
                const char *buf,
                FDSet *fds) {

        assert(m);
        assert(u);
        assert(ucred);
        assert(buf);

        if (u->notifygen == m->notifygen) /* Already invoked on this same unit in this same iteration? */
                return;
        u->notifygen = m->notifygen;

        if (UNIT_VTABLE(u)->notify_message) {
                _cleanup_strv_free_ char **tags = NULL;

                /* Messages are newline-separated "TAG=value" assignments; split them for
                 * the unit-type-specific handler. */
                tags = strv_split(buf, NEWLINE);
                if (!tags) {
                        log_oom();
                        return;
                }

                UNIT_VTABLE(u)->notify_message(u, ucred, tags, fds);

        } else if (DEBUG_LOGGING) {
                /* The unit type does not handle notifications; log a shortened, escaped
                 * preview of the message for debugging purposes only. */
                _cleanup_free_ char *x = NULL, *y = NULL;

                x = ellipsize(buf, 20, 90);
                if (x)
                        y = cescape(x);

                log_unit_debug(u, "Got notification message \"%s\", ignoring.", strnull(y));
        }
}
|
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
static int manager_dispatch_notify_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
|
2016-05-23 15:57:18 +02:00
|
|
|
|
2015-10-28 19:11:36 +01:00
|
|
|
_cleanup_fdset_free_ FDSet *fds = NULL;
|
2013-11-19 21:12:59 +01:00
|
|
|
Manager *m = userdata;
|
2015-10-28 19:11:36 +01:00
|
|
|
char buf[NOTIFY_BUFFER_MAX+1];
|
|
|
|
struct iovec iovec = {
|
|
|
|
.iov_base = buf,
|
|
|
|
.iov_len = sizeof(buf)-1,
|
|
|
|
};
|
|
|
|
union {
|
|
|
|
struct cmsghdr cmsghdr;
|
|
|
|
uint8_t buf[CMSG_SPACE(sizeof(struct ucred)) +
|
|
|
|
CMSG_SPACE(sizeof(int) * NOTIFY_FD_MAX)];
|
|
|
|
} control = {};
|
|
|
|
struct msghdr msghdr = {
|
|
|
|
.msg_iov = &iovec,
|
|
|
|
.msg_iovlen = 1,
|
|
|
|
.msg_control = &control,
|
|
|
|
.msg_controllen = sizeof(control),
|
|
|
|
};
|
|
|
|
|
|
|
|
struct cmsghdr *cmsg;
|
|
|
|
struct ucred *ucred = NULL;
|
core: rework how we track which PIDs to watch for a unit
Previously, we'd maintain two hashmaps keyed by PIDs, pointing to Unit
interested in SIGCHLD events for them. This scheme allowed a specific
PID to be watched by exactly 0, 1 or 2 units.
With this rework this is replaced by a single hashmap which is primarily
keyed by the PID and points to a Unit interested in it. However, it
optionally also keyed by the negated PID, in which case it points to a
NULL terminated array of additional Unit objects also interested. This
scheme means arbitrary numbers of Units may now watch the same PID.
Runtime and memory behaviour should not be impact by this change, as for
the common case (i.e. each PID only watched by a single unit) behaviour
stays the same, but for the uncommon case (a PID watched by more than
one unit) we only pay with a single additional memory allocation for the
array.
Why this all? Primarily, because allowing exactly two units to watch a
specific PID is not sufficient for some niche cases, as processes can
belong to more than one unit these days:
1. sd_notify() with MAINPID= can be used to attach a process from a
different cgroup to multiple units.
2. Similar, the PIDFile= setting in unit files can be used for similar
setups,
3. By creating a scope unit a main process of a service may join a
different unit, too.
4. On cgroupsv1 we frequently end up watching all processes remaining in
a scope, and if a process opens lots of scopes one after the other it
might thus end up being watch by many of them.
This patch hence removes the 2-unit-per-PID limit. It also makes a
couple of other changes, some of them quite relevant:
- manager_get_unit_by_pid() (and the bus call wrapping it) when there's
ambiguity will prefer returning the Unit the process belongs to based on
cgroup membership, and only check the watch-pids hashmap if that
fails. This change in logic is probably more in line with what people
expect and makes things more stable as each process can belong to
exactly one cgroup only.
- Every SIGCHLD event is now dispatched to all units interested in its
PID. Previously, there was some magic conditionalization: the SIGCHLD
would only be dispatched to the unit if it was only interested in a
single PID only, or the PID belonged to the control or main PID or we
didn't dispatch a signle SIGCHLD to the unit in the current event loop
iteration yet. These rules were quite arbitrary and also redundant as
the the per-unit handlers would filter the PIDs anyway a second time.
With this change we'll hence relax the rules: all we do now is
dispatch every SIGCHLD event exactly once to each unit interested in
it, and it's up to the unit to then use or ignore this. We use a
generation counter in the unit to ensure that we only invoke the unit
handler once for each event, protecting us from confusion if a unit is
both associated with a specific PID through cgroup membership and
through the "watch_pids" logic. It also protects us from being
confused if the "watch_pids" hashmap is altered while we are
dispatching to it (which is a very likely case).
- sd_notify() message dispatching has been reworked to be very similar
to SIGCHLD handling now. A generation counter is used for dispatching
as well.
This also adds a new test that validates that "watch_pid" registration
and unregstration works correctly.
2018-01-12 13:41:05 +01:00
|
|
|
_cleanup_free_ Unit **array_copy = NULL;
|
|
|
|
Unit *u1, *u2, **array;
|
2015-10-28 19:11:36 +01:00
|
|
|
int r, *fd_array = NULL;
|
tree-wide: be more careful with the type of array sizes
Previously we were a bit sloppy with the index and size types of arrays,
we'd regularly use unsigned. While I don't think this ever resulted in
real issues I think we should be more careful there and follow a
stricter regime: unless there's a strong reason not to use size_t for
array sizes and indexes, size_t it should be. Any allocations we do
ultimately will use size_t anyway, and converting forth and back between
unsigned and size_t will always be a source of problems.
Note that on 32bit machines "unsigned" and "size_t" are equivalent, and
on 64bit machines our arrays shouldn't grow that large anyway, and if
they do we have a problem, however that kind of overly large allocation
we have protections for usually, but for overflows we do not have that
so much, hence let's add it.
So yeah, it's a story of the current code being already "good enough",
but I think some extra type hygiene is better.
This patch tries to be comprehensive, but it probably isn't and I missed
a few cases. But I guess we can cover that later as we notice it. Among
smaller fixes, this changes:
1. strv_length()' return type becomes size_t
2. the unit file changes array size becomes size_t
3. DNS answer and query array sizes become size_t
Fixes: https://bugs.freedesktop.org/show_bug.cgi?id=76745
2018-04-27 14:09:31 +02:00
|
|
|
size_t n_fds = 0;
|
core: rework how we track which PIDs to watch for a unit
Previously, we'd maintain two hashmaps keyed by PIDs, pointing to Unit
interested in SIGCHLD events for them. This scheme allowed a specific
PID to be watched by exactly 0, 1 or 2 units.
With this rework this is replaced by a single hashmap which is primarily
keyed by the PID and points to a Unit interested in it. However, it
optionally also keyed by the negated PID, in which case it points to a
NULL terminated array of additional Unit objects also interested. This
scheme means arbitrary numbers of Units may now watch the same PID.
Runtime and memory behaviour should not be impacted by this change, as for
the common case (i.e. each PID only watched by a single unit) behaviour
stays the same, but for the uncommon case (a PID watched by more than
one unit) we only pay with a single additional memory allocation for the
array.
Why this all? Primarily, because allowing exactly two units to watch a
specific PID is not sufficient for some niche cases, as processes can
belong to more than one unit these days:
1. sd_notify() with MAINPID= can be used to attach a process from a
different cgroup to multiple units.
2. Similar, the PIDFile= setting in unit files can be used for similar
setups,
3. By creating a scope unit a main process of a service may join a
different unit, too.
4. On cgroupsv1 we frequently end up watching all processes remaining in
a scope, and if a process opens lots of scopes one after the other it
might thus end up being watched by many of them.
This patch hence removes the 2-unit-per-PID limit. It also makes a
couple of other changes, some of them quite relevant:
- manager_get_unit_by_pid() (and the bus call wrapping it) when there's
ambiguity will prefer returning the Unit the process belongs to based on
cgroup membership, and only check the watch-pids hashmap if that
fails. This change in logic is probably more in line with what people
expect and makes things more stable as each process can belong to
exactly one cgroup only.
- Every SIGCHLD event is now dispatched to all units interested in its
PID. Previously, there was some magic conditionalization: the SIGCHLD
would only be dispatched to the unit if it was only interested in a
single PID only, or the PID belonged to the control or main PID or we
didn't dispatch a single SIGCHLD to the unit in the current event loop
iteration yet. These rules were quite arbitrary and also redundant as
the per-unit handlers would filter the PIDs anyway a second time.
With this change we'll hence relax the rules: all we do now is
dispatch every SIGCHLD event exactly once to each unit interested in
it, and it's up to the unit to then use or ignore this. We use a
generation counter in the unit to ensure that we only invoke the unit
handler once for each event, protecting us from confusion if a unit is
both associated with a specific PID through cgroup membership and
through the "watch_pids" logic. It also protects us from being
confused if the "watch_pids" hashmap is altered while we are
dispatching to it (which is a very likely case).
- sd_notify() message dispatching has been reworked to be very similar
to SIGCHLD handling now. A generation counter is used for dispatching
as well.
This also adds a new test that validates that "watch_pid" registration
and unregistration works correctly.
2018-01-12 13:41:05 +01:00
|
|
|
bool found = false;
|
2010-06-16 05:10:31 +02:00
|
|
|
ssize_t n;
|
|
|
|
|
|
|
|
assert(m);
|
2013-11-19 21:12:59 +01:00
|
|
|
assert(m->notify_fd == fd);
|
|
|
|
|
|
|
|
if (revents != EPOLLIN) {
|
|
|
|
log_warning("Got unexpected poll event for notify fd.");
|
|
|
|
return 0;
|
|
|
|
}
|
2010-06-16 05:10:31 +02:00
|
|
|
|
2016-10-07 12:12:10 +02:00
|
|
|
n = recvmsg(m->notify_fd, &msghdr, MSG_DONTWAIT|MSG_CMSG_CLOEXEC|MSG_TRUNC);
|
2015-10-28 19:11:36 +01:00
|
|
|
if (n < 0) {
|
2016-10-07 12:08:51 +02:00
|
|
|
if (IN_SET(errno, EAGAIN, EINTR))
|
|
|
|
return 0; /* Spurious wakeup, try again */
|
2010-06-16 05:10:31 +02:00
|
|
|
|
2016-10-07 12:08:51 +02:00
|
|
|
/* If this is any other, real error, then let's stop processing this socket. This of course means we
|
|
|
|
* won't take notification messages anymore, but that's still better than busy looping around this:
|
|
|
|
* being woken up over and over again but being unable to actually read the message off the socket. */
|
|
|
|
return log_error_errno(errno, "Failed to receive notification message: %m");
|
2015-10-28 19:11:36 +01:00
|
|
|
}
|
2015-01-06 00:26:25 +01:00
|
|
|
|
2015-10-28 19:11:36 +01:00
|
|
|
CMSG_FOREACH(cmsg, &msghdr) {
|
|
|
|
if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
|
2015-01-06 00:26:25 +01:00
|
|
|
|
2015-10-28 19:11:36 +01:00
|
|
|
fd_array = (int*) CMSG_DATA(cmsg);
|
|
|
|
n_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
|
2015-01-06 00:26:25 +01:00
|
|
|
|
2015-10-28 19:11:36 +01:00
|
|
|
} else if (cmsg->cmsg_level == SOL_SOCKET &&
|
|
|
|
cmsg->cmsg_type == SCM_CREDENTIALS &&
|
|
|
|
cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred))) {
|
2015-01-06 00:26:25 +01:00
|
|
|
|
2015-10-28 19:11:36 +01:00
|
|
|
ucred = (struct ucred*) CMSG_DATA(cmsg);
|
2015-01-06 00:26:25 +01:00
|
|
|
}
|
2015-10-28 19:11:36 +01:00
|
|
|
}
|
2015-01-06 00:26:25 +01:00
|
|
|
|
2015-10-28 19:11:36 +01:00
|
|
|
if (n_fds > 0) {
|
|
|
|
assert(fd_array);
|
2015-01-06 00:26:25 +01:00
|
|
|
|
2015-10-28 19:11:36 +01:00
|
|
|
r = fdset_new_array(&fds, fd_array, n_fds);
|
|
|
|
if (r < 0) {
|
|
|
|
close_many(fd_array, n_fds);
|
2016-09-29 19:44:34 +02:00
|
|
|
log_oom();
|
|
|
|
return 0;
|
2015-01-06 00:26:25 +01:00
|
|
|
}
|
2015-10-28 19:11:36 +01:00
|
|
|
}
|
2010-06-16 05:10:31 +02:00
|
|
|
|
2018-01-05 12:19:22 +01:00
|
|
|
if (!ucred || !pid_is_valid(ucred->pid)) {
|
2015-10-28 19:11:36 +01:00
|
|
|
log_warning("Received notify message without valid credentials. Ignoring.");
|
|
|
|
return 0;
|
|
|
|
}
|
2010-06-16 05:10:31 +02:00
|
|
|
|
2016-10-07 12:12:10 +02:00
|
|
|
if ((size_t) n >= sizeof(buf) || (msghdr.msg_flags & MSG_TRUNC)) {
|
2015-10-28 19:11:36 +01:00
|
|
|
log_warning("Received notify message exceeded maximum size. Ignoring.");
|
|
|
|
return 0;
|
|
|
|
}
|
2010-06-16 05:10:31 +02:00
|
|
|
|
2016-10-07 12:14:33 +02:00
|
|
|
/* As extra safety check, let's make sure the string we get doesn't contain embedded NUL bytes. We permit one
|
|
|
|
* trailing NUL byte in the message, but don't expect it. */
|
|
|
|
if (n > 1 && memchr(buf, 0, n-1)) {
|
|
|
|
log_warning("Received notify message with embedded NUL bytes. Ignoring.");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Make sure it's NUL-terminated. */
|
2015-10-28 19:11:36 +01:00
|
|
|
buf[n] = 0;
|
2010-06-16 05:10:31 +02:00
|
|
|
|
core: rework how we track which PIDs to watch for a unit
Previously, we'd maintain two hashmaps keyed by PIDs, pointing to Unit
interested in SIGCHLD events for them. This scheme allowed a specific
PID to be watched by exactly 0, 1 or 2 units.
With this rework this is replaced by a single hashmap which is primarily
keyed by the PID and points to a Unit interested in it. However, it
optionally also keyed by the negated PID, in which case it points to a
NULL terminated array of additional Unit objects also interested. This
scheme means arbitrary numbers of Units may now watch the same PID.
Runtime and memory behaviour should not be impacted by this change, as for
the common case (i.e. each PID only watched by a single unit) behaviour
stays the same, but for the uncommon case (a PID watched by more than
one unit) we only pay with a single additional memory allocation for the
array.
Why this all? Primarily, because allowing exactly two units to watch a
specific PID is not sufficient for some niche cases, as processes can
belong to more than one unit these days:
1. sd_notify() with MAINPID= can be used to attach a process from a
different cgroup to multiple units.
2. Similar, the PIDFile= setting in unit files can be used for similar
setups,
3. By creating a scope unit a main process of a service may join a
different unit, too.
4. On cgroupsv1 we frequently end up watching all processes remaining in
a scope, and if a process opens lots of scopes one after the other it
might thus end up being watched by many of them.
This patch hence removes the 2-unit-per-PID limit. It also makes a
couple of other changes, some of them quite relevant:
- manager_get_unit_by_pid() (and the bus call wrapping it) when there's
ambiguity will prefer returning the Unit the process belongs to based on
cgroup membership, and only check the watch-pids hashmap if that
fails. This change in logic is probably more in line with what people
expect and makes things more stable as each process can belong to
exactly one cgroup only.
- Every SIGCHLD event is now dispatched to all units interested in its
PID. Previously, there was some magic conditionalization: the SIGCHLD
would only be dispatched to the unit if it was only interested in a
single PID only, or the PID belonged to the control or main PID or we
didn't dispatch a single SIGCHLD to the unit in the current event loop
iteration yet. These rules were quite arbitrary and also redundant as
the per-unit handlers would filter the PIDs anyway a second time.
With this change we'll hence relax the rules: all we do now is
dispatch every SIGCHLD event exactly once to each unit interested in
it, and it's up to the unit to then use or ignore this. We use a
generation counter in the unit to ensure that we only invoke the unit
handler once for each event, protecting us from confusion if a unit is
both associated with a specific PID through cgroup membership and
through the "watch_pids" logic. It also protects us from being
confused if the "watch_pids" hashmap is altered while we are
dispatching to it (which is a very likely case).
- sd_notify() message dispatching has been reworked to be very similar
to SIGCHLD handling now. A generation counter is used for dispatching
as well.
This also adds a new test that validates that "watch_pid" registration
and unregistration works correctly.
2018-01-12 13:41:05 +01:00
|
|
|
/* Increase the generation counter used for filtering out duplicate unit invocations. */
|
|
|
|
m->notifygen++;
|
|
|
|
|
|
|
|
/* Notify every unit that might be interested, which might be multiple. */
|
2015-10-28 19:11:36 +01:00
|
|
|
u1 = manager_get_unit_by_pid_cgroup(m, ucred->pid);
|
core: rework how we track which PIDs to watch for a unit
Previously, we'd maintain two hashmaps keyed by PIDs, pointing to Unit
interested in SIGCHLD events for them. This scheme allowed a specific
PID to be watched by exactly 0, 1 or 2 units.
With this rework this is replaced by a single hashmap which is primarily
keyed by the PID and points to a Unit interested in it. However, it
optionally also keyed by the negated PID, in which case it points to a
NULL terminated array of additional Unit objects also interested. This
scheme means arbitrary numbers of Units may now watch the same PID.
Runtime and memory behaviour should not be impacted by this change, as for
the common case (i.e. each PID only watched by a single unit) behaviour
stays the same, but for the uncommon case (a PID watched by more than
one unit) we only pay with a single additional memory allocation for the
array.
Why this all? Primarily, because allowing exactly two units to watch a
specific PID is not sufficient for some niche cases, as processes can
belong to more than one unit these days:
1. sd_notify() with MAINPID= can be used to attach a process from a
different cgroup to multiple units.
2. Similar, the PIDFile= setting in unit files can be used for similar
setups,
3. By creating a scope unit a main process of a service may join a
different unit, too.
4. On cgroupsv1 we frequently end up watching all processes remaining in
a scope, and if a process opens lots of scopes one after the other it
might thus end up being watched by many of them.
This patch hence removes the 2-unit-per-PID limit. It also makes a
couple of other changes, some of them quite relevant:
- manager_get_unit_by_pid() (and the bus call wrapping it) when there's
ambiguity will prefer returning the Unit the process belongs to based on
cgroup membership, and only check the watch-pids hashmap if that
fails. This change in logic is probably more in line with what people
expect and makes things more stable as each process can belong to
exactly one cgroup only.
- Every SIGCHLD event is now dispatched to all units interested in its
PID. Previously, there was some magic conditionalization: the SIGCHLD
would only be dispatched to the unit if it was only interested in a
single PID only, or the PID belonged to the control or main PID or we
didn't dispatch a single SIGCHLD to the unit in the current event loop
iteration yet. These rules were quite arbitrary and also redundant as
the per-unit handlers would filter the PIDs anyway a second time.
With this change we'll hence relax the rules: all we do now is
dispatch every SIGCHLD event exactly once to each unit interested in
it, and it's up to the unit to then use or ignore this. We use a
generation counter in the unit to ensure that we only invoke the unit
handler once for each event, protecting us from confusion if a unit is
both associated with a specific PID through cgroup membership and
through the "watch_pids" logic. It also protects us from being
confused if the "watch_pids" hashmap is altered while we are
dispatching to it (which is a very likely case).
- sd_notify() message dispatching has been reworked to be very similar
to SIGCHLD handling now. A generation counter is used for dispatching
as well.
This also adds a new test that validates that "watch_pid" registration
and unregistration works correctly.
2018-01-12 13:41:05 +01:00
|
|
|
u2 = hashmap_get(m->watch_pids, PID_TO_PTR(ucred->pid));
|
|
|
|
array = hashmap_get(m->watch_pids, PID_TO_PTR(-ucred->pid));
|
|
|
|
if (array) {
|
|
|
|
size_t k = 0;
|
2014-02-07 11:58:25 +01:00
|
|
|
|
core: rework how we track which PIDs to watch for a unit
Previously, we'd maintain two hashmaps keyed by PIDs, pointing to Unit
interested in SIGCHLD events for them. This scheme allowed a specific
PID to be watched by exactly 0, 1 or 2 units.
With this rework this is replaced by a single hashmap which is primarily
keyed by the PID and points to a Unit interested in it. However, it
optionally also keyed by the negated PID, in which case it points to a
NULL terminated array of additional Unit objects also interested. This
scheme means arbitrary numbers of Units may now watch the same PID.
Runtime and memory behaviour should not be impacted by this change, as for
the common case (i.e. each PID only watched by a single unit) behaviour
stays the same, but for the uncommon case (a PID watched by more than
one unit) we only pay with a single additional memory allocation for the
array.
Why this all? Primarily, because allowing exactly two units to watch a
specific PID is not sufficient for some niche cases, as processes can
belong to more than one unit these days:
1. sd_notify() with MAINPID= can be used to attach a process from a
different cgroup to multiple units.
2. Similar, the PIDFile= setting in unit files can be used for similar
setups,
3. By creating a scope unit a main process of a service may join a
different unit, too.
4. On cgroupsv1 we frequently end up watching all processes remaining in
a scope, and if a process opens lots of scopes one after the other it
might thus end up being watched by many of them.
This patch hence removes the 2-unit-per-PID limit. It also makes a
couple of other changes, some of them quite relevant:
- manager_get_unit_by_pid() (and the bus call wrapping it) when there's
ambiguity will prefer returning the Unit the process belongs to based on
cgroup membership, and only check the watch-pids hashmap if that
fails. This change in logic is probably more in line with what people
expect and makes things more stable as each process can belong to
exactly one cgroup only.
- Every SIGCHLD event is now dispatched to all units interested in its
PID. Previously, there was some magic conditionalization: the SIGCHLD
would only be dispatched to the unit if it was only interested in a
single PID only, or the PID belonged to the control or main PID or we
didn't dispatch a single SIGCHLD to the unit in the current event loop
iteration yet. These rules were quite arbitrary and also redundant as
the per-unit handlers would filter the PIDs anyway a second time.
With this change we'll hence relax the rules: all we do now is
dispatch every SIGCHLD event exactly once to each unit interested in
it, and it's up to the unit to then use or ignore this. We use a
generation counter in the unit to ensure that we only invoke the unit
handler once for each event, protecting us from confusion if a unit is
both associated with a specific PID through cgroup membership and
through the "watch_pids" logic. It also protects us from being
confused if the "watch_pids" hashmap is altered while we are
dispatching to it (which is a very likely case).
- sd_notify() message dispatching has been reworked to be very similar
to SIGCHLD handling now. A generation counter is used for dispatching
as well.
This also adds a new test that validates that "watch_pid" registration
and unregistration works correctly.
2018-01-12 13:41:05 +01:00
|
|
|
while (array[k])
|
|
|
|
k++;
|
2014-02-07 11:58:25 +01:00
|
|
|
|
core: rework how we track which PIDs to watch for a unit
Previously, we'd maintain two hashmaps keyed by PIDs, pointing to Unit
interested in SIGCHLD events for them. This scheme allowed a specific
PID to be watched by exactly 0, 1 or 2 units.
With this rework this is replaced by a single hashmap which is primarily
keyed by the PID and points to a Unit interested in it. However, it
optionally also keyed by the negated PID, in which case it points to a
NULL terminated array of additional Unit objects also interested. This
scheme means arbitrary numbers of Units may now watch the same PID.
Runtime and memory behaviour should not be impacted by this change, as for
the common case (i.e. each PID only watched by a single unit) behaviour
stays the same, but for the uncommon case (a PID watched by more than
one unit) we only pay with a single additional memory allocation for the
array.
Why this all? Primarily, because allowing exactly two units to watch a
specific PID is not sufficient for some niche cases, as processes can
belong to more than one unit these days:
1. sd_notify() with MAINPID= can be used to attach a process from a
different cgroup to multiple units.
2. Similar, the PIDFile= setting in unit files can be used for similar
setups,
3. By creating a scope unit a main process of a service may join a
different unit, too.
4. On cgroupsv1 we frequently end up watching all processes remaining in
a scope, and if a process opens lots of scopes one after the other it
might thus end up being watched by many of them.
This patch hence removes the 2-unit-per-PID limit. It also makes a
couple of other changes, some of them quite relevant:
- manager_get_unit_by_pid() (and the bus call wrapping it) when there's
ambiguity will prefer returning the Unit the process belongs to based on
cgroup membership, and only check the watch-pids hashmap if that
fails. This change in logic is probably more in line with what people
expect and makes things more stable as each process can belong to
exactly one cgroup only.
- Every SIGCHLD event is now dispatched to all units interested in its
PID. Previously, there was some magic conditionalization: the SIGCHLD
would only be dispatched to the unit if it was only interested in a
single PID only, or the PID belonged to the control or main PID or we
didn't dispatch a single SIGCHLD to the unit in the current event loop
iteration yet. These rules were quite arbitrary and also redundant as
the per-unit handlers would filter the PIDs anyway a second time.
With this change we'll hence relax the rules: all we do now is
dispatch every SIGCHLD event exactly once to each unit interested in
it, and it's up to the unit to then use or ignore this. We use a
generation counter in the unit to ensure that we only invoke the unit
handler once for each event, protecting us from confusion if a unit is
both associated with a specific PID through cgroup membership and
through the "watch_pids" logic. It also protects us from being
confused if the "watch_pids" hashmap is altered while we are
dispatching to it (which is a very likely case).
- sd_notify() message dispatching has been reworked to be very similar
to SIGCHLD handling now. A generation counter is used for dispatching
as well.
This also adds a new test that validates that "watch_pid" registration
and unregistration works correctly.
2018-01-12 13:41:05 +01:00
|
|
|
array_copy = newdup(Unit*, array, k+1);
|
|
|
|
if (!array_copy)
|
|
|
|
log_oom();
|
|
|
|
}
|
|
|
|
/* And now invoke the per-unit callbacks. Note that manager_invoke_notify_message() will handle duplicate units
|
|
|
|
* make sure we only invoke each unit's handler once. */
|
|
|
|
if (u1) {
|
|
|
|
manager_invoke_notify_message(m, u1, ucred, buf, fds);
|
|
|
|
found = true;
|
|
|
|
}
|
|
|
|
if (u2) {
|
|
|
|
manager_invoke_notify_message(m, u2, ucred, buf, fds);
|
|
|
|
found = true;
|
|
|
|
}
|
|
|
|
if (array_copy)
|
|
|
|
for (size_t i = 0; array_copy[i]; i++) {
|
|
|
|
manager_invoke_notify_message(m, array_copy[i], ucred, buf, fds);
|
|
|
|
found = true;
|
|
|
|
}
|
2010-06-16 05:10:31 +02:00
|
|
|
|
core: rework how we track which PIDs to watch for a unit
Previously, we'd maintain two hashmaps keyed by PIDs, pointing to Unit
interested in SIGCHLD events for them. This scheme allowed a specific
PID to be watched by exactly 0, 1 or 2 units.
With this rework this is replaced by a single hashmap which is primarily
keyed by the PID and points to a Unit interested in it. However, it
optionally also keyed by the negated PID, in which case it points to a
NULL terminated array of additional Unit objects also interested. This
scheme means arbitrary numbers of Units may now watch the same PID.
Runtime and memory behaviour should not be impacted by this change, as for
the common case (i.e. each PID only watched by a single unit) behaviour
stays the same, but for the uncommon case (a PID watched by more than
one unit) we only pay with a single additional memory allocation for the
array.
Why this all? Primarily, because allowing exactly two units to watch a
specific PID is not sufficient for some niche cases, as processes can
belong to more than one unit these days:
1. sd_notify() with MAINPID= can be used to attach a process from a
different cgroup to multiple units.
2. Similar, the PIDFile= setting in unit files can be used for similar
setups,
3. By creating a scope unit a main process of a service may join a
different unit, too.
4. On cgroupsv1 we frequently end up watching all processes remaining in
a scope, and if a process opens lots of scopes one after the other it
might thus end up being watched by many of them.
This patch hence removes the 2-unit-per-PID limit. It also makes a
couple of other changes, some of them quite relevant:
- manager_get_unit_by_pid() (and the bus call wrapping it) when there's
ambiguity will prefer returning the Unit the process belongs to based on
cgroup membership, and only check the watch-pids hashmap if that
fails. This change in logic is probably more in line with what people
expect and makes things more stable as each process can belong to
exactly one cgroup only.
- Every SIGCHLD event is now dispatched to all units interested in its
PID. Previously, there was some magic conditionalization: the SIGCHLD
would only be dispatched to the unit if it was only interested in a
single PID only, or the PID belonged to the control or main PID or we
didn't dispatch a single SIGCHLD to the unit in the current event loop
iteration yet. These rules were quite arbitrary and also redundant as
the per-unit handlers would filter the PIDs anyway a second time.
With this change we'll hence relax the rules: all we do now is
dispatch every SIGCHLD event exactly once to each unit interested in
it, and it's up to the unit to then use or ignore this. We use a
generation counter in the unit to ensure that we only invoke the unit
handler once for each event, protecting us from confusion if a unit is
both associated with a specific PID through cgroup membership and
through the "watch_pids" logic. It also protects us from being
confused if the "watch_pids" hashmap is altered while we are
dispatching to it (which is a very likely case).
- sd_notify() message dispatching has been reworked to be very similar
to SIGCHLD handling now. A generation counter is used for dispatching
as well.
This also adds a new test that validates that "watch_pid" registration
and unregstration works correctly.
2018-01-12 13:41:05 +01:00
|
|
|
if (!found)
|
|
|
|
log_warning("Cannot find unit for notify message of PID "PID_FMT", ignoring.", ucred->pid);
|
2015-01-06 00:26:25 +01:00
|
|
|
|
2015-10-28 19:11:36 +01:00
|
|
|
if (fdset_size(fds) > 0)
|
2016-09-30 13:35:07 +02:00
|
|
|
log_warning("Got extra auxiliary fds with notification message, closing them.");
|
2010-06-16 05:10:31 +02:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
core: rework how we track which PIDs to watch for a unit
Previously, we'd maintain two hashmaps keyed by PIDs, pointing to Unit
interested in SIGCHLD events for them. This scheme allowed a specific
PID to be watched by exactly 0, 1 or 2 units.
With this rework this is replaced by a single hashmap which is primarily
keyed by the PID and points to a Unit interested in it. However, it
optionally also keyed by the negated PID, in which case it points to a
NULL terminated array of additional Unit objects also interested. This
scheme means arbitrary numbers of Units may now watch the same PID.
Runtime and memory behaviour should not be impacted by this change, as for
the common case (i.e. each PID only watched by a single unit) behaviour
stays the same, but for the uncommon case (a PID watched by more than
one unit) we only pay with a single additional memory allocation for the
array.
Why this all? Primarily, because allowing exactly two units to watch a
specific PID is not sufficient for some niche cases, as processes can
belong to more than one unit these days:
1. sd_notify() with MAINPID= can be used to attach a process from a
different cgroup to multiple units.
2. Similar, the PIDFile= setting in unit files can be used for similar
setups,
3. By creating a scope unit a main process of a service may join a
different unit, too.
4. On cgroupsv1 we frequently end up watching all processes remaining in
a scope, and if a process opens lots of scopes one after the other it
might thus end up being watched by many of them.
This patch hence removes the 2-unit-per-PID limit. It also makes a
couple of other changes, some of them quite relevant:
- manager_get_unit_by_pid() (and the bus call wrapping it) when there's
ambiguity will prefer returning the Unit the process belongs to based on
cgroup membership, and only check the watch-pids hashmap if that
fails. This change in logic is probably more in line with what people
expect and makes things more stable as each process can belong to
exactly one cgroup only.
- Every SIGCHLD event is now dispatched to all units interested in its
PID. Previously, there was some magic conditionalization: the SIGCHLD
would only be dispatched to the unit if it was only interested in a
single PID only, or the PID belonged to the control or main PID or we
didn't dispatch a single SIGCHLD to the unit in the current event loop
iteration yet. These rules were quite arbitrary and also redundant as
the per-unit handlers would filter the PIDs anyway a second time.
With this change we'll hence relax the rules: all we do now is
dispatch every SIGCHLD event exactly once to each unit interested in
it, and it's up to the unit to then use or ignore this. We use a
generation counter in the unit to ensure that we only invoke the unit
handler once for each event, protecting us from confusion if a unit is
both associated with a specific PID through cgroup membership and
through the "watch_pids" logic. It also protects us from being
confused if the "watch_pids" hashmap is altered while we are
dispatching to it (which is a very likely case).
- sd_notify() message dispatching has been reworked to be very similar
to SIGCHLD handling now. A generation counter is used for dispatching
as well.
This also adds a new test that validates that "watch_pid" registration
and unregstration works correctly.
2018-01-12 13:41:05 +01:00
|
|
|
static void manager_invoke_sigchld_event(
|
|
|
|
Manager *m,
|
|
|
|
Unit *u,
|
|
|
|
const siginfo_t *si) {
|
2016-06-30 21:12:18 +02:00
|
|
|
|
2014-02-07 11:58:25 +01:00
|
|
|
assert(m);
|
|
|
|
assert(u);
|
|
|
|
assert(si);
|
|
|
|
|
core: rework how we track which PIDs to watch for a unit
Previously, we'd maintain two hashmaps keyed by PIDs, pointing to Unit
interested in SIGCHLD events for them. This scheme allowed a specific
PID to be watched by exactly 0, 1 or 2 units.
With this rework this is replaced by a single hashmap which is primarily
keyed by the PID and points to a Unit interested in it. However, it
optionally also keyed by the negated PID, in which case it points to a
NULL terminated array of additional Unit objects also interested. This
scheme means arbitrary numbers of Units may now watch the same PID.
Runtime and memory behaviour should not be impact by this change, as for
the common case (i.e. each PID only watched by a single unit) behaviour
stays the same, but for the uncommon case (a PID watched by more than
one unit) we only pay with a single additional memory allocation for the
array.
Why this all? Primarily, because allowing exactly two units to watch a
specific PID is not sufficient for some niche cases, as processes can
belong to more than one unit these days:
1. sd_notify() with MAINPID= can be used to attach a process from a
different cgroup to multiple units.
2. Similar, the PIDFile= setting in unit files can be used for similar
setups,
3. By creating a scope unit a main process of a service may join a
different unit, too.
4. On cgroupsv1 we frequently end up watching all processes remaining in
a scope, and if a process opens lots of scopes one after the other it
might thus end up being watch by many of them.
This patch hence removes the 2-unit-per-PID limit. It also makes a
couple of other changes, some of them quite relevant:
- manager_get_unit_by_pid() (and the bus call wrapping it) when there's
ambiguity will prefer returning the Unit the process belongs to based on
cgroup membership, and only check the watch-pids hashmap if that
fails. This change in logic is probably more in line with what people
expect and makes things more stable as each process can belong to
exactly one cgroup only.
- Every SIGCHLD event is now dispatched to all units interested in its
PID. Previously, there was some magic conditionalization: the SIGCHLD
would only be dispatched to the unit if it was only interested in a
single PID only, or the PID belonged to the control or main PID or we
didn't dispatch a signle SIGCHLD to the unit in the current event loop
iteration yet. These rules were quite arbitrary and also redundant as
the the per-unit handlers would filter the PIDs anyway a second time.
With this change we'll hence relax the rules: all we do now is
dispatch every SIGCHLD event exactly once to each unit interested in
it, and it's up to the unit to then use or ignore this. We use a
generation counter in the unit to ensure that we only invoke the unit
handler once for each event, protecting us from confusion if a unit is
both associated with a specific PID through cgroup membership and
through the "watch_pids" logic. It also protects us from being
confused if the "watch_pids" hashmap is altered while we are
dispatching to it (which is a very likely case).
- sd_notify() message dispatching has been reworked to be very similar
to SIGCHLD handling now. A generation counter is used for dispatching
as well.
This also adds a new test that validates that "watch_pid" registration
and unregstration works correctly.
2018-01-12 13:41:05 +01:00
|
|
|
/* Already invoked the handler of this unit in this iteration? Then don't process this again */
|
|
|
|
if (u->sigchldgen == m->sigchldgen)
|
|
|
|
return;
|
|
|
|
u->sigchldgen = m->sigchldgen;
|
2014-02-07 11:58:25 +01:00
|
|
|
|
core: rework how we track which PIDs to watch for a unit
Previously, we'd maintain two hashmaps keyed by PIDs, pointing to Unit
interested in SIGCHLD events for them. This scheme allowed a specific
PID to be watched by exactly 0, 1 or 2 units.
With this rework this is replaced by a single hashmap which is primarily
keyed by the PID and points to a Unit interested in it. However, it
optionally also keyed by the negated PID, in which case it points to a
NULL terminated array of additional Unit objects also interested. This
scheme means arbitrary numbers of Units may now watch the same PID.
Runtime and memory behaviour should not be impact by this change, as for
the common case (i.e. each PID only watched by a single unit) behaviour
stays the same, but for the uncommon case (a PID watched by more than
one unit) we only pay with a single additional memory allocation for the
array.
Why this all? Primarily, because allowing exactly two units to watch a
specific PID is not sufficient for some niche cases, as processes can
belong to more than one unit these days:
1. sd_notify() with MAINPID= can be used to attach a process from a
different cgroup to multiple units.
2. Similar, the PIDFile= setting in unit files can be used for similar
setups,
3. By creating a scope unit a main process of a service may join a
different unit, too.
4. On cgroupsv1 we frequently end up watching all processes remaining in
a scope, and if a process opens lots of scopes one after the other it
might thus end up being watch by many of them.
This patch hence removes the 2-unit-per-PID limit. It also makes a
couple of other changes, some of them quite relevant:
- manager_get_unit_by_pid() (and the bus call wrapping it) when there's
ambiguity will prefer returning the Unit the process belongs to based on
cgroup membership, and only check the watch-pids hashmap if that
fails. This change in logic is probably more in line with what people
expect and makes things more stable as each process can belong to
exactly one cgroup only.
- Every SIGCHLD event is now dispatched to all units interested in its
PID. Previously, there was some magic conditionalization: the SIGCHLD
would only be dispatched to the unit if it was only interested in a
single PID only, or the PID belonged to the control or main PID or we
didn't dispatch a signle SIGCHLD to the unit in the current event loop
iteration yet. These rules were quite arbitrary and also redundant as
the the per-unit handlers would filter the PIDs anyway a second time.
With this change we'll hence relax the rules: all we do now is
dispatch every SIGCHLD event exactly once to each unit interested in
it, and it's up to the unit to then use or ignore this. We use a
generation counter in the unit to ensure that we only invoke the unit
handler once for each event, protecting us from confusion if a unit is
both associated with a specific PID through cgroup membership and
through the "watch_pids" logic. It also protects us from being
confused if the "watch_pids" hashmap is altered while we are
dispatching to it (which is a very likely case).
- sd_notify() message dispatching has been reworked to be very similar
to SIGCHLD handling now. A generation counter is used for dispatching
as well.
This also adds a new test that validates that "watch_pid" registration
and unregstration works correctly.
2018-01-12 13:41:05 +01:00
|
|
|
log_unit_debug(u, "Child "PID_FMT" belongs to %s.", si->si_pid, u->id);
|
2014-02-07 11:58:25 +01:00
|
|
|
unit_unwatch_pid(u, si->si_pid);
|
2016-03-25 16:38:50 +01:00
|
|
|
|
core: rework how we track which PIDs to watch for a unit
Previously, we'd maintain two hashmaps keyed by PIDs, pointing to Unit
interested in SIGCHLD events for them. This scheme allowed a specific
PID to be watched by exactly 0, 1 or 2 units.
With this rework this is replaced by a single hashmap which is primarily
keyed by the PID and points to a Unit interested in it. However, it
optionally also keyed by the negated PID, in which case it points to a
NULL terminated array of additional Unit objects also interested. This
scheme means arbitrary numbers of Units may now watch the same PID.
Runtime and memory behaviour should not be impact by this change, as for
the common case (i.e. each PID only watched by a single unit) behaviour
stays the same, but for the uncommon case (a PID watched by more than
one unit) we only pay with a single additional memory allocation for the
array.
Why this all? Primarily, because allowing exactly two units to watch a
specific PID is not sufficient for some niche cases, as processes can
belong to more than one unit these days:
1. sd_notify() with MAINPID= can be used to attach a process from a
different cgroup to multiple units.
2. Similar, the PIDFile= setting in unit files can be used for similar
setups,
3. By creating a scope unit a main process of a service may join a
different unit, too.
4. On cgroupsv1 we frequently end up watching all processes remaining in
a scope, and if a process opens lots of scopes one after the other it
might thus end up being watch by many of them.
This patch hence removes the 2-unit-per-PID limit. It also makes a
couple of other changes, some of them quite relevant:
- manager_get_unit_by_pid() (and the bus call wrapping it) when there's
ambiguity will prefer returning the Unit the process belongs to based on
cgroup membership, and only check the watch-pids hashmap if that
fails. This change in logic is probably more in line with what people
expect and makes things more stable as each process can belong to
exactly one cgroup only.
- Every SIGCHLD event is now dispatched to all units interested in its
PID. Previously, there was some magic conditionalization: the SIGCHLD
would only be dispatched to the unit if it was only interested in a
single PID only, or the PID belonged to the control or main PID or we
didn't dispatch a signle SIGCHLD to the unit in the current event loop
iteration yet. These rules were quite arbitrary and also redundant as
the the per-unit handlers would filter the PIDs anyway a second time.
With this change we'll hence relax the rules: all we do now is
dispatch every SIGCHLD event exactly once to each unit interested in
it, and it's up to the unit to then use or ignore this. We use a
generation counter in the unit to ensure that we only invoke the unit
handler once for each event, protecting us from confusion if a unit is
both associated with a specific PID through cgroup membership and
through the "watch_pids" logic. It also protects us from being
confused if the "watch_pids" hashmap is altered while we are
dispatching to it (which is a very likely case).
- sd_notify() message dispatching has been reworked to be very similar
to SIGCHLD handling now. A generation counter is used for dispatching
as well.
This also adds a new test that validates that "watch_pid" registration
and unregstration works correctly.
2018-01-12 13:41:05 +01:00
|
|
|
if (UNIT_VTABLE(u)->sigchld_event)
|
|
|
|
UNIT_VTABLE(u)->sigchld_event(u, si->si_pid, si->si_code, si->si_status);
|
2014-02-07 11:58:25 +01:00
|
|
|
}
|
|
|
|
|
2018-01-23 18:18:13 +01:00
|
|
|
/* sd-event callback for SIGCHLD: peek at one exited child, notify every unit interested in its
 * PID, then reap the zombie. Returns 0 in all cases except failure to disable the event source. */
static int manager_dispatch_sigchld(sd_event_source *source, void *userdata) {
        Manager *m = userdata;
        siginfo_t si = {};
        int r;

        assert(source);
        assert(m);

        /* First we call waitid() for a PID and do not reap the zombie. That way we can still access /proc/$PID for it
         * while it is a zombie. */

        if (waitid(P_ALL, 0, &si, WEXITED|WNOHANG|WNOWAIT) < 0) {

                if (errno != ECHILD)
                        log_error_errno(errno, "Failed to peek for child with waitid(), ignoring: %m");

                goto turn_off;
        }

        /* waitid() with WNOHANG leaves si_pid at 0 if no child is waitable right now. */
        if (si.si_pid <= 0)
                goto turn_off;

        if (IN_SET(si.si_code, CLD_EXITED, CLD_KILLED, CLD_DUMPED)) {
                _cleanup_free_ Unit **array_copy = NULL;
                _cleanup_free_ char *name = NULL;
                Unit *u1, *u2, **array;

                /* Best effort: the comm name is only used for the debug message below. */
                (void) get_process_comm(si.si_pid, &name);

                log_debug("Child "PID_FMT" (%s) died (code=%s, status=%i/%s)",
                          si.si_pid, strna(name),
                          sigchld_code_to_string(si.si_code),
                          si.si_status,
                          strna(si.si_code == CLD_EXITED
                                ? exit_status_to_string(si.si_status, EXIT_STATUS_FULL)
                                : signal_to_string(si.si_status)));

                /* Increase the generation counter used for filtering out duplicate unit invocations */
                m->sigchldgen++;

                /* And now figure out the unit this belongs to, it might be multiple... */
                u1 = manager_get_unit_by_pid_cgroup(m, si.si_pid);
                u2 = hashmap_get(m->watch_pids, PID_TO_PTR(si.si_pid));
                array = hashmap_get(m->watch_pids, PID_TO_PTR(-si.si_pid));
                if (array) {
                        size_t n = 0;

                        /* Count how many entries the array has */
                        while (array[n])
                                n++;

                        /* Make a copy of the array so that we don't trip up on the array changing beneath us */
                        array_copy = newdup(Unit*, array, n+1);
                        if (!array_copy)
                                log_oom(); /* On OOM we still dispatch to u1/u2, just not to the array. */
                }

                /* Finally, execute them all. Note that u1, u2 and the array might contain duplicates, but
                 * that's fine, manager_invoke_sigchld_event() will ensure we only invoke the handlers once for
                 * each iteration. */
                if (u1)
                        manager_invoke_sigchld_event(m, u1, &si);
                if (u2)
                        manager_invoke_sigchld_event(m, u2, &si);
                if (array_copy)
                        for (size_t i = 0; array_copy[i]; i++)
                                manager_invoke_sigchld_event(m, array_copy[i], &si);
        }

        /* And now, we actually reap the zombie. */
        if (waitid(P_PID, si.si_pid, &si, WEXITED) < 0) {
                log_error_errno(errno, "Failed to dequeue child, ignoring: %m");
                return 0;
        }

        return 0;

turn_off:
        /* All children processed for now, turn off event source */

        r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_OFF);
        if (r < 0)
                return log_error_errno(r, "Failed to disable SIGCHLD event source: %m");

        return 0;
}
|
|
|
|
|
2017-08-02 17:19:22 +02:00
|
|
|
/* Enqueue a start job for the given special target unit (e.g. ctrl-alt-del.target).
 * Failures are logged but not propagated — this is fire-and-forget. */
static void manager_start_target(Manager *m, const char *name, JobMode mode) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;

        log_debug("Activating special unit %s", name);

        int r = manager_add_job_by_name(m, JOB_START, name, mode, &error, NULL);
        if (r < 0)
                log_error("Failed to enqueue %s job: %s", name, bus_error_message(&error, r));
}
|
|
|
|
|
2016-10-07 03:08:21 +02:00
|
|
|
/* React to a Ctrl-Alt-Del keypress: normally start ctrl-alt-del.target, but if the
 * user hammers C-A-D more than 7 times within 2s we escalate to the configured
 * burst action (reboot/poweroff/...), unless that was disabled in system.conf.
 * Note: ratelimit_below() must be evaluated unconditionally — it consumes a token. */
static void manager_handle_ctrl_alt_del(Manager *m) {

        if (ratelimit_below(&m->ctrl_alt_del_ratelimit) || m->cad_burst_action == EMERGENCY_ACTION_NONE)
                manager_start_target(m, SPECIAL_CTRL_ALT_DEL_TARGET, JOB_REPLACE_IRREVERSIBLY);
        else
                emergency_action(m, m->cad_burst_action, NULL,
                                 "Ctrl-Alt-Del was pressed more than 7 times within 2s");
}
|
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
/* sd-event callback for the manager's signalfd: reads one struct signalfd_siginfo
 * and dispatches on the signal number. Returns 0 (keep the event source alive) in
 * all cases except a hard read error, where the negative errno kills the handler
 * to avoid a busy loop.
 *
 * Fixes relative to the previous version:
 *  - the truncated-read warning used %zu for a ssize_t value; %zi is the correct
 *    specifier for a signed size type,
 *  - the SIGUSR2 branch logged `errno`, but manager_get_dump_string() reports its
 *    failure via its negative return value `r`; errno is unrelated at that point. */
static int manager_dispatch_signal_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;
        ssize_t n;
        struct signalfd_siginfo sfsi;
        int r;

        assert(m);
        assert(m->signal_fd == fd);

        if (revents != EPOLLIN) {
                log_warning("Got unexpected events from signal file descriptor.");
                return 0;
        }

        n = read(m->signal_fd, &sfsi, sizeof(sfsi));
        if (n != sizeof(sfsi)) {
                if (n >= 0) {
                        /* n is ssize_t, hence %zi rather than %zu */
                        log_warning("Truncated read from signal fd (%zi bytes), ignoring!", n);
                        return 0;
                }

                if (IN_SET(errno, EINTR, EAGAIN))
                        return 0;

                /* We return an error here, which will kill this handler,
                 * to avoid a busy loop on read error. */
                return log_error_errno(errno, "Reading from signal fd failed: %m");
        }

        /* SIGCHLD is routine; so is SIGTERM for a user manager. Log those at debug only. */
        log_received_signal(sfsi.ssi_signo == SIGCHLD ||
                            (sfsi.ssi_signo == SIGTERM && MANAGER_IS_USER(m))
                            ? LOG_DEBUG : LOG_INFO,
                            &sfsi);

        switch (sfsi.ssi_signo) {

        case SIGCHLD:
                /* Defer the actual reaping to the (lower-priority) SIGCHLD event source. */
                r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_ON);
                if (r < 0)
                        log_warning_errno(r, "Failed to enable SIGCHLD event source, ignoring: %m");

                break;

        case SIGTERM:
                if (MANAGER_IS_SYSTEM(m)) {
                        /* This is for compatibility with the original sysvinit */
                        r = verify_run_space_and_log("Refusing to reexecute");
                        if (r >= 0)
                                m->exit_code = MANAGER_REEXECUTE;

                        break;
                }

                _fallthrough_;
        case SIGINT:
                if (MANAGER_IS_SYSTEM(m))
                        manager_handle_ctrl_alt_del(m);
                else
                        manager_start_target(m, SPECIAL_EXIT_TARGET,
                                             JOB_REPLACE_IRREVERSIBLY);
                break;

        case SIGWINCH:
                /* This is a nop on non-init */
                if (MANAGER_IS_SYSTEM(m))
                        manager_start_target(m, SPECIAL_KBREQUEST_TARGET, JOB_REPLACE);

                break;

        case SIGPWR:
                /* This is a nop on non-init */
                if (MANAGER_IS_SYSTEM(m))
                        manager_start_target(m, SPECIAL_SIGPWR_TARGET, JOB_REPLACE);

                break;

        case SIGUSR1:
                if (manager_dbus_is_running(m, false)) {
                        log_info("Trying to reconnect to bus...");

                        (void) bus_init_api(m);

                        if (MANAGER_IS_SYSTEM(m))
                                (void) bus_init_system(m);
                } else {
                        log_info("Starting D-Bus service...");
                        manager_start_target(m, SPECIAL_DBUS_SERVICE, JOB_REPLACE);
                }

                break;

        case SIGUSR2: {
                _cleanup_free_ char *dump = NULL;

                r = manager_get_dump_string(m, &dump);
                if (r < 0) {
                        /* Use r here: manager_get_dump_string() reports errors via its
                         * return value, errno is not meaningful at this point. */
                        log_warning_errno(r, "Failed to acquire manager dump: %m");
                        break;
                }

                log_dump(LOG_INFO, dump);
                break;
        }

        case SIGHUP:
                r = verify_run_space_and_log("Refusing to reload");
                if (r >= 0)
                        m->exit_code = MANAGER_RELOAD;
                break;

        default: {

                /* Starting SIGRTMIN+0 */
                static const struct {
                        const char *target;
                        JobMode mode;
                } target_table[] = {
                        [0] = { SPECIAL_DEFAULT_TARGET, JOB_ISOLATE },
                        [1] = { SPECIAL_RESCUE_TARGET, JOB_ISOLATE },
                        [2] = { SPECIAL_EMERGENCY_TARGET, JOB_ISOLATE },
                        [3] = { SPECIAL_HALT_TARGET, JOB_REPLACE_IRREVERSIBLY },
                        [4] = { SPECIAL_POWEROFF_TARGET, JOB_REPLACE_IRREVERSIBLY },
                        [5] = { SPECIAL_REBOOT_TARGET, JOB_REPLACE_IRREVERSIBLY },
                        [6] = { SPECIAL_KEXEC_TARGET, JOB_REPLACE_IRREVERSIBLY },
                };

                /* Starting SIGRTMIN+13, so that target halt and system halt are 10 apart */
                static const ManagerExitCode code_table[] = {
                        [0] = MANAGER_HALT,
                        [1] = MANAGER_POWEROFF,
                        [2] = MANAGER_REBOOT,
                        [3] = MANAGER_KEXEC,
                };

                if ((int) sfsi.ssi_signo >= SIGRTMIN+0 &&
                    (int) sfsi.ssi_signo < SIGRTMIN+(int) ELEMENTSOF(target_table)) {
                        int idx = (int) sfsi.ssi_signo - SIGRTMIN;
                        manager_start_target(m, target_table[idx].target,
                                             target_table[idx].mode);
                        break;
                }

                if ((int) sfsi.ssi_signo >= SIGRTMIN+13 &&
                    (int) sfsi.ssi_signo < SIGRTMIN+13+(int) ELEMENTSOF(code_table)) {
                        m->exit_code = code_table[sfsi.ssi_signo - SIGRTMIN - 13];
                        break;
                }

                switch (sfsi.ssi_signo - SIGRTMIN) {

                case 20:
                        manager_set_show_status(m, SHOW_STATUS_YES);
                        break;

                case 21:
                        manager_set_show_status(m, SHOW_STATUS_NO);
                        break;

                case 22:
                        log_set_max_level(LOG_DEBUG);
                        log_info("Setting log level to debug.");
                        break;

                case 23:
                        log_set_max_level(LOG_INFO);
                        log_info("Setting log level to info.");
                        break;

                case 24:
                        if (MANAGER_IS_USER(m)) {
                                m->exit_code = MANAGER_EXIT;
                                return 0;
                        }

                        /* This is a nop on init */
                        break;

                case 26:
                case 29: /* compatibility: used to be mapped to LOG_TARGET_SYSLOG_OR_KMSG */
                        log_set_target(LOG_TARGET_JOURNAL_OR_KMSG);
                        log_notice("Setting log target to journal-or-kmsg.");
                        break;

                case 27:
                        log_set_target(LOG_TARGET_CONSOLE);
                        log_notice("Setting log target to console.");
                        break;

                case 28:
                        log_set_target(LOG_TARGET_KMSG);
                        log_notice("Setting log target to kmsg.");
                        break;

                default:
                        log_warning("Got unhandled signal <%s>.", signal_to_string(sfsi.ssi_signo));
                }
        }}

        return 0;
}
|
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
/* Fired when the realtime clock jumps (the timerfd was armed with TFD_TIMER_CANCEL_ON_SET).
 * Logs the event, re-arms a fresh watch, and notifies every unit type that cares
 * about wall-clock changes (e.g. timer units). */
static int manager_dispatch_time_change_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;
        Unit *u;
        Iterator i;

        assert(m);
        assert(m->time_change_fd == fd);

        log_struct(LOG_DEBUG,
                   "MESSAGE_ID=" SD_MESSAGE_TIME_CHANGE_STR,
                   LOG_MESSAGE("Time has been changed"),
                   NULL);

        /* The fd fires only once per arming; tear the old watch down and set up a new one. */
        m->time_change_event_source = sd_event_source_unref(m->time_change_event_source);
        m->time_change_fd = safe_close(m->time_change_fd);
        manager_setup_time_change(m);

        /* Let all interested unit types react to the clock jump. */
        HASHMAP_FOREACH(u, m->units, i) {
                if (UNIT_VTABLE(u)->time_change)
                        UNIT_VTABLE(u)->time_change(u);
        }

        return 0;
}
|
2010-02-01 03:33:24 +01:00
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
/* Called when a Type=idle child signals (via the idle pipe) that it is done waiting
 * for boot to finish and is about to take over the console. */
static int manager_dispatch_idle_pipe_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
        Manager *m = userdata;

        assert(m);
        assert(m->idle_pipe[2] == fd);

        /* There's at least one Type=idle child that just gave up on us waiting for the boot process to complete. Let's
         * now turn off any further console output if there's at least one service that needs console access, so that
         * from now on our own output should not spill into that service's output anymore. After all, we support
         * Type=idle only to beautify console output and it generally is set on services that want to own the console
         * exclusively without our interference. */
        m->no_console_output = m->n_on_console > 0;

        /* Acknowledge the child's request, and let all other children know too that they shouldn't wait any longer
         * by closing the pipes towards them, which is what they are waiting for. */
        manager_close_idle_pipe(m);

        return 0;
}
|
systemd: do not output status messages once gettys are running
Make Type=idle communication bidirectional: when bootup is finished,
the manager, as before, signals idling Type=idle jobs to continue.
However, if the boot takes too long, idling jobs signal the manager
that they have had enough, wait a tiny bit more, and continue, taking
ownership of the console. The manager, when signalled that Type=idle
jobs are done, makes a note and will not write to the console anymore.
This is a cosmetic issue, but quite noticeable, so let's just fix it.
Based on Harald Hoyer's patch.
https://bugs.freedesktop.org/show_bug.cgi?id=54247
http://unix.stackexchange.com/questions/51805/systemd-messages-after-starting-login/
2013-07-16 03:34:57 +02:00
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
/* Periodic timer callback: print the "jobs in progress" console spinner, then
 * re-arm this one-shot timer one period into the future. */
static int manager_dispatch_jobs_in_progress(sd_event_source *source, usec_t usec, void *userdata) {
        Manager *m = userdata;
        uint64_t next_fire;
        int r;

        assert(m);
        assert(source);

        manager_print_jobs_in_progress(m);

        /* Schedule the next tick relative to now, not to the previous deadline. */
        next_fire = now(CLOCK_MONOTONIC) + JOBS_IN_PROGRESS_PERIOD_USEC;
        r = sd_event_source_set_time(source, next_fire);
        if (r < 0)
                return r;

        return sd_event_source_set_enabled(source, SD_EVENT_ONESHOT);
}
|
|
|
|
|
|
|
|
/* The manager's main loop: drain the internal work queues until they are empty,
 * then block in sd_event_run() until something happens, repeating until an exit
 * code other than MANAGER_OK is set. Returns that exit code (or a negative errno
 * on event-loop failure). */
int manager_loop(Manager *m) {
        int r;

        /* Guard against pathological busy-looping: at most 50000 iterations per second. */
        RATELIMIT_DEFINE(rl, 1*USEC_PER_SEC, 50000);

        assert(m);
        m->exit_code = MANAGER_OK;

        /* Release the path cache */
        m->unit_path_cache = set_free_free(m->unit_path_cache);

        manager_check_finished(m);

        /* There might still be some zombies hanging around from before we were exec()'ed. Let's reap them. */
        r = sd_event_source_set_enabled(m->sigchld_event_source, SD_EVENT_ON);
        if (r < 0)
                return log_error_errno(r, "Failed to enable SIGCHLD event source: %m");

        while (m->exit_code == MANAGER_OK) {
                usec_t wait_usec;
                bool watchdog_active;

                /* The hardware watchdog is only relevant for the system instance with a finite timeout. */
                watchdog_active = m->runtime_watchdog > 0 && m->runtime_watchdog != USEC_INFINITY && MANAGER_IS_SYSTEM(m);

                if (watchdog_active)
                        watchdog_ping();

                if (!ratelimit_below(&rl)) {
                        /* Yay, something is going seriously wrong, pause a little */
                        log_warning("Looping too fast. Throttling execution a little.");
                        sleep(1);
                }

                /* Process one queue per iteration; restart from the top whenever any of them did work,
                 * so that higher-priority queues are always fully drained first. */
                if (manager_dispatch_load_queue(m) > 0)
                        continue;

                if (manager_dispatch_gc_job_queue(m) > 0)
                        continue;

                if (manager_dispatch_gc_unit_queue(m) > 0)
                        continue;

                if (manager_dispatch_cleanup_queue(m) > 0)
                        continue;

                if (manager_dispatch_cgroup_realize_queue(m) > 0)
                        continue;

                if (manager_dispatch_dbus_queue(m) > 0)
                        continue;

                /* Sleep for half the watchdog time */
                if (watchdog_active) {
                        wait_usec = m->runtime_watchdog / 2;
                        if (wait_usec <= 0)
                                wait_usec = 1;
                } else
                        wait_usec = USEC_INFINITY;

                r = sd_event_run(m->event, wait_usec);
                if (r < 0)
                        return log_error_errno(r, "Failed to run event loop: %m");
        }

        return m->exit_code;
}
|
2010-02-01 03:33:24 +01:00
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
/* Resolve a D-Bus object path to a Unit. The path suffix may either be a 128-bit
 * invocation ID or an (escaped) unit name; in the latter case the unit is loaded
 * on demand. On success *_u is set and 0 returned; on failure a negative errno
 * is returned and, where applicable, a bus error is filled in. */
int manager_load_unit_from_dbus_path(Manager *m, const char *s, sd_bus_error *e, Unit **_u) {
        _cleanup_free_ char *n = NULL;
        sd_id128_t invocation_id;
        Unit *found;
        int r;

        assert(m);
        assert(s);
        assert(_u);

        r = unit_name_from_dbus_path(s, &n);
        if (r < 0)
                return r;

        /* Permit addressing units by invocation ID: if the passed bus path is suffixed by a 128bit ID then we use it
         * as invocation ID. */
        if (sd_id128_from_string(n, &invocation_id) >= 0) {
                found = hashmap_get(m->units_by_invocation_id, &invocation_id);
                if (found) {
                        *_u = found;
                        return 0;
                }

                return sd_bus_error_setf(e, BUS_ERROR_NO_UNIT_FOR_INVOCATION_ID,
                                         "No unit with the specified invocation ID " SD_ID128_FORMAT_STR " known.",
                                         SD_ID128_FORMAT_VAL(invocation_id));
        }

        /* If this didn't work, we check if this is a unit name */
        if (!unit_name_is_valid(n, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE)) {
                _cleanup_free_ char *escaped = NULL;

                escaped = cescape(n);
                return sd_bus_error_setf(e, SD_BUS_ERROR_INVALID_ARGS,
                                         "Unit name %s is neither a valid invocation ID nor unit name.", strnull(escaped));
        }

        r = manager_load_unit(m, n, NULL, e, &found);
        if (r < 0)
                return r;

        *_u = found;
        return 0;
}
|
2010-02-02 12:42:08 +01:00
|
|
|
|
|
|
|
/* Resolve a D-Bus job object path ("/org/freedesktop/systemd1/job/<id>") to the
 * corresponding Job. Returns 0 and sets *_j on success, -EINVAL for a malformed
 * path, -ENOENT if no job with that id exists. */
int manager_get_job_from_dbus_path(Manager *m, const char *s, Job **_j) {
        const char *suffix;
        unsigned id;
        Job *job;
        int r;

        assert(m);
        assert(s);
        assert(_j);

        suffix = startswith(s, "/org/freedesktop/systemd1/job/");
        if (!suffix)
                return -EINVAL;

        r = safe_atou(suffix, &id);
        if (r < 0)
                return r;

        job = manager_get_job(m, id);
        if (!job)
                return -ENOENT;

        *_j = job;
        return 0;
}
|
2010-02-12 21:57:39 +01:00
|
|
|
|
2010-08-11 01:43:23 +02:00
|
|
|
/* Emit an audit record for a service unit's state change (start/stop etc.).
 * Compiled to a no-op when audit support is disabled; silently skipped for user
 * managers, during deserialization, for non-service units, or when no audit fd
 * is available. */
void manager_send_unit_audit(Manager *m, Unit *u, int type, bool success) {

#if HAVE_AUDIT
        _cleanup_free_ char *p = NULL;
        const char *msg;
        int audit_fd, r;

        /* Only PID 1 talks to the audit subsystem. */
        if (!MANAGER_IS_SYSTEM(m))
                return;

        audit_fd = get_audit_fd();
        if (audit_fd < 0)
                return;

        /* Don't generate audit events if the service was already
         * started and we're just deserializing */
        if (MANAGER_IS_RELOADING(m))
                return;

        /* Audit events are only defined for services. */
        if (u->type != UNIT_SERVICE)
                return;

        r = unit_name_to_prefix_and_instance(u->id, &p);
        if (r < 0) {
                log_error_errno(r, "Failed to extract prefix and instance of unit name: %m");
                return;
        }

        msg = strjoina("unit=", p);
        if (audit_log_user_comm_message(audit_fd, type, msg, "systemd", NULL, NULL, NULL, success) < 0) {
                if (errno == EPERM)
                        /* We aren't allowed to send audit messages?
                         * Then let's not retry again. */
                        close_audit_fd();
                else
                        log_warning_errno(errno, "Failed to send audit message: %m");
        }
#endif

}
|
|
|
|
|
2010-10-06 03:55:49 +02:00
|
|
|
/* Informs the Plymouth boot splash (via its abstract AF_UNIX socket) that a
 * unit has started, so it can update the boot progress display. Best-effort:
 * all failures are logged (or ignored) and never propagated. */
void manager_send_unit_plymouth(Manager *m, Unit *u) {
        static const union sockaddr_union sa = PLYMOUTH_SOCKET;
        _cleanup_free_ char *message = NULL;
        _cleanup_close_ int fd = -1;
        int n = 0;

        /* Don't generate plymouth events if the service was already
         * started and we're just deserializing */
        if (MANAGER_IS_RELOADING(m))
                return;

        /* Plymouth is a boot-time, host-only concept */
        if (!MANAGER_IS_SYSTEM(m))
                return;

        if (detect_container() > 0)
                return;

        /* Only these unit types are interesting for boot progress */
        if (!IN_SET(u->type, UNIT_SERVICE, UNIT_MOUNT, UNIT_SWAP))
                return;

        /* We set SOCK_NONBLOCK here so that we rather drop the
         * message then wait for plymouth */
        fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
        if (fd < 0) {
                log_error_errno(errno, "socket() failed: %m");
                return;
        }

        if (connect(fd, &sa.sa, SOCKADDR_UN_LEN(sa.un)) < 0) {
                /* Plymouth not running (or going away) is expected; only log
                 * genuinely unexpected connect() failures */
                if (!IN_SET(errno, EPIPE, EAGAIN, ENOENT, ECONNREFUSED, ECONNRESET, ECONNABORTED))
                        log_error_errno(errno, "connect() failed: %m");
                return;
        }

        /* Plymouth wire format: 'U' <STX> <length byte incl. NUL> <unit name>;
         * %n records the formatted length so the trailing NUL can be sent too */
        if (asprintf(&message, "U\002%c%s%n", (int) (strlen(u->id) + 1), u->id, &n) < 0) {
                log_oom();
                return;
        }

        /* Clear errno first: a short write() does not set it */
        errno = 0;
        if (write(fd, message, n + 1) != n + 1)
                if (!IN_SET(errno, EPIPE, EAGAIN, ENOENT, ECONNREFUSED, ECONNRESET, ECONNABORTED))
                        log_error_errno(errno, "Failed to write Plymouth message: %m");
}
|
|
|
|
|
2010-07-20 20:54:33 +02:00
|
|
|
/* Opens an anonymous temporary file used to carry serialized manager state
 * across a reload or re-execution. On success stores the stream in *_f and
 * returns 0; on failure returns a negative errno-style code. The caller owns
 * the returned stream. */
int manager_open_serialization(Manager *m, FILE **_f) {
        FILE *stream;
        int serial_fd;

        assert(_f);

        serial_fd = open_serialization_fd("systemd-state");
        if (serial_fd < 0)
                return serial_fd;

        stream = fdopen(serial_fd, "w+");
        if (!stream) {
                safe_close(serial_fd);
                return -errno;
        }

        *_f = stream;
        return 0;
}
|
|
|
|
|
2013-04-08 14:05:24 +02:00
|
|
|
/* Writes the manager's global state to 'f' and stashes the fds that must
 * survive reload/reexec into 'fds'. The format is a sequence of "key=value"
 * lines, then an empty line, then per-unit sections (each introduced by the
 * unit name on its own line). manager_deserialize() is the exact inverse, so
 * the order and spelling of the keys here is a wire contract. */
int manager_serialize(Manager *m, FILE *f, FDSet *fds, bool switching_root) {
        ManagerTimestamp q;
        const char *t;
        Iterator i;
        Unit *u;
        int r;

        assert(m);
        assert(f);
        assert(fds);

        /* Bump the reload counter so state-change side effects are suppressed
         * while we serialize; dropped again before returning */
        m->n_reloading++;

        fprintf(f, "current-job-id=%"PRIu32"\n", m->current_job_id);
        fprintf(f, "n-installed-jobs=%u\n", m->n_installed_jobs);
        fprintf(f, "n-failed-jobs=%u\n", m->n_failed_jobs);
        fprintf(f, "taint-usr=%s\n", yes_no(m->taint_usr));
        fprintf(f, "ready-sent=%s\n", yes_no(m->ready_sent));
        fprintf(f, "taint-logged=%s\n", yes_no(m->taint_logged));
        fprintf(f, "service-watchdogs=%s\n", yes_no(m->service_watchdogs));

        for (q = 0; q < _MANAGER_TIMESTAMP_MAX; q++) {
                /* The userspace and finish timestamps only apply to the host system, hence only serialize them there */
                if (in_initrd() && IN_SET(q, MANAGER_TIMESTAMP_USERSPACE, MANAGER_TIMESTAMP_FINISH))
                        continue;

                t = manager_timestamp_to_string(q);
                {
                        /* Build "<name>-timestamp" on the stack (VLA sized to fit) */
                        char field[strlen(t) + STRLEN("-timestamp") + 1];
                        strcpy(stpcpy(field, t), "-timestamp");
                        dual_timestamp_serialize(f, field, m->timestamps + q);
                }
        }

        /* When switching root the environment is intentionally not carried over */
        if (!switching_root)
                (void) serialize_environment(f, m->environment);

        /* NOTE(review): the early returns below leave n_reloading incremented;
         * presumably callers treat a failed serialization as fatal — confirm. */
        if (m->notify_fd >= 0) {
                int copy;

                /* Duplicate into the fd set; the copy's number is what we record */
                copy = fdset_put_dup(fds, m->notify_fd);
                if (copy < 0)
                        return copy;

                fprintf(f, "notify-fd=%i\n", copy);
                fprintf(f, "notify-socket=%s\n", m->notify_socket);
        }

        if (m->cgroups_agent_fd >= 0) {
                int copy;

                copy = fdset_put_dup(fds, m->cgroups_agent_fd);
                if (copy < 0)
                        return copy;

                fprintf(f, "cgroups-agent-fd=%i\n", copy);
        }

        if (m->user_lookup_fds[0] >= 0) {
                int copy0, copy1;

                copy0 = fdset_put_dup(fds, m->user_lookup_fds[0]);
                if (copy0 < 0)
                        return copy0;

                copy1 = fdset_put_dup(fds, m->user_lookup_fds[1]);
                if (copy1 < 0)
                        return copy1;

                fprintf(f, "user-lookup=%i %i\n", copy0, copy1);
        }

        bus_track_serialize(m->subscribed, f, "subscribed");

        r = dynamic_user_serialize(m, f, fds);
        if (r < 0)
                return r;

        manager_serialize_uid_refs(m, f);
        manager_serialize_gid_refs(m, f);

        r = exec_runtime_serialize(m, f, fds);
        if (r < 0)
                return r;

        /* Empty line separates global state from the per-unit sections */
        (void) fputc('\n', f);

        HASHMAP_FOREACH_KEY(u, t, m->units, i) {
                /* Units are registered under every alias; serialize each only
                 * once, under its primary id */
                if (u->id != t)
                        continue;

                /* Start marker */
                fputs(u->id, f);
                fputc('\n', f);

                r = unit_serialize(u, f, fds, !switching_root);
                if (r < 0) {
                        m->n_reloading--;
                        return r;
                }
        }

        assert(m->n_reloading > 0);
        m->n_reloading--;

        if (ferror(f))
                return -EIO;

        /* Also preserve the bus connection fds across the reload */
        r = bus_fdset_add_all(m, fds);
        if (r < 0)
                return r;

        return 0;
}
|
|
|
|
|
|
|
|
/* Reads back the state written by manager_serialize(): first the global
 * "key=value" lines (terminated by an empty line), then the per-unit
 * sections. Unknown or malformed entries are logged and skipped; only
 * ENOMEM and I/O errors abort. Serialized fds are claimed out of 'fds'. */
int manager_deserialize(Manager *m, FILE *f, FDSet *fds) {
        int r = 0;

        assert(m);
        assert(f);

        log_debug("Deserializing state...");

        /* Suppress state-change side effects while deserializing; dropped at 'finish' */
        m->n_reloading++;

        for (;;) {
                char line[LINE_MAX];
                const char *val, *l;

                if (!fgets(line, sizeof(line), f)) {
                        /* EOF before the empty-line separator is fine; a read
                         * error is not */
                        if (feof(f))
                                r = 0;
                        else
                                r = -errno;

                        goto finish;
                }

                char_array_0(line);
                l = strstrip(line);

                /* Empty line ends the global section */
                if (l[0] == 0)
                        break;

                if ((val = startswith(l, "current-job-id="))) {
                        uint32_t id;

                        if (safe_atou32(val, &id) < 0)
                                log_notice("Failed to parse current job id value %s", val);
                        else
                                /* Never rewind the job id counter */
                                m->current_job_id = MAX(m->current_job_id, id);

                } else if ((val = startswith(l, "n-installed-jobs="))) {
                        uint32_t n;

                        if (safe_atou32(val, &n) < 0)
                                log_notice("Failed to parse installed jobs counter %s", val);
                        else
                                m->n_installed_jobs += n;

                } else if ((val = startswith(l, "n-failed-jobs="))) {
                        uint32_t n;

                        if (safe_atou32(val, &n) < 0)
                                log_notice("Failed to parse failed jobs counter %s", val);
                        else
                                m->n_failed_jobs += n;

                } else if ((val = startswith(l, "taint-usr="))) {
                        int b;

                        b = parse_boolean(val);
                        if (b < 0)
                                log_notice("Failed to parse taint /usr flag %s", val);
                        else
                                /* Taint is sticky: once set it stays set */
                                m->taint_usr = m->taint_usr || b;

                } else if ((val = startswith(l, "ready-sent="))) {
                        int b;

                        b = parse_boolean(val);
                        if (b < 0)
                                log_notice("Failed to parse ready-sent flag %s", val);
                        else
                                m->ready_sent = m->ready_sent || b;

                } else if ((val = startswith(l, "taint-logged="))) {
                        int b;

                        b = parse_boolean(val);
                        if (b < 0)
                                log_notice("Failed to parse taint-logged flag %s", val);
                        else
                                m->taint_logged = m->taint_logged || b;

                } else if ((val = startswith(l, "service-watchdogs="))) {
                        int b;

                        b = parse_boolean(val);
                        if (b < 0)
                                log_notice("Failed to parse service-watchdogs flag %s", val);
                        else
                                m->service_watchdogs = b;

                } else if (startswith(l, "env=")) {
                        /* Note: the full line (including the "env=" prefix) is passed on */
                        r = deserialize_environment(&m->environment, l);
                        if (r == -ENOMEM)
                                goto finish;
                        if (r < 0)
                                log_notice_errno(r, "Failed to parse environment entry: \"%s\": %m", l);

                } else if ((val = startswith(l, "notify-fd="))) {
                        int fd;

                        /* Only accept fds that were actually passed to us in 'fds' */
                        if (safe_atoi(val, &fd) < 0 || fd < 0 || !fdset_contains(fds, fd))
                                log_notice("Failed to parse notify fd: \"%s\"", val);
                        else {
                                /* Drop any previous notify fd/event source before taking over */
                                m->notify_event_source = sd_event_source_unref(m->notify_event_source);
                                safe_close(m->notify_fd);
                                m->notify_fd = fdset_remove(fds, fd);
                        }

                } else if ((val = startswith(l, "notify-socket="))) {
                        char *n;

                        n = strdup(val);
                        if (!n) {
                                r = -ENOMEM;
                                goto finish;
                        }

                        free(m->notify_socket);
                        m->notify_socket = n;

                } else if ((val = startswith(l, "cgroups-agent-fd="))) {
                        int fd;

                        if (safe_atoi(val, &fd) < 0 || fd < 0 || !fdset_contains(fds, fd))
                                log_notice("Failed to parse cgroups agent fd: %s", val);
                        else {
                                m->cgroups_agent_event_source = sd_event_source_unref(m->cgroups_agent_event_source);
                                safe_close(m->cgroups_agent_fd);
                                m->cgroups_agent_fd = fdset_remove(fds, fd);
                        }

                } else if ((val = startswith(l, "user-lookup="))) {
                        int fd0, fd1;

                        /* Both ends of the lookup socket pair must be present and distinct */
                        if (sscanf(val, "%i %i", &fd0, &fd1) != 2 || fd0 < 0 || fd1 < 0 || fd0 == fd1 || !fdset_contains(fds, fd0) || !fdset_contains(fds, fd1))
                                log_notice("Failed to parse user lookup fd: %s", val);
                        else {
                                m->user_lookup_event_source = sd_event_source_unref(m->user_lookup_event_source);
                                safe_close_pair(m->user_lookup_fds);
                                m->user_lookup_fds[0] = fdset_remove(fds, fd0);
                                m->user_lookup_fds[1] = fdset_remove(fds, fd1);
                        }

                } else if ((val = startswith(l, "dynamic-user=")))
                        dynamic_user_deserialize_one(m, val, fds);
                else if ((val = startswith(l, "destroy-ipc-uid=")))
                        manager_deserialize_uid_refs_one(m, val);
                else if ((val = startswith(l, "destroy-ipc-gid=")))
                        manager_deserialize_gid_refs_one(m, val);
                else if ((val = startswith(l, "exec-runtime=")))
                        exec_runtime_deserialize_one(m, val, fds);
                else if ((val = startswith(l, "subscribed="))) {

                        if (strv_extend(&m->deserialized_subscribed, val) < 0)
                                log_oom();
                } else {
                        ManagerTimestamp q;

                        /* Last resort: try to match "<timestamp-name>-timestamp=" entries */
                        for (q = 0; q < _MANAGER_TIMESTAMP_MAX; q++) {
                                val = startswith(l, manager_timestamp_to_string(q));
                                if (!val)
                                        continue;

                                val = startswith(val, "-timestamp=");
                                if (val)
                                        break;
                        }

                        if (q < _MANAGER_TIMESTAMP_MAX) /* found it */
                                dual_timestamp_deserialize(val, m->timestamps + q);
                        else if (!startswith(l, "kdbus-fd=")) /* ignore kdbus */
                                log_notice("Unknown serialization item '%s'", l);
                }
        }

        /* Per-unit sections follow: unit name on its own line, then the unit's
         * serialized state */
        for (;;) {
                Unit *u;
                char name[UNIT_NAME_MAX+2];
                const char* unit_name;

                /* Start marker */
                if (!fgets(name, sizeof(name), f)) {
                        if (feof(f))
                                r = 0;
                        else
                                r = -errno;

                        goto finish;
                }

                char_array_0(name);
                unit_name = strstrip(name);

                r = manager_load_unit(m, unit_name, NULL, NULL, &u);
                if (r < 0) {
                        log_notice_errno(r, "Failed to load unit \"%s\", skipping deserialization: %m", unit_name);
                        if (r == -ENOMEM)
                                goto finish;
                        /* Skip past this unit's section so the stream stays in sync */
                        unit_deserialize_skip(f);
                        continue;
                }

                r = unit_deserialize(u, f, fds);
                if (r < 0) {
                        log_notice_errno(r, "Failed to deserialize unit \"%s\": %m", unit_name);
                        if (r == -ENOMEM)
                                goto finish;
                }
        }

finish:
        if (ferror(f))
                r = -EIO;

        assert(m->n_reloading > 0);
        m->n_reloading--;

        return r;
}
|
|
|
|
|
2018-04-24 15:19:38 +02:00
|
|
|
/* Drains the set of jobs whose completion signals were deferred (e.g. during
 * a reload): sends the job-removed bus signal for each, frees it, and then
 * releases the (now empty) set itself. */
static void manager_flush_finished_jobs(Manager *m) {
        for (;;) {
                Job *job;

                job = set_steal_first(m->pending_finished_jobs);
                if (!job)
                        break;

                bus_job_send_removed_signal(job);
                job_free(job);
        }

        m->pending_finished_jobs = set_free(m->pending_finished_jobs);
}
|
|
|
|
|
2010-04-21 03:27:44 +02:00
|
|
|
/* Performs a full daemon reload: serializes the entire manager state to a
 * temporary file, tears down all jobs/units and generated configuration,
 * re-runs the generators, re-enumerates units from disk and then deserializes
 * the saved state back on top. Returns 0 on success, otherwise the first
 * negative error encountered (later stages are still attempted). */
int manager_reload(Manager *m) {
        int r, q;
        _cleanup_fclose_ FILE *f = NULL;
        _cleanup_fdset_free_ FDSet *fds = NULL;

        assert(m);

        r = manager_open_serialization(m, &f);
        if (r < 0)
                return r;

        /* Enter the reloading state and announce it on the bus */
        m->n_reloading++;
        bus_manager_send_reloading(m, true);

        fds = fdset_new();
        if (!fds) {
                m->n_reloading--;
                return -ENOMEM;
        }

        r = manager_serialize(m, f, fds, false);
        if (r < 0) {
                m->n_reloading--;
                return r;
        }

        /* Rewind so the deserialization below reads what we just wrote */
        if (fseeko(f, 0, SEEK_SET) < 0) {
                m->n_reloading--;
                return -errno;
        }

        /* From here on there is no way back. */
        manager_clear_jobs_and_units(m);
        lookup_paths_flush_generator(&m->lookup_paths);
        lookup_paths_free(&m->lookup_paths);
        exec_runtime_vacuum(m);
        dynamic_user_vacuum(m, false);
        m->uid_refs = hashmap_free(m->uid_refs);
        m->gid_refs = hashmap_free(m->gid_refs);

        /* Rebuild the unit search paths and regenerate units; failures are
         * remembered in 'r' but do not stop the reload */
        q = lookup_paths_init(&m->lookup_paths, m->unit_file_scope, 0, NULL);
        if (q < 0 && r >= 0)
                r = q;

        q = manager_run_environment_generators(m);
        if (q < 0 && r >= 0)
                r = q;

        /* Find new unit paths */
        q = manager_run_generators(m);
        if (q < 0 && r >= 0)
                r = q;

        lookup_paths_reduce(&m->lookup_paths);
        manager_build_unit_path_cache(m);

        /* First, enumerate what we can from all config files */
        manager_enumerate(m);

        /* Second, deserialize our stored data */
        q = manager_deserialize(m, f, fds);
        if (q < 0) {
                log_error_errno(q, "Deserialization failed: %m");

                if (r >= 0)
                        r = q;
        }

        fclose(f);
        f = NULL;

        /* Re-register notify_fd as event source */
        q = manager_setup_notify(m);
        if (q < 0 && r >= 0)
                r = q;

        q = manager_setup_cgroups_agent(m);
        if (q < 0 && r >= 0)
                r = q;

        q = manager_setup_user_lookup_fd(m);
        if (q < 0 && r >= 0)
                r = q;

        /* Third, fire things up! */
        manager_coldplug(m);

        /* Release any dynamic users no longer referenced */
        dynamic_user_vacuum(m, true);

        /* Release any references to UIDs/GIDs no longer referenced, and destroy any IPC owned by them */
        manager_vacuum_uid_refs(m);
        manager_vacuum_gid_refs(m);

        exec_runtime_vacuum(m);

        assert(m->n_reloading > 0);
        m->n_reloading--;

        /* It might be safe to log to the journal now and connect to dbus */
        manager_recheck_journal(m);
        manager_recheck_dbus(m);

        /* Sync current state of bus names with our set of listening units */
        q = manager_enqueue_sync_bus_names(m);
        if (q < 0 && r >= 0)
                r = q;

        /* Deliver job-removed signals that were held back during the reload */
        if (!MANAGER_IS_RELOADING(m))
                manager_flush_finished_jobs(m);

        m->send_reloading_done = true;

        return r;
}
|
|
|
|
|
2010-08-31 00:23:34 +02:00
|
|
|
/* Clears the "failed" state of every unit the manager currently knows about. */
void manager_reset_failed(Manager *m) {
        Iterator it;
        Unit *unit;

        assert(m);

        HASHMAP_FOREACH(unit, m->units, it)
                unit_reset_failed(unit);
}
|
|
|
|
|
2013-04-26 02:57:41 +02:00
|
|
|
/* Returns true if the named unit is inactive or on its way down. A unit the
 * manager does not know about counts as inactive. */
bool manager_unit_inactive_or_pending(Manager *m, const char *name) {
        Unit *unit;

        assert(m);
        assert(name);

        unit = manager_get_unit(m, name);

        return !unit || unit_inactive_or_pending(unit);
}
|
|
|
|
|
2018-01-21 13:17:54 +01:00
|
|
|
/* Logs a structured notice describing why the system is tainted, at most once
 * per manager lifetime and only for the system instance. Does nothing when
 * the taint string is empty. */
static void log_taint_string(Manager *m) {
        _cleanup_free_ char *taint = NULL;

        assert(m);

        /* Only the system instance reports taint, and only the first time */
        if (MANAGER_IS_USER(m))
                return;
        if (m->taint_logged)
                return;

        m->taint_logged = true; /* remember we checked, even if nothing is tainted */

        taint = manager_taint_string(m);
        if (isempty(taint))
                return;

        log_struct(LOG_NOTICE,
                   LOG_MESSAGE("System is tainted: %s", taint),
                   "TAINT=%s", taint,
                   "MESSAGE_ID=" SD_MESSAGE_TAINTED_STR,
                   NULL);
}
|
|
|
|
|
2014-11-02 18:19:38 +01:00
|
|
|
/* Logs the "Startup finished" message (with a per-phase timing breakdown on
 * bare-metal system instances), emits the corresponding D-Bus signal, and
 * notifies our own supervisor via sd_notify(). Called once, from
 * manager_check_finished(), after MANAGER_TIMESTAMP_FINISH has been taken. */
static void manager_notify_finished(Manager *m) {
        char userspace[FORMAT_TIMESPAN_MAX], initrd[FORMAT_TIMESPAN_MAX], kernel[FORMAT_TIMESPAN_MAX], sum[FORMAT_TIMESPAN_MAX];
        usec_t firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec;

        /* In test mode, don't generate any of the finish notifications. */
        if (m->test_run_flags)
                return;

        if (MANAGER_IS_SYSTEM(m) && detect_container() <= 0) {
                char ts[FORMAT_TIMESPAN_MAX];
                /* Scratch buffer for the optional "… (firmware) + … (loader) + " prefix. */
                char buf[FORMAT_TIMESPAN_MAX + STRLEN(" (firmware) + ") + FORMAT_TIMESPAN_MAX + STRLEN(" (loader) + ")]
                        = {};
                char *p = buf;
                size_t size = sizeof buf;

                /* Note that MANAGER_TIMESTAMP_KERNEL's monotonic value is always at 0, and
                 * MANAGER_TIMESTAMP_FIRMWARE's and MANAGER_TIMESTAMP_LOADER's monotonic value should be considered
                 * negative values. */

                firmware_usec = m->timestamps[MANAGER_TIMESTAMP_FIRMWARE].monotonic - m->timestamps[MANAGER_TIMESTAMP_LOADER].monotonic;
                loader_usec = m->timestamps[MANAGER_TIMESTAMP_LOADER].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
                userspace_usec = m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic - m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic;
                /* FIRMWARE's monotonic field holds the span before the kernel clock started
                 * (see note above), hence the addition here yields the grand total. */
                total_usec = m->timestamps[MANAGER_TIMESTAMP_FIRMWARE].monotonic + m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic;

                /* Firmware/loader numbers are only available if a boot loader recorded them. */
                if (firmware_usec > 0)
                        size = strpcpyf(&p, size, "%s (firmware) + ", format_timespan(ts, sizeof(ts), firmware_usec, USEC_PER_MSEC));
                if (loader_usec > 0)
                        size = strpcpyf(&p, size, "%s (loader) + ", format_timespan(ts, sizeof(ts), loader_usec, USEC_PER_MSEC));

                if (dual_timestamp_is_set(&m->timestamps[MANAGER_TIMESTAMP_INITRD])) {

                        /* The initrd case on bare-metal */
                        kernel_usec = m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
                        initrd_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_INITRD].monotonic;

                        log_struct(LOG_INFO,
                                   "MESSAGE_ID=" SD_MESSAGE_STARTUP_FINISHED_STR,
                                   "KERNEL_USEC="USEC_FMT, kernel_usec,
                                   "INITRD_USEC="USEC_FMT, initrd_usec,
                                   "USERSPACE_USEC="USEC_FMT, userspace_usec,
                                   LOG_MESSAGE("Startup finished in %s%s (kernel) + %s (initrd) + %s (userspace) = %s.",
                                               buf,
                                               format_timespan(kernel, sizeof(kernel), kernel_usec, USEC_PER_MSEC),
                                               format_timespan(initrd, sizeof(initrd), initrd_usec, USEC_PER_MSEC),
                                               format_timespan(userspace, sizeof(userspace), userspace_usec, USEC_PER_MSEC),
                                               format_timespan(sum, sizeof(sum), total_usec, USEC_PER_MSEC)),
                                   NULL);
                } else {
                        /* The initrd-less case on bare-metal */

                        kernel_usec = m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic - m->timestamps[MANAGER_TIMESTAMP_KERNEL].monotonic;
                        initrd_usec = 0;

                        log_struct(LOG_INFO,
                                   "MESSAGE_ID=" SD_MESSAGE_STARTUP_FINISHED_STR,
                                   "KERNEL_USEC="USEC_FMT, kernel_usec,
                                   "USERSPACE_USEC="USEC_FMT, userspace_usec,
                                   LOG_MESSAGE("Startup finished in %s%s (kernel) + %s (userspace) = %s.",
                                               buf,
                                               format_timespan(kernel, sizeof(kernel), kernel_usec, USEC_PER_MSEC),
                                               format_timespan(userspace, sizeof(userspace), userspace_usec, USEC_PER_MSEC),
                                               format_timespan(sum, sizeof(sum), total_usec, USEC_PER_MSEC)),
                                   NULL);
                }
        } else {
                /* The container and --user case: only userspace time is meaningful. */
                firmware_usec = loader_usec = initrd_usec = kernel_usec = 0;
                total_usec = userspace_usec = m->timestamps[MANAGER_TIMESTAMP_FINISH].monotonic - m->timestamps[MANAGER_TIMESTAMP_USERSPACE].monotonic;

                log_struct(LOG_INFO,
                           "MESSAGE_ID=" SD_MESSAGE_USER_STARTUP_FINISHED_STR,
                           "USERSPACE_USEC="USEC_FMT, userspace_usec,
                           LOG_MESSAGE("Startup finished in %s.",
                                       format_timespan(sum, sizeof(sum), total_usec, USEC_PER_MSEC)),
                           NULL);
        }

        bus_manager_send_finished(m, firmware_usec, loader_usec, kernel_usec, initrd_usec, userspace_usec, total_usec);

        /* Only include READY=1 if we have not already sent it earlier
         * (e.g. on reaching basic.target in user mode). */
        sd_notifyf(false,
                   m->ready_sent ? "STATUS=Startup finished in %s."
                                 : "READY=1\n"
                                   "STATUS=Startup finished in %s.",
                   format_timespan(sum, sizeof(sum), total_usec, USEC_PER_MSEC));
        m->ready_sent = true;

        log_taint_string(m);
}
|
|
|
|
|
2018-01-23 16:32:06 +01:00
|
|
|
/* Tells our supervisor we are ready. Only user instances signal READY=1
 * on reaching basic.target; sent at most once per manager lifetime. */
static void manager_send_ready(Manager *m) {
        assert(m);

        if (m->ready_sent)
                return;
        if (!MANAGER_IS_USER(m))
                return;

        m->ready_sent = true;

        sd_notifyf(false,
                   "READY=1\n"
                   "STATUS=Reached " SPECIAL_BASIC_TARGET ".");
}
|
|
|
|
|
|
|
|
/* Once basic.target is active (or reloading), fires the two one-shot
 * actions tied to it: the user-mode READY=1 notification and the taint
 * log line. Both helpers are themselves idempotent. */
static void manager_check_basic_target(Manager *m) {
        Unit *basic;

        assert(m);

        /* Nothing left to do if both one-shot actions already fired. */
        if (m->ready_sent && m->taint_logged)
                return;

        basic = manager_get_unit(m, SPECIAL_BASIC_TARGET);
        if (!basic)
                return;
        if (!UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(basic)))
                return;

        /* For user managers, send out READY=1 as soon as we reach basic.target */
        manager_send_ready(m);

        /* Log the taint string as soon as we reach basic.target */
        log_taint_string(m);
}
|
|
|
|
|
2014-11-02 18:19:38 +01:00
|
|
|
/* Checks whether startup has completed (no jobs left, manager running),
 * and if so performs the end-of-startup housekeeping and emits the
 * "finished" notifications. Safe to call repeatedly; it bails out early
 * while jobs are pending and becomes a no-op once MANAGER_IS_FINISHED. */
void manager_check_finished(Manager *m) {
        assert(m);

        /* Don't evaluate anything while a reload is in flight. */
        if (MANAGER_IS_RELOADING(m))
                return;

        /* Verify that we have entered the event loop already, and not left it again. */
        if (!MANAGER_IS_RUNNING(m))
                return;

        manager_check_basic_target(m);

        if (hashmap_size(m->jobs) > 0) {
                /* Still busy: just push the "jobs in progress" feedback timer out. */
                if (m->jobs_in_progress_event_source)
                        /* Ignore any failure, this is only for feedback */
                        (void) sd_event_source_set_time(m->jobs_in_progress_event_source, now(CLOCK_MONOTONIC) + JOBS_IN_PROGRESS_WAIT_USEC);

                return;
        }

        manager_flip_auto_status(m, false);

        /* Notify Type=idle units that we are done now */
        manager_close_idle_pipe(m);

        /* Turn off confirm spawn now */
        m->confirm_spawn = NULL;

        /* No need to update ask password status when we're going non-interactive */
        manager_close_ask_password(m);

        /* This is no longer the first boot */
        manager_set_first_boot(m, false);

        /* The notifications below must only happen once per boot. */
        if (MANAGER_IS_FINISHED(m))
                return;

        dual_timestamp_get(m->timestamps + MANAGER_TIMESTAMP_FINISH);

        manager_notify_finished(m);

        manager_invalidate_startup_units(m);
}
|
|
|
|
|
manager: run environment generators
Environment file generators are a lot like unit file generators, but not
exactly:
1. environment file generators are run for each manager instance, and their
output is (or at least can be) individualized.
The generators themselves are system-wide, the same for all users.
2. environment file generators are run sequentially, in priority order.
Thus, the lifetime of those files is tied to lifecycle of the manager
instance. Because generators are run sequentially, later generators can use or
modify the output of earlier generators.
Each generator is run with no arguments, and the whole state is stored in the
environment variables. The generator can echo a set of variable assignments to
standard output:
VAR_A=something
VAR_B=something else
This output is parsed, and the next and subsequent generators run with those
updated variables in the environment. After the last generator is done, the
environment that the manager itself exports is updated.
Each generator must return 0, otherwise the output is ignored.
The generators in */user-env-generator are for the user session managers,
including root, and the ones in */system-env-generator are for pid1.
2017-01-22 07:13:47 +01:00
|
|
|
static bool generator_path_any(const char* const* paths) {
|
|
|
|
char **path;
|
|
|
|
bool found = false;
|
|
|
|
|
|
|
|
/* Optimize by skipping the whole process by not creating output directories
|
|
|
|
* if no generators are found. */
|
|
|
|
STRV_FOREACH(path, (char**) paths)
|
|
|
|
if (access(*path, F_OK) == 0)
|
|
|
|
found = true;
|
|
|
|
else if (errno != ENOENT)
|
|
|
|
log_warning_errno(errno, "Failed to open generator directory %s: %m", *path);
|
|
|
|
|
|
|
|
return found;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* NULL-terminated list of directories scanned for system-wide environment
 * generator executables by manager_run_environment_generators(). */
static const char* system_env_generator_binary_paths[] = {
        "/run/systemd/system-environment-generators",
        "/etc/systemd/system-environment-generators",
        "/usr/local/lib/systemd/system-environment-generators",
        SYSTEM_ENV_GENERATOR_PATH,
        NULL
};
|
|
|
|
|
|
|
|
/* NULL-terminated list of directories scanned for per-user environment
 * generator executables by manager_run_environment_generators(). */
static const char* user_env_generator_binary_paths[] = {
        "/run/systemd/user-environment-generators",
        "/etc/systemd/user-environment-generators",
        "/usr/local/lib/systemd/user-environment-generators",
        USER_ENV_GENERATOR_PATH,
        NULL
};
|
|
|
|
|
|
|
|
/* Runs the environment generators appropriate for this manager's scope and
 * folds their output into m->environment via gather_environment. Returns 0
 * if there was nothing to do, otherwise the result of execute_directories(). */
static int manager_run_environment_generators(Manager *m) {
        char **tmp = NULL; /* this is only used in the forked process, no cleanup here */
        const char **dirs;
        void* args[] = {&tmp, &tmp, &m->environment};

        /* In test mode, run only when explicitly requested. */
        if (m->test_run_flags && !(m->test_run_flags & MANAGER_TEST_RUN_ENV_GENERATORS))
                return 0;

        if (MANAGER_IS_SYSTEM(m))
                dirs = system_env_generator_binary_paths;
        else
                dirs = user_env_generator_binary_paths;

        /* Don't bother forking anything if there's nothing to execute. */
        if (!generator_path_any(dirs))
                return 0;

        return execute_directories(dirs, DEFAULT_TIMEOUT_USEC, gather_environment, args, NULL);
}
|
|
|
|
|
Implement masking and overriding of generators
Sometimes it is necessary to stop a generator from running. Either
because of a bug, or for testing, or some other reason. The only way
to do that would be to rename or chmod the generator binary, which is
inconvenient and does not survive upgrades. Allow masking and
overriding generators similarly to units and other configuration
files.
For the systemd instance, masking would be more common, rather than
overriding generators. For the user instances, it may also be useful
for users to have generators in $XDG_CONFIG_HOME to augment or
override system-wide generators.
Directories are searched according to the usual scheme (/usr/lib,
/usr/local/lib, /run, /etc), and files with the same name in higher
priority directories override files with the same name in lower
priority directories. Empty files and links to /dev/null mask a given
name.
https://bugs.freedesktop.org/show_bug.cgi?id=87230
2015-01-09 02:47:25 +01:00
|
|
|
/* Runs all unit-file generators for this manager's scope, letting them
 * populate the early/normal/late generator output directories. The output
 * directories are created on demand and trimmed (empty ones removed) again
 * afterwards. Returns 0 on success or if there was nothing to run, negative
 * errno on failure to set up the output directories. */
static int manager_run_generators(Manager *m) {
        _cleanup_strv_free_ char **paths = NULL;
        const char *argv[5];
        int r;

        assert(m);

        /* In test mode, run only when explicitly requested. */
        if (m->test_run_flags && !(m->test_run_flags & MANAGER_TEST_RUN_GENERATORS))
                return 0;

        paths = generator_binary_paths(m->unit_file_scope);
        if (!paths)
                return log_oom();

        /* Skip everything (including mkdir of the output dirs) if no generators exist. */
        if (!generator_path_any((const char* const*) paths))
                return 0;

        r = lookup_paths_mkdir_generator(&m->lookup_paths);
        if (r < 0)
                goto finish;

        /* Each generator is invoked with the three output directories as arguments. */
        argv[0] = NULL; /* Leave this empty, execute_directory() will fill something in */
        argv[1] = m->lookup_paths.generator;
        argv[2] = m->lookup_paths.generator_early;
        argv[3] = m->lookup_paths.generator_late;
        argv[4] = NULL;

        /* Run with a predictable umask so generated files get sane modes. */
        RUN_WITH_UMASK(0022)
                execute_directories((const char* const*) paths, DEFAULT_TIMEOUT_USEC,
                                    NULL, NULL, (char**) argv);

finish:
        /* Remove output directories that ended up empty. */
        lookup_paths_trim_generator(&m->lookup_paths);
        return r;
}
|
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
/* Updates the manager's environment block: removes all variables listed in
 * 'minus', then merges in the assignments from 'plus'. Either strv may be
 * NULL/empty. On success the old environment is freed and replaced;
 * returns 0, or -ENOMEM (leaving m->environment untouched) on failure. */
int manager_environment_add(Manager *m, char **minus, char **plus) {
        /* 'l' tracks the current candidate list; 'a' and 'b' hold the
         * intermediate allocations so we can free whichever ones did not
         * end up becoming the final list. */
        char **a = NULL, **b = NULL, **l;

        assert(m);

        l = m->environment;

        if (!strv_isempty(minus)) {
                a = strv_env_delete(l, 1, minus);
                if (!a)
                        return -ENOMEM;

                l = a;
        }

        if (!strv_isempty(plus)) {
                b = strv_env_merge(2, l, plus);
                if (!b) {
                        /* Don't leak the deletion result on merge failure. */
                        strv_free(a);
                        return -ENOMEM;
                }

                l = b;
        }

        /* Free everything that is not the surviving list. */
        if (m->environment != l)
                strv_free(m->environment);
        if (a != l)
                strv_free(a);
        if (b != l)
                strv_free(b);

        m->environment = l;
        manager_sanitize_environment(m);

        return 0;
}
|
|
|
|
|
2012-03-21 18:03:40 +01:00
|
|
|
/* Replaces the manager's table of default resource limits with a deep copy
 * of the given table. A NULL entry in the source clears the corresponding
 * slot. Returns 0 on success, negative errno on allocation failure. */
int manager_set_default_rlimits(Manager *m, struct rlimit **default_rlimit) {
        int i;

        assert(m);

        for (i = 0; i < _RLIMIT_MAX; i++) {
                /* Drop whatever was stored before. */
                m->rlimit[i] = mfree(m->rlimit[i]);

                if (default_rlimit[i]) {
                        m->rlimit[i] = newdup(struct rlimit, default_rlimit[i], 1);
                        if (!m->rlimit[i])
                                return log_oom();
                }
        }

        return 0;
}
|
|
|
|
|
core: rework how we connect to the bus
This removes the current bus_init() call, as it had multiple problems:
it munged handling of the three bus connections we care about (private,
"api" and system) into one, even though the conditions when which was
ready are very different. It also added redundant logging, as the
individual calls it called all logged on their own anyway.
The three calls bus_init_api(), bus_init_private() and bus_init_system()
are now made public. A new call manager_dbus_is_running() is added that
works much like manager_journal_is_running() and is a lot more careful
when checking whether dbus is around. Optionally it checks the unit's
deserialized_state rather than state, in order to accomodate for cases
where we cant to connect to the bus before deserializing the
"subscribed" list, before coldplugging the units.
manager_recheck_dbus() is added, that works a lot like
manager_recheck_journal() and is invoked in unit_notify(), i.e. when
units change state.
All in all this should make handling a bit more alike to journal
handling, and it also fixes one major bug: when running in user mode
we'll now connect to the system bus early on, without conditionalizing
this in anyway.
2018-02-07 14:52:22 +01:00
|
|
|
void manager_recheck_dbus(Manager *m) {
        bool up;

        assert(m);

        /* Connects to the bus if the dbus service and socket are running. If we are running in user mode this is all
         * it does. In system mode we'll also connect to the system bus (which will most likely just reuse the
         * connection of the API bus). That's because the system bus after all runs as service of the system instance,
         * while in the user instance we can assume it's already there. */

        if (MANAGER_IS_RELOADING(m))
                return; /* don't check while we are reloading… */

        up = manager_dbus_is_running(m, false);

        if (up)
                (void) bus_init_api(m);
        else
                (void) bus_done_api(m);

        if (MANAGER_IS_SYSTEM(m)) {
                if (up)
                        (void) bus_init_system(m);
                else
                        (void) bus_done_system(m);
        }
}
|
|
|
|
|
2018-01-24 17:42:12 +01:00
|
|
|
/* Returns true if journald can safely be logged to right now. */
static bool manager_journal_is_running(Manager *m) {
        Unit *sock, *svc;

        assert(m);

        /* Never assume the journal is up in test mode. */
        if (m->test_run_flags != 0)
                return false;

        /* If we are the user manager we can safely assume that the journal is up */
        if (!MANAGER_IS_SYSTEM(m))
                return true;

        /* Check that the socket is not only up, but in RUNNING state */
        sock = manager_get_unit(m, SPECIAL_JOURNALD_SOCKET);
        if (!sock || SOCKET(sock)->state != SOCKET_RUNNING)
                return false;

        /* Similar, check if the daemon itself is fully up, too */
        svc = manager_get_unit(m, SPECIAL_JOURNALD_SERVICE);
        if (!svc || !IN_SET(SERVICE(svc)->state, SERVICE_RELOAD, SERVICE_RUNNING))
                return false;

        return true;
}
|
|
|
|
|
|
|
|
void manager_recheck_journal(Manager *m) {
        assert(m);

        /* Only relevant in the special situation of being PID 1, and not
         * while a reload is in flight, since unit states might still change. */
        if (getpid_cached() != 1 || MANAGER_IS_RELOADING(m))
                return;

        /* The journal is fully and entirely up? If so, let's permit logging to it, if that's configured. If the
         * journal is down, don't ever log to it, otherwise we might end up deadlocking ourselves as we might trigger
         * an activation ourselves we can't fulfill. */
        log_set_prohibit_ipc(!manager_journal_is_running(m));
        log_open();
}
|
|
|
|
|
2014-01-28 04:27:07 +01:00
|
|
|
void manager_set_show_status(Manager *m, ShowStatus mode) {
        ShowStatus old;

        assert(m);
        assert(IN_SET(mode, SHOW_STATUS_AUTO, SHOW_STATUS_NO, SHOW_STATUS_YES, SHOW_STATUS_TEMPORARY));

        /* Status output on the console is a system-instance-only concept. */
        if (!MANAGER_IS_SYSTEM(m))
                return;

        old = m->show_status;
        m->show_status = mode;

        if (old != mode)
                log_debug("%s showing of status.",
                          mode == SHOW_STATUS_NO ? "Disabling" : "Enabling");

        /* Mirror the current setting in a flag file under /run. */
        if (mode > 0)
                (void) touch("/run/systemd/show-status");
        else
                (void) unlink("/run/systemd/show-status");
}
|
|
|
|
|
2014-10-28 04:02:54 +01:00
|
|
|
/* Decide whether a status message of the given type should be printed to the console right now. */
static bool manager_get_show_status(Manager *m, StatusType type) {
        assert(m);

        /* Only the system instance prints status output, and only as long as nobody has taken over the
         * console (no_console_output), and only during boot/shutdown. If we cannot find out the
         * ask-password status properly, just proceed. */
        return MANAGER_IS_SYSTEM(m) &&
                !m->no_console_output &&
                IN_SET(manager_state(m), MANAGER_INITIALIZING, MANAGER_STARTING, MANAGER_STOPPING) &&
                (type == STATUS_TYPE_EMERGENCY || manager_check_ask_password(m) <= 0) &&
                m->show_status > 0;
}
|
2012-11-22 00:38:55 +01:00
|
|
|
|
2016-11-02 10:38:22 +01:00
|
|
|
/* Returns the TTY to use for "confirm spawn" questions: the configured console (m->confirm_spawn) if it
 * looks usable, "/dev/console" as fallback otherwise, or NULL if no console is configured at all. */
const char *manager_get_confirm_spawn(Manager *m) {
        static int last_errno = 0;
        const char *vc = m->confirm_spawn;
        struct stat st;
        int r;

        /* Here's the deal: we want to test the validity of the console but don't want
         * PID1 to go through the whole console process which might block. But we also
         * want to warn the user only once if something is wrong with the console so we
         * cannot do the sanity checks after spawning our children. So here we simply do
         * really basic tests to hopefully trap common errors.
         *
         * If the console suddenly disappears at the time our children really need it,
         * then they will simply fail to acquire it and a positive answer will be
         * assumed. New children will fall back to /dev/console though.
         *
         * Note: TTYs are devices that can come and go any time, and frequently aren't
         * available yet during early boot (consider a USB rs232 dongle...). If for any
         * reason the configured console is not ready, we fall back to the default
         * console. */

        if (!vc || path_equal(vc, "/dev/console"))
                return vc;

        r = stat(vc, &st);
        if (r < 0)
                goto fail;

        if (!S_ISCHR(st.st_mode)) {
                /* Exists but is not a character device, hence certainly not a TTY. */
                errno = ENOTTY;
                goto fail;
        }

        last_errno = 0;
        return vc;

fail:
        /* Warn only when the failure reason changed since the previous call, so that repeated
         * invocations don't flood the log with the same message. */
        if (last_errno != errno) {
                last_errno = errno;
                /* NOTE(review): message says "open" although we only stat() — consider rewording. */
                log_warning_errno(errno, "Failed to open %s: %m, using default console", vc);
        }
        return "/dev/console";
}
|
|
|
|
|
2014-07-07 19:25:31 +02:00
|
|
|
void manager_set_first_boot(Manager *m, bool b) {
        assert(m);

        /* The first-boot concept only exists for the system instance. */
        if (!MANAGER_IS_SYSTEM(m))
                return;

        /* Note: m->first_boot is an int (tristate), hence the explicit cast for the comparison. */
        if (m->first_boot != (int) b) {
                /* Keep the flag file in /run in sync with the new state. */
                const char *flag = "/run/systemd/first-boot";

                if (b)
                        (void) touch(flag);
                else
                        (void) unlink(flag);
        }

        m->first_boot = b;
}
|
|
|
|
|
2016-11-15 09:29:04 +01:00
|
|
|
/* Globally disable "confirm spawn" by creating a flag file in /run; checked by
 * manager_is_confirm_spawn_disabled(). Best-effort, hence the ignored return value. */
void manager_disable_confirm_spawn(void) {
        (void) touch("/run/systemd/confirm_spawn_disabled");
}
|
|
|
|
|
|
|
|
bool manager_is_confirm_spawn_disabled(Manager *m) {
        /* Confirmation is trivially "disabled" when it was never configured; otherwise honour the
         * global kill-switch flag file created by manager_disable_confirm_spawn(). */
        return !m->confirm_spawn ||
                access("/run/systemd/confirm_spawn_disabled", F_OK) >= 0;
}
|
|
|
|
|
2014-10-28 04:02:54 +01:00
|
|
|
void manager_status_printf(Manager *m, StatusType type, const char *status, const char *format, ...) {
        bool ephemeral = type == STATUS_TYPE_EPHEMERAL;
        va_list ap;

        /* If m is NULL, assume we're after shutdown and let the messages through. */
        if (m && !manager_get_show_status(m, type))
                return;

        /* XXX We should totally drop the check for ephemeral here
         * and thus effectively make 'Type=idle' pointless. */
        if (ephemeral && m && m->n_on_console > 0)
                return;

        va_start(ap, format);
        status_vprintf(status, true, ephemeral, format, ap);
        va_end(ap);
}
|
|
|
|
|
2013-09-26 20:14:24 +02:00
|
|
|
/* Look up the set of units that declared RequiresMountsFor= on the given path. The path is simplified
 * first so that lookups are done in canonical form; "/" is stored under the empty-string key. */
Set *manager_get_units_requiring_mounts_for(Manager *m, const char *path) {
        assert(m);
        assert(path);

        /* Declare the VLA only after the asserts: its size expression calls strlen(), which would
         * dereference a NULL path before the assert could catch it if declared earlier. */
        char p[strlen(path)+1];

        strcpy(p, path);
        path_simplify(p, false);

        return hashmap_get(m->units_requiring_mounts_for, streq(p, "/") ? "" : p);
}
|
2014-03-03 17:14:07 +01:00
|
|
|
|
2015-09-11 17:25:35 +02:00
|
|
|
/* Add or remove a unit from the manager's set of failed units, emitting a bus change signal whenever
 * membership actually changed. Returns 0 on success, or a logged -ENOMEM on allocation failure. */
int manager_update_failed_units(Manager *m, Unit *u, bool failed) {
        unsigned old_size;

        assert(m);
        assert(u->manager == m);

        /* Remember the size so we can detect below whether membership changed. */
        old_size = set_size(m->failed_units);

        if (!failed)
                (void) set_remove(m->failed_units, u);
        else {
                if (set_ensure_allocated(&m->failed_units, NULL) < 0 ||
                    set_put(m->failed_units, u) < 0)
                        return log_oom();
        }

        if (set_size(m->failed_units) != old_size)
                bus_manager_send_change_signal(m);

        return 0;
}
|
|
|
|
|
2014-03-12 20:55:13 +01:00
|
|
|
/* Compute the manager's overall state from the states of a few special units and the failed-unit set. */
ManagerState manager_state(Manager *m) {
        Unit *unit;

        assert(m);

        /* Did we ever finish booting? If not then we are still starting up */
        if (!MANAGER_IS_FINISHED(m)) {
                unit = manager_get_unit(m, SPECIAL_BASIC_TARGET);
                if (unit && UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(unit)))
                        return MANAGER_STARTING;

                return MANAGER_INITIALIZING;
        }

        /* Is the special shutdown target active or queued? If so, we are in shutdown state */
        unit = manager_get_unit(m, SPECIAL_SHUTDOWN_TARGET);
        if (unit && unit_active_or_pending(unit))
                return MANAGER_STOPPING;

        if (MANAGER_IS_SYSTEM(m)) {
                /* Are the rescue or emergency targets active or queued? If so we are in maintenance state */
                static const char * const maintenance_targets[] = {
                        SPECIAL_RESCUE_TARGET,
                        SPECIAL_EMERGENCY_TARGET,
                };

                for (size_t i = 0; i < sizeof(maintenance_targets)/sizeof(maintenance_targets[0]); i++) {
                        unit = manager_get_unit(m, maintenance_targets[i]);
                        if (unit && unit_active_or_pending(unit))
                                return MANAGER_MAINTENANCE;
                }
        }

        /* Are there any failed units? If so, we are in degraded mode */
        if (set_size(m->failed_units) > 0)
                return MANAGER_DEGRADED;

        return MANAGER_RUNNING;
}
|
|
|
|
|
2016-08-01 19:24:40 +02:00
|
|
|
/* Highest bit of the 32bit per-UID/GID reference value: when set, the IPC objects of that UID/GID shall
 * be destroyed once the last reference is dropped. The lower 31 bits hold the actual counter. */
#define DESTROY_IPC_FLAG (UINT32_C(1) << 31)

/* Drop one reference from the given UID's (or GID's) entry in the reference table; when the counter
 * reaches zero and destroy_now is set, remove the entry and (if flagged) clean up its IPC objects. */
static void manager_unref_uid_internal(
                Manager *m,
                Hashmap **uid_refs,
                uid_t uid,
                bool destroy_now,
                int (*_clean_ipc)(uid_t uid)) {

        uint32_t c, n;

        assert(m);
        assert(uid_refs);
        assert(uid_is_valid(uid));
        assert(_clean_ipc);

        /* A generic implementation, covering both manager_unref_uid() and manager_unref_gid(), under the assumption
         * that uid_t and gid_t are actually defined the same way, with the same validity rules.
         *
         * We store a hashmap where the UID/GID is they key and the value is a 32bit reference counter, whose highest
         * bit is used as flag for marking UIDs/GIDs whose IPC objects to remove when the last reference to the UID/GID
         * is dropped. The flag is set to on, once at least one reference from a unit where RemoveIPC= is set is added
         * on a UID/GID. It is reset when the UID's/GID's reference counter drops to 0 again. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (uid == 0) /* We don't keep track of root, and will never destroy it */
                return;

        c = PTR_TO_UINT32(hashmap_get(*uid_refs, UID_TO_PTR(uid)));

        /* Separate the counter from the destroy flag; an unref on an untracked UID is a programming error. */
        n = c & ~DESTROY_IPC_FLAG;
        assert(n > 0);
        n--;

        if (destroy_now && n == 0) {
                /* Last reference gone: drop the entry entirely. */
                hashmap_remove(*uid_refs, UID_TO_PTR(uid));

                if (c & DESTROY_IPC_FLAG) {
                        log_debug("%s " UID_FMT " is no longer referenced, cleaning up its IPC.",
                                  _clean_ipc == clean_ipc_by_uid ? "UID" : "GID",
                                  uid);
                        (void) _clean_ipc(uid);
                }
        } else {
                /* Store back the decremented counter, preserving the destroy flag. Updating an existing
                 * key cannot fail, hence assert_se(). */
                c = n | (c & DESTROY_IPC_FLAG);
                assert_se(hashmap_update(*uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c)) >= 0);
        }
}
|
|
|
|
|
|
|
|
/* Drop one reference on a UID; see manager_unref_uid_internal(). */
void manager_unref_uid(Manager *m, uid_t uid, bool destroy_now) {
        manager_unref_uid_internal(m, &m->uid_refs, uid, destroy_now, clean_ipc_by_uid);
}
|
|
|
|
|
|
|
|
/* Drop one reference on a GID; relies on uid_t/gid_t having identical representation. */
void manager_unref_gid(Manager *m, gid_t gid, bool destroy_now) {
        manager_unref_uid_internal(m, &m->gid_refs, (uid_t) gid, destroy_now, clean_ipc_by_gid);
}
|
|
|
|
|
|
|
|
/* Add one reference to the given UID's (or GID's) entry in the reference table, optionally marking the
 * entry so its IPC objects get cleaned up when the last reference is dropped. Returns 0 for root (never
 * tracked), -EOVERFLOW if the 31bit counter would overflow, a negative errno on allocation failure. */
static int manager_ref_uid_internal(
                Manager *m,
                Hashmap **uid_refs,
                uid_t uid,
                bool clean_ipc) {

        uint32_t c, n;
        int r;

        assert(m);
        assert(uid_refs);
        assert(uid_is_valid(uid));

        /* A generic implementation, covering both manager_ref_uid() and manager_ref_gid(), under the assumption
         * that uid_t and gid_t are actually defined the same way, with the same validity rules. */

        assert_cc(sizeof(uid_t) == sizeof(gid_t));
        assert_cc(UID_INVALID == (uid_t) GID_INVALID);

        if (uid == 0) /* We don't keep track of root, and will never destroy it */
                return 0;

        r = hashmap_ensure_allocated(uid_refs, &trivial_hash_ops);
        if (r < 0)
                return r;

        /* Missing entries yield NULL, i.e. counter 0 with no flags — exactly what we want. */
        c = PTR_TO_UINT32(hashmap_get(*uid_refs, UID_TO_PTR(uid)));

        n = c & ~DESTROY_IPC_FLAG;
        n++;

        if (n & DESTROY_IPC_FLAG) /* check for overflow */
                return -EOVERFLOW;

        /* The destroy flag is sticky: once requested by any referencing unit, it stays set. */
        c = n | (c & DESTROY_IPC_FLAG) | (clean_ipc ? DESTROY_IPC_FLAG : 0);

        return hashmap_replace(*uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c));
}
|
|
|
|
|
|
|
|
/* Add one reference on a UID; see manager_ref_uid_internal(). */
int manager_ref_uid(Manager *m, uid_t uid, bool clean_ipc) {
        return manager_ref_uid_internal(m, &m->uid_refs, uid, clean_ipc);
}
|
|
|
|
|
|
|
|
/* Add one reference on a GID; relies on uid_t/gid_t having identical representation. */
int manager_ref_gid(Manager *m, gid_t gid, bool clean_ipc) {
        return manager_ref_uid_internal(m, &m->gid_refs, (uid_t) gid, clean_ipc);
}
|
|
|
|
|
|
|
|
/* Walk the UID/GID reference table and drop all entries whose counter is zero, cleaning up their IPC
 * objects where flagged. Used after reload/reexec, when counters have been rebuilt from scratch and
 * deserialized flag-only entries may be left with no referents. */
static void manager_vacuum_uid_refs_internal(
                Manager *m,
                Hashmap **uid_refs,
                int (*_clean_ipc)(uid_t uid)) {

        Iterator i;
        void *p, *k;

        assert(m);
        assert(uid_refs);
        assert(_clean_ipc);

        HASHMAP_FOREACH_KEY(p, k, *uid_refs, i) {
                uint32_t c, n;
                uid_t uid;

                uid = PTR_TO_UID(k);
                c = PTR_TO_UINT32(p);

                /* Still referenced? Then leave the entry alone. */
                n = c & ~DESTROY_IPC_FLAG;
                if (n > 0)
                        continue;

                if (c & DESTROY_IPC_FLAG) {
                        log_debug("Found unreferenced %s " UID_FMT " after reload/reexec. Cleaning up.",
                                  _clean_ipc == clean_ipc_by_uid ? "UID" : "GID",
                                  uid);
                        (void) _clean_ipc(uid);
                }

                /* Removal during HASHMAP_FOREACH_KEY is safe for the current entry. */
                assert_se(hashmap_remove(*uid_refs, k) == p);
        }
}
|
|
|
|
|
|
|
|
/* Vacuum unreferenced UIDs; see manager_vacuum_uid_refs_internal(). */
void manager_vacuum_uid_refs(Manager *m) {
        manager_vacuum_uid_refs_internal(m, &m->uid_refs, clean_ipc_by_uid);
}
|
|
|
|
|
|
|
|
/* Vacuum unreferenced GIDs; see manager_vacuum_uid_refs_internal(). */
void manager_vacuum_gid_refs(Manager *m) {
        manager_vacuum_uid_refs_internal(m, &m->gid_refs, clean_ipc_by_gid);
}
|
|
|
|
|
|
|
|
static void manager_serialize_uid_refs_internal(
|
|
|
|
Manager *m,
|
|
|
|
FILE *f,
|
|
|
|
Hashmap **uid_refs,
|
|
|
|
const char *field_name) {
|
|
|
|
|
|
|
|
Iterator i;
|
|
|
|
void *p, *k;
|
|
|
|
|
|
|
|
assert(m);
|
|
|
|
assert(f);
|
|
|
|
assert(uid_refs);
|
|
|
|
assert(field_name);
|
|
|
|
|
|
|
|
/* Serialize the UID reference table. Or actually, just the IPC destruction flag of it, as the actual counter
|
|
|
|
* of it is better rebuild after a reload/reexec. */
|
|
|
|
|
|
|
|
HASHMAP_FOREACH_KEY(p, k, *uid_refs, i) {
|
|
|
|
uint32_t c;
|
|
|
|
uid_t uid;
|
|
|
|
|
|
|
|
uid = PTR_TO_UID(k);
|
|
|
|
c = PTR_TO_UINT32(p);
|
|
|
|
|
|
|
|
if (!(c & DESTROY_IPC_FLAG))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
fprintf(f, "%s=" UID_FMT "\n", field_name, uid);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Serialize the UID reference table; see manager_serialize_uid_refs_internal(). */
void manager_serialize_uid_refs(Manager *m, FILE *f) {
        manager_serialize_uid_refs_internal(m, f, &m->uid_refs, "destroy-ipc-uid");
}
|
|
|
|
|
|
|
|
/* Serialize the GID reference table; see manager_serialize_uid_refs_internal(). */
void manager_serialize_gid_refs(Manager *m, FILE *f) {
        manager_serialize_uid_refs_internal(m, f, &m->gid_refs, "destroy-ipc-gid");
}
|
|
|
|
|
|
|
|
static void manager_deserialize_uid_refs_one_internal(
|
|
|
|
Manager *m,
|
|
|
|
Hashmap** uid_refs,
|
|
|
|
const char *value) {
|
|
|
|
|
|
|
|
uid_t uid;
|
|
|
|
uint32_t c;
|
|
|
|
int r;
|
|
|
|
|
|
|
|
assert(m);
|
|
|
|
assert(uid_refs);
|
|
|
|
assert(value);
|
|
|
|
|
|
|
|
r = parse_uid(value, &uid);
|
|
|
|
if (r < 0 || uid == 0) {
|
|
|
|
log_debug("Unable to parse UID reference serialization");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
r = hashmap_ensure_allocated(uid_refs, &trivial_hash_ops);
|
|
|
|
if (r < 0) {
|
|
|
|
log_oom();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
c = PTR_TO_UINT32(hashmap_get(*uid_refs, UID_TO_PTR(uid)));
|
|
|
|
if (c & DESTROY_IPC_FLAG)
|
|
|
|
return;
|
|
|
|
|
|
|
|
c |= DESTROY_IPC_FLAG;
|
|
|
|
|
|
|
|
r = hashmap_replace(*uid_refs, UID_TO_PTR(uid), UINT32_TO_PTR(c));
|
|
|
|
if (r < 0) {
|
|
|
|
log_debug("Failed to add UID reference entry");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Deserialize one UID reference entry; see manager_deserialize_uid_refs_one_internal(). */
void manager_deserialize_uid_refs_one(Manager *m, const char *value) {
        manager_deserialize_uid_refs_one_internal(m, &m->uid_refs, value);
}
|
|
|
|
|
|
|
|
/* Deserialize one GID reference entry; see manager_deserialize_uid_refs_one_internal(). */
void manager_deserialize_gid_refs_one(Manager *m, const char *value) {
        manager_deserialize_uid_refs_one_internal(m, &m->gid_refs, value);
}
|
|
|
|
|
|
|
|
/* Event-loop callback for the user-lookup socketpair. Always returns 0 (invalid datagrams are logged
 * and dropped, so one bad message never kills the event source). */
int manager_dispatch_user_lookup_fd(sd_event_source *source, int fd, uint32_t revents, void *userdata) {
        /* Wire format of the datagram: UID, GID, then the (non-NUL-terminated) unit name. _packed_ so
         * that offsetof() arithmetic below matches what the sender wrote. */
        struct buffer {
                uid_t uid;
                gid_t gid;
                char unit_name[UNIT_NAME_MAX+1];
        } _packed_ buffer;

        Manager *m = userdata;
        ssize_t l;
        size_t n;
        Unit *u;

        assert_se(source);
        assert_se(m);

        /* Invoked whenever a child process succeeded resolving its user/group to use and sent us the resulting UID/GID
         * in a datagram. We parse the datagram here and pass it off to the unit, so that it can add a reference to the
         * UID/GID so that it can destroy the UID/GID's IPC objects when the reference counter drops to 0. */

        l = recv(fd, &buffer, sizeof(buffer), MSG_DONTWAIT);
        if (l < 0) {
                if (IN_SET(errno, EINTR, EAGAIN))
                        return 0;

                return log_error_errno(errno, "Failed to read from user lookup fd: %m");
        }

        /* The message must contain the full UID/GID header plus a non-empty unit name... */
        if ((size_t) l <= offsetof(struct buffer, unit_name)) {
                log_warning("Received too short user lookup message, ignoring.");
                return 0;
        }

        /* ...and must leave room for the NUL terminator we append below. */
        if ((size_t) l > offsetof(struct buffer, unit_name) + UNIT_NAME_MAX) {
                log_warning("Received too long user lookup message, ignoring.");
                return 0;
        }

        if (!uid_is_valid(buffer.uid) && !gid_is_valid(buffer.gid)) {
                log_warning("Got user lookup message with invalid UID/GID pair, ignoring.");
                return 0;
        }

        /* Reject embedded NULs: the unit name is implicitly length-delimited, a NUL would hide trailing bytes. */
        n = (size_t) l - offsetof(struct buffer, unit_name);
        if (memchr(buffer.unit_name, 0, n)) {
                log_warning("Received lookup message with embedded NUL character, ignoring.");
                return 0;
        }

        /* n <= UNIT_NAME_MAX (checked above), so this write stays within the array. */
        buffer.unit_name[n] = 0;
        u = manager_get_unit(m, buffer.unit_name);
        if (!u) {
                log_debug("Got user lookup message but unit doesn't exist, ignoring.");
                return 0;
        }

        log_unit_debug(u, "User lookup succeeded: uid=" UID_FMT " gid=" GID_FMT, buffer.uid, buffer.gid);

        unit_notify_user_lookup(u, buffer.uid, buffer.gid);
        return 0;
}
|
|
|
|
|
2017-12-07 11:27:07 +01:00
|
|
|
/* Build a colon-separated "taint string" describing runtime conditions. Caller owns the returned
 * allocation; returns NULL on OOM. */
char *manager_taint_string(Manager *m) {
        _cleanup_free_ char *destination = NULL, *overflowuid = NULL, *overflowgid = NULL;
        char *buf, *e;
        int r;

        /* Returns a "taint string", e.g. "local-hwclock:var-run-bad".
         * Only things that are detected at runtime should be tagged
         * here. For stuff that is set during compilation, emit a warning
         * in the configuration phase. */

        assert(m);

        /* sizeof of the concatenated literal = sum of all tag lengths + 1 (single trailing NUL), i.e.
         * exactly the worst case of every tag being appended. Keep this list in sync with the tags below. */
        buf = new(char, sizeof("split-usr:"
                               "cgroups-missing:"
                               "local-hwclock:"
                               "var-run-bad:"
                               "overflowuid-not-65534:"
                               "overflowgid-not-65534:"));
        if (!buf)
                return NULL;

        /* e tracks the write position; start with the empty string in case no tag applies. */
        e = buf;
        buf[0] = 0;

        if (m->taint_usr)
                e = stpcpy(e, "split-usr:");

        if (access("/proc/cgroups", F_OK) < 0)
                e = stpcpy(e, "cgroups-missing:");

        if (clock_is_localtime(NULL) > 0)
                e = stpcpy(e, "local-hwclock:");

        /* /var/run should be a symlink to /run; anything else confuses path comparisons. */
        r = readlink_malloc("/var/run", &destination);
        if (r < 0 || !PATH_IN_SET(destination, "../run", "/run"))
                e = stpcpy(e, "var-run-bad:");

        r = read_one_line_file("/proc/sys/kernel/overflowuid", &overflowuid);
        if (r >= 0 && !streq(overflowuid, "65534"))
                e = stpcpy(e, "overflowuid-not-65534:");

        r = read_one_line_file("/proc/sys/kernel/overflowgid", &overflowgid);
        if (r >= 0 && !streq(overflowgid, "65534"))
                e = stpcpy(e, "overflowgid-not-65534:");

        /* remove the last ':' */
        if (e != buf)
                e[-1] = 0;

        return buf;
}
|
|
|
|
|
2018-01-24 19:59:55 +01:00
|
|
|
/* Record that one more unit currently owns/uses the console (suppresses ephemeral status output). */
void manager_ref_console(Manager *m) {
        assert(m);

        m->n_on_console++;
}
|
|
|
|
|
|
|
|
/* Drop one console reference; when the count reaches zero the console is free again, so the
 * no_console_output suppression flag is cleared. */
void manager_unref_console(Manager *m) {
        assert(m); /* consistent with manager_ref_console(); catches NULL before the dereference below */

        assert(m->n_on_console > 0);
        m->n_on_console--;

        if (m->n_on_console == 0)
                m->no_console_output = false; /* unset no_console_output flag, since the console is definitely free now */
}
|
|
|
|
|
2014-03-12 20:55:13 +01:00
|
|
|
/* Human-readable names for ManagerState values. */
static const char *const manager_state_table[_MANAGER_STATE_MAX] = {
        [MANAGER_INITIALIZING] = "initializing",
        [MANAGER_STARTING] = "starting",
        [MANAGER_RUNNING] = "running",
        [MANAGER_DEGRADED] = "degraded",
        [MANAGER_MAINTENANCE] = "maintenance",
        [MANAGER_STOPPING] = "stopping",
};

/* Generates manager_state_to_string()/manager_state_from_string() from the table above. */
DEFINE_STRING_TABLE_LOOKUP(manager_state, ManagerState);
|
2017-11-20 21:01:13 +01:00
|
|
|
|
|
|
|
/* Human-readable names for the boot/startup timestamps the manager records. */
static const char *const manager_timestamp_table[_MANAGER_TIMESTAMP_MAX] = {
        [MANAGER_TIMESTAMP_FIRMWARE] = "firmware",
        [MANAGER_TIMESTAMP_LOADER] = "loader",
        [MANAGER_TIMESTAMP_KERNEL] = "kernel",
        [MANAGER_TIMESTAMP_INITRD] = "initrd",
        [MANAGER_TIMESTAMP_USERSPACE] = "userspace",
        [MANAGER_TIMESTAMP_FINISH] = "finish",
        [MANAGER_TIMESTAMP_SECURITY_START] = "security-start",
        [MANAGER_TIMESTAMP_SECURITY_FINISH] = "security-finish",
        [MANAGER_TIMESTAMP_GENERATORS_START] = "generators-start",
        [MANAGER_TIMESTAMP_GENERATORS_FINISH] = "generators-finish",
        [MANAGER_TIMESTAMP_UNITS_LOAD_START] = "units-load-start",
        [MANAGER_TIMESTAMP_UNITS_LOAD_FINISH] = "units-load-finish",
};

/* Generates manager_timestamp_to_string()/manager_timestamp_from_string() from the table above. */
DEFINE_STRING_TABLE_LOOKUP(manager_timestamp, ManagerTimestamp);
|