userdbd: add new service that can merge userdb queries from multiple clients

Lennart Poettering 2019-07-04 18:33:30 +02:00
parent 295c1a6e45
commit d093b62c94
10 changed files with 1270 additions and 0 deletions

meson.build

@ -243,6 +243,7 @@ conf.set_quoted('SYSTEMD_EXPORT_PATH', join_paths(rootlib
conf.set_quoted('VENDOR_KEYRING_PATH', join_paths(rootlibexecdir, 'import-pubring.gpg'))
conf.set_quoted('USER_KEYRING_PATH', join_paths(pkgsysconfdir, 'import-pubring.gpg'))
conf.set_quoted('DOCUMENT_ROOT', join_paths(pkgdatadir, 'gatewayd'))
conf.set_quoted('SYSTEMD_USERWORK_PATH', join_paths(rootlibexecdir, 'systemd-userwork'))
conf.set10('MEMORY_ACCOUNTING_DEFAULT', memory_accounting_default)
conf.set_quoted('MEMORY_ACCOUNTING_DEFAULT_YES_NO', memory_accounting_default ? 'yes' : 'no')
conf.set('STATUS_UNIT_FORMAT_DEFAULT', 'STATUS_UNIT_FORMAT_' + status_unit_format_default.to_upper())
@ -1322,6 +1323,7 @@ foreach term : ['utmp',
'localed',
'machined',
'portabled',
'userdb',
'networkd',
'timedated',
'timesyncd',
@ -1538,6 +1540,7 @@ subdir('src/kernel-install')
subdir('src/locale')
subdir('src/machine')
subdir('src/portable')
subdir('src/userdb')
subdir('src/nspawn')
subdir('src/resolve')
subdir('src/timedate')
@ -1974,6 +1977,26 @@ if conf.get('ENABLE_PORTABLED') == 1
public_programs += exe
endif
if conf.get('ENABLE_USERDB') == 1
executable('systemd-userwork',
systemd_userwork_sources,
include_directories : includes,
link_with : [libshared],
dependencies : [threads],
install_rpath : rootlibexecdir,
install : true,
install_dir : rootlibexecdir)
executable('systemd-userdbd',
systemd_userdbd_sources,
include_directories : includes,
link_with : [libshared],
dependencies : [threads],
install_rpath : rootlibexecdir,
install : true,
install_dir : rootlibexecdir)
endif
foreach alias : ['halt', 'poweroff', 'reboot', 'runlevel', 'shutdown', 'telinit']
meson.add_install_script(meson_make_symlink,
join_paths(rootbindir, 'systemctl'),
@ -3252,6 +3275,7 @@ foreach tuple : [
['logind'],
['machined'],
['portabled'],
['userdb'],
['importd'],
['hostnamed'],
['timedated'],

meson_options.txt

@ -94,6 +94,8 @@ option('machined', type : 'boolean',
description : 'install the systemd-machined stack')
option('portabled', type : 'boolean',
description : 'install the systemd-portabled stack')
option('userdb', type : 'boolean',
description : 'install the systemd-userdbd stack')
option('networkd', type : 'boolean',
description : 'install the systemd-networkd stack')
option('timedated', type : 'boolean',

src/userdb/meson.build Normal file

@ -0,0 +1,11 @@
# SPDX-License-Identifier: LGPL-2.1+
systemd_userwork_sources = files('''
userwork.c
'''.split())
systemd_userdbd_sources = files('''
userdbd-manager.c
userdbd-manager.h
userdbd.c
'''.split())

src/userdb/userdbd-manager.c Normal file

@ -0,0 +1,302 @@
/* SPDX-License-Identifier: LGPL-2.1+ */
#include <sys/wait.h>
#include "sd-daemon.h"
#include "fd-util.h"
#include "fs-util.h"
#include "mkdir.h"
#include "process-util.h"
#include "set.h"
#include "signal-util.h"
#include "socket-util.h"
#include "stdio-util.h"
#include "umask-util.h"
#include "userdbd-manager.h"
#define LISTEN_TIMEOUT_USEC (25 * USEC_PER_SEC)
static int start_workers(Manager *m, bool explicit_request);
static int on_sigchld(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
Manager *m = userdata;
assert(s);
assert(m);
for (;;) {
siginfo_t siginfo = {};
bool removed = false;
if (waitid(P_ALL, 0, &siginfo, WNOHANG|WEXITED) < 0) {
if (errno == ECHILD)
break;
log_warning_errno(errno, "Failed to invoke waitid(): %m");
break;
}
if (siginfo.si_pid == 0)
break;
if (set_remove(m->workers_dynamic, PID_TO_PTR(siginfo.si_pid)))
removed = true;
if (set_remove(m->workers_fixed, PID_TO_PTR(siginfo.si_pid)))
removed = true;
if (!removed) {
log_warning("Weird, got SIGCHLD for unknown child " PID_FMT ", ignoring.", siginfo.si_pid);
continue;
}
if (siginfo.si_code == CLD_EXITED) {
if (siginfo.si_status == EXIT_SUCCESS)
log_debug("Worker " PID_FMT " exited successfully.", siginfo.si_pid);
else
log_warning("Worker " PID_FMT " died with a failure exit status %i, ignoring.", siginfo.si_pid, siginfo.si_status);
} else if (siginfo.si_code == CLD_KILLED)
log_warning("Worker " PID_FMT " was killed by signal %s, ignoring.", siginfo.si_pid, signal_to_string(siginfo.si_status));
else if (siginfo.si_code == CLD_DUMPED)
log_warning("Worker " PID_FMT " dumped core by signal %s, ignoring.", siginfo.si_pid, signal_to_string(siginfo.si_status));
else
log_warning("Can't handle SIGCHLD of this type");
}
(void) start_workers(m, false); /* Fill up workers again if we fell below the low watermark */
return 0;
}
static int on_sigusr2(sd_event_source *s, const struct signalfd_siginfo *si, void *userdata) {
Manager *m = userdata;
assert(s);
assert(m);
(void) start_workers(m, true); /* Workers told us there's more work, let's add one more worker as long as we are below the high watermark */
return 0;
}
int manager_new(Manager **ret) {
_cleanup_(manager_freep) Manager *m = NULL;
int r;
m = new(Manager, 1);
if (!m)
return -ENOMEM;
*m = (Manager) {
.listen_fd = -1,
.worker_ratelimit = {
.interval = 5 * USEC_PER_SEC,
.burst = 50,
},
};
r = sd_event_new(&m->event);
if (r < 0)
return r;
r = sd_event_add_signal(m->event, NULL, SIGINT, NULL, NULL);
if (r < 0)
return r;
r = sd_event_add_signal(m->event, NULL, SIGTERM, NULL, NULL);
if (r < 0)
return r;
(void) sd_event_set_watchdog(m->event, true);
m->workers_fixed = set_new(NULL);
m->workers_dynamic = set_new(NULL);
if (!m->workers_fixed || !m->workers_dynamic)
return -ENOMEM;
r = sd_event_add_signal(m->event, &m->sigusr2_event_source, SIGUSR2, on_sigusr2, m);
if (r < 0)
return r;
r = sd_event_add_signal(m->event, &m->sigchld_event_source, SIGCHLD, on_sigchld, m);
if (r < 0)
return r;
*ret = TAKE_PTR(m);
return 0;
}
Manager* manager_free(Manager *m) {
if (!m)
return NULL;
set_free(m->workers_fixed);
set_free(m->workers_dynamic);
sd_event_source_disable_unref(m->sigusr2_event_source);
sd_event_source_disable_unref(m->sigchld_event_source);
sd_event_unref(m->event);
return mfree(m);
}
static size_t manager_current_workers(Manager *m) {
assert(m);
return set_size(m->workers_fixed) + set_size(m->workers_dynamic);
}
static int start_one_worker(Manager *m) {
bool fixed;
pid_t pid;
int r;
assert(m);
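/* The first USERDB_WORKERS_MIN workers are "fixed", the rest are "dynamic": the distinction is passed
 * to the worker via $USERDB_FIXED_WORKER below. Fixed workers stick around even when idle, dynamic
 * workers exit again after LISTEN_IDLE_USEC without a connection (see userwork.c), shrinking the pool
 * back to the low watermark. */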
fixed = set_size(m->workers_fixed) < USERDB_WORKERS_MIN;
r = safe_fork("(sd-worker)", FORK_RESET_SIGNALS|FORK_DEATHSIG|FORK_LOG, &pid);
if (r < 0)
return log_error_errno(r, "Failed to fork new worker child: %m");
if (r == 0) {
char pids[DECIMAL_STR_MAX(pid_t)];
/* Child */
log_close();
r = close_all_fds(&m->listen_fd, 1);
if (r < 0) {
log_error_errno(r, "Failed to close fds in child: %m");
_exit(EXIT_FAILURE);
}
log_open();
if (m->listen_fd == 3) {
r = fd_cloexec(3, false);
if (r < 0) {
log_error_errno(r, "Failed to turn off O_CLOEXEC for fd 3: %m");
_exit(EXIT_FAILURE);
}
} else {
if (dup2(m->listen_fd, 3) < 0) { /* dup2() creates with O_CLOEXEC off */
log_error_errno(errno, "Failed to move listen fd to 3: %m");
_exit(EXIT_FAILURE);
}
safe_close(m->listen_fd);
}
xsprintf(pids, PID_FMT, pid);
if (setenv("LISTEN_PID", pids, 1) < 0) {
log_error_errno(errno, "Failed to set $LISTEN_PID: %m");
_exit(EXIT_FAILURE);
}
if (setenv("LISTEN_FDS", "1", 1) < 0) {
log_error_errno(errno, "Failed to set $LISTEN_FDS: %m");
_exit(EXIT_FAILURE);
}
if (setenv("USERDB_FIXED_WORKER", one_zero(fixed), 1) < 0) {
log_error_errno(errno, "Failed to set $USERDB_FIXED_WORKER: %m");
_exit(EXIT_FAILURE);
}
/* execl("/home/lennart/projects/systemd/build/systemd-userwork", "systemd-userwork", "xxxxxxxxxxxxxxxx", NULL); /\* With some extra space rename_process() can make use of *\/ */
/* execl("/usr/bin/valgrind", "valgrind", "/home/lennart/projects/systemd/build/systemd-userwork", "systemd-userwork", "xxxxxxxxxxxxxxxx", NULL); /\* With some extra space rename_process() can make use of *\/ */
execl(SYSTEMD_USERWORK_PATH, "systemd-userwork", "xxxxxxxxxxxxxxxx", NULL); /* With some extra space rename_process() can make use of */
log_error_errno(errno, "Failed start worker process: %m");
_exit(EXIT_FAILURE);
}
if (fixed)
r = set_put(m->workers_fixed, PID_TO_PTR(pid));
else
r = set_put(m->workers_dynamic, PID_TO_PTR(pid));
if (r < 0)
return log_error_errno(r, "Failed to add child process to set: %m");
return 0;
}
static int start_workers(Manager *m, bool explicit_request) {
int r;
assert(m);
for (;;) {
size_t n;
n = manager_current_workers(m);
if (n >= USERDB_WORKERS_MIN && (!explicit_request || n >= USERDB_WORKERS_MAX))
break;
if (!ratelimit_below(&m->worker_ratelimit)) {
/* If we keep starting workers too often, let's fail the whole daemon, something is wrong */
sd_event_exit(m->event, EXIT_FAILURE);
return log_error_errno(SYNTHETIC_ERRNO(EUCLEAN), "Worker threads requested too frequently, something is wrong.");
}
r = start_one_worker(m);
if (r < 0)
return r;
explicit_request = false;
}
return 0;
}
int manager_startup(Manager *m) {
struct timeval ts;
int n, r;
assert(m);
assert(m->listen_fd < 0);
n = sd_listen_fds(false);
if (n < 0)
return log_error_errno(n, "Failed to determine number of passed file descriptors: %m");
if (n > 1)
return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "Expected one listening fd, got %i.", n);
if (n == 1)
m->listen_fd = SD_LISTEN_FDS_START;
else {
union sockaddr_union sockaddr;
r = sockaddr_un_set_path(&sockaddr.un, "/run/systemd/userdb/io.systemd.NameServiceSwitch");
if (r < 0)
return log_error_errno(r, "Cannot assign socket path to socket address: %m");
r = mkdir_p("/run/systemd/userdb", 0755);
if (r < 0)
return log_error_errno(r, "Failed to create /run/systemd/userdb: %m");
m->listen_fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC, 0);
if (m->listen_fd < 0)
return log_error_errno(errno, "Failed to bind on socket: %m");
(void) sockaddr_un_unlink(&sockaddr.un);
RUN_WITH_UMASK(0000)
if (bind(m->listen_fd, &sockaddr.sa, SOCKADDR_UN_LEN(sockaddr.un)) < 0)
return log_error_errno(errno, "Failed to bind socket: %m");
r = symlink_idempotent("io.systemd.NameServiceSwitch", "/run/systemd/userdb/io.systemd.Multiplexer", false);
if (r < 0)
return log_error_errno(r, "Failed to bind io.systemd.Multiplexer: %m");
if (listen(m->listen_fd, SOMAXCONN) < 0)
return log_error_errno(errno, "Failed to listen on socket: %m");
}
/* Let's make sure every accept() call on this socket times out after 25s. This allows workers to be
* GC'ed on idle */
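/* (On the worker side the resulting EAGAIN from accept4() is treated as an idle tick: dynamic workers
 * track how long they have gone without a connection and exit once LISTEN_IDLE_USEC has passed, see
 * userwork.c.) */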
if (setsockopt(m->listen_fd, SOL_SOCKET, SO_RCVTIMEO, timeval_store(&ts, LISTEN_TIMEOUT_USEC), sizeof(ts)) < 0)
return log_error_errno(errno, "Failed to se SO_RCVTIMEO: %m");
return start_workers(m, false);
}

src/userdb/userdbd-manager.h Normal file

@ -0,0 +1,34 @@
/* SPDX-License-Identifier: LGPL-2.1+ */
#pragma once
#include "sd-bus.h"
#include "sd-event.h"
typedef struct Manager Manager;
#include "hashmap.h"
#include "varlink.h"
#include "ratelimit.h"
#define USERDB_WORKERS_MIN 3
#define USERDB_WORKERS_MAX 4096
struct Manager {
sd_event *event;
Set *workers_fixed; /* Workers 0…USERDB_WORKERS_MIN */
Set *workers_dynamic; /* Workers USERDB_WORKERS_MIN+1…USERDB_WORKERS_MAX */
sd_event_source *sigusr2_event_source;
sd_event_source *sigchld_event_source;
int listen_fd;
RateLimit worker_ratelimit;
};
int manager_new(Manager **ret);
Manager* manager_free(Manager *m);
DEFINE_TRIVIAL_CLEANUP_FUNC(Manager*, manager_free);
int manager_startup(Manager *m);

src/userdb/userdbd.c Normal file

@ -0,0 +1,56 @@
/* SPDX-License-Identifier: LGPL-2.1+ */
#include <sys/stat.h>
#include <sys/types.h>
#include "daemon-util.h"
#include "userdbd-manager.h"
#include "log.h"
#include "main-func.h"
#include "signal-util.h"
/* This service offers two Varlink services, both implementing io.systemd.UserDatabase:
*
* io.systemd.NameServiceSwitch: this is a compatibility interface for glibc NSS: it responds to
* name lookups by checking the classic NSS interfaces and returning what they report.
*
* io.systemd.Multiplexer: this multiplexes lookup requests to all Varlink services that have a
* socket in /run/systemd/userdb/. It's supposed to simplify clients that don't want to implement
* the full iterative logic on their own.
*/
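/* To illustrate the protocol (example only, not part of this commit): a lookup against either socket
 * is a Varlink call of io.systemd.UserDatabase.GetUserRecord, roughly:
 *
 *   → { "method" : "io.systemd.UserDatabase.GetUserRecord",
 *       "parameters" : { "userName" : "lennart", "service" : "io.systemd.Multiplexer" } }
 *   ← { "parameters" : { "record" : { … }, "incomplete" : false } }
 *
 * Enumeration queries (no "userName"/"uid" parameter) stream one record per reply with the Varlink
 * "continues" flag set, which is what the varlink_notify() calls in userwork.c produce. */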
static int run(int argc, char *argv[]) {
_cleanup_(notify_on_cleanup) const char *notify_stop = NULL;
_cleanup_(manager_freep) Manager *m = NULL;
int r;
log_setup_service();
umask(0022);
if (argc != 1)
return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "This program takes no arguments.");
if (setenv("SYSTEMD_BYPASS_USERDB", "io.systemd.NameServiceSwitch:io.systemd.Multiplexer", 1) < 0)
return log_error_errno(errno, "Failed to se $SYSTEMD_BYPASS_USERDB: %m");
assert_se(sigprocmask_many(SIG_BLOCK, NULL, SIGCHLD, SIGTERM, SIGINT, SIGUSR2, -1) >= 0);
r = manager_new(&m);
if (r < 0)
return log_error_errno(r, "Could not create manager: %m");
r = manager_startup(m);
if (r < 0)
return log_error_errno(r, "Failed to start up daemon: %m");
notify_stop = notify_start(NOTIFY_READY, NOTIFY_STOPPING);
r = sd_event_loop(m->event);
if (r < 0)
return log_error_errno(r, "Event loop failed: %m");
return 0;
}
DEFINE_MAIN_FUNCTION(run);

src/userdb/userwork.c Normal file

@ -0,0 +1,778 @@
/* SPDX-License-Identifier: LGPL-2.1+ */
#include <poll.h>
#include <sys/wait.h>
#include "sd-daemon.h"
#include "env-util.h"
#include "fd-util.h"
#include "group-record-nss.h"
#include "group-record.h"
#include "main-func.h"
#include "process-util.h"
#include "strv.h"
#include "time-util.h"
#include "user-record-nss.h"
#include "user-record.h"
#include "user-util.h"
#include "userdb.h"
#include "varlink.h"
#define ITERATIONS_MAX 64U
#define RUNTIME_MAX_USEC (5 * USEC_PER_MINUTE)
#define PRESSURE_SLEEP_TIME_USEC (50 * USEC_PER_MSEC)
#define CONNECTION_IDLE_USEC (15 * USEC_PER_SEC)
#define LISTEN_IDLE_USEC (90 * USEC_PER_SEC)
typedef struct LookupParameters {
const char *user_name;
const char *group_name;
union {
uid_t uid;
gid_t gid;
};
const char *service;
} LookupParameters;
static int add_nss_service(JsonVariant **v) {
_cleanup_(json_variant_unrefp) JsonVariant *status = NULL, *z = NULL;
char buf[SD_ID128_STRING_MAX];
sd_id128_t mid;
int r;
assert(v);
/* Patch in service field if it's missing. The assumption here is that this field is unset only for
* NSS records */
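/* Illustrative sketch of the patched shape: the per-machine "status" section of the record, keyed by
 * the local machine ID, gains a "service" field, e.g.:
 *
 *   { …, "status" : { "<machine-id>" : { "service" : "io.systemd.NameServiceSwitch", … } } }
 */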
if (json_variant_by_key(*v, "service"))
return 0;
r = sd_id128_get_machine(&mid);
if (r < 0)
return r;
status = json_variant_ref(json_variant_by_key(*v, "status"));
z = json_variant_ref(json_variant_by_key(status, sd_id128_to_string(mid, buf)));
if (json_variant_by_key(z, "service"))
return 0;
r = json_variant_set_field_string(&z, "service", "io.systemd.NameServiceSwitch");
if (r < 0)
return r;
r = json_variant_set_field(&status, buf, z);
if (r < 0)
return r;
return json_variant_set_field(v, "status", status);
}
static int build_user_json(Varlink *link, UserRecord *ur, JsonVariant **ret) {
_cleanup_(user_record_unrefp) UserRecord *stripped = NULL;
_cleanup_(json_variant_unrefp) JsonVariant *v = NULL;
UserRecordLoadFlags flags;
uid_t peer_uid;
bool trusted;
int r;
assert(ur);
assert(ret);
r = varlink_get_peer_uid(link, &peer_uid);
if (r < 0) {
log_debug_errno(r, "Unable to query peer UID, ignoring: %m");
trusted = false;
} else
trusted = peer_uid == 0 || peer_uid == ur->uid;
flags = USER_RECORD_REQUIRE_REGULAR|USER_RECORD_ALLOW_PER_MACHINE|USER_RECORD_ALLOW_BINDING|USER_RECORD_STRIP_SECRET|USER_RECORD_ALLOW_STATUS|USER_RECORD_ALLOW_SIGNATURE;
if (trusted)
flags |= USER_RECORD_ALLOW_PRIVILEGED;
else
flags |= USER_RECORD_STRIP_PRIVILEGED;
r = user_record_clone(ur, flags, &stripped);
if (r < 0)
return r;
stripped->incomplete =
ur->incomplete ||
(FLAGS_SET(ur->mask, USER_RECORD_PRIVILEGED) &&
!FLAGS_SET(stripped->mask, USER_RECORD_PRIVILEGED));
v = json_variant_ref(stripped->json);
r = add_nss_service(&v);
if (r < 0)
return r;
return json_build(ret, JSON_BUILD_OBJECT(
JSON_BUILD_PAIR("record", JSON_BUILD_VARIANT(v)),
JSON_BUILD_PAIR("incomplete", JSON_BUILD_BOOLEAN(stripped->incomplete))));
}
static int vl_method_get_user_record(Varlink *link, JsonVariant *parameters, VarlinkMethodFlags flags, void *userdata) {
static const JsonDispatch dispatch_table[] = {
{ "uid", JSON_VARIANT_UNSIGNED, json_dispatch_uid_gid, offsetof(LookupParameters, uid), 0 },
{ "userName", JSON_VARIANT_STRING, json_dispatch_const_string, offsetof(LookupParameters, user_name), 0 },
{ "service", JSON_VARIANT_STRING, json_dispatch_const_string, offsetof(LookupParameters, service), 0 },
{}
};
_cleanup_(json_variant_unrefp) JsonVariant *v = NULL;
_cleanup_(user_record_unrefp) UserRecord *hr = NULL;
LookupParameters p = {
.uid = UID_INVALID,
};
int r;
assert(parameters);
r = json_dispatch(parameters, dispatch_table, NULL, 0, &p);
if (r < 0)
return r;
if (streq_ptr(p.service, "io.systemd.NameServiceSwitch")) {
if (uid_is_valid(p.uid))
r = nss_user_record_by_uid(p.uid, &hr);
else if (p.user_name)
r = nss_user_record_by_name(p.user_name, &hr);
else {
_cleanup_(json_variant_unrefp) JsonVariant *last = NULL;
setpwent();
for (;;) {
_cleanup_(user_record_unrefp) UserRecord *z = NULL;
_cleanup_free_ char *sbuf = NULL;
struct passwd *pw;
struct spwd spwd;
errno = 0;
pw = getpwent();
if (!pw) {
if (errno != 0)
log_debug_errno(errno, "Failure while iterating through NSS user database, ignoring: %m");
break;
}
r = nss_spwd_for_passwd(pw, &spwd, &sbuf);
if (r < 0)
log_debug_errno(r, "Failed to acquire shadow entry for user %s, ignoring: %m", pw->pw_name);
r = nss_passwd_to_user_record(pw, r >= 0 ? &spwd : NULL, &z);
if (r < 0) {
endpwent();
return r;
}
if (last) {
r = varlink_notify(link, last);
if (r < 0) {
endpwent();
return r;
}
last = json_variant_unref(last);
}
r = build_user_json(link, z, &last);
if (r < 0) {
endpwent();
return r;
}
}
endpwent();
if (!last)
return varlink_error(link, "io.systemd.UserDatabase.NoRecordFound", NULL);
return varlink_reply(link, last);
}
} else if (streq_ptr(p.service, "io.systemd.Multiplexer")) {
if (uid_is_valid(p.uid))
r = userdb_by_uid(p.uid, USERDB_AVOID_MULTIPLEXER, &hr);
else if (p.user_name)
r = userdb_by_name(p.user_name, USERDB_AVOID_MULTIPLEXER, &hr);
else {
_cleanup_(userdb_iterator_freep) UserDBIterator *iterator = NULL;
_cleanup_(json_variant_unrefp) JsonVariant *last = NULL;
r = userdb_all(USERDB_AVOID_MULTIPLEXER, &iterator);
if (r < 0)
return r;
for (;;) {
_cleanup_(user_record_unrefp) UserRecord *z = NULL;
r = userdb_iterator_get(iterator, &z);
if (r == -ESRCH)
break;
if (r < 0)
return r;
if (last) {
r = varlink_notify(link, last);
if (r < 0)
return r;
last = json_variant_unref(last);
}
r = build_user_json(link, z, &last);
if (r < 0)
return r;
}
if (!last)
return varlink_error(link, "io.systemd.UserDatabase.NoRecordFound", NULL);
return varlink_reply(link, last);
}
} else
return varlink_error(link, "io.systemd.UserDatabase.BadService", NULL);
if (r == -ESRCH)
return varlink_error(link, "io.systemd.UserDatabase.NoRecordFound", NULL);
if (r < 0) {
log_debug_errno(r, "User lookup failed abnormally: %m");
return varlink_error(link, "io.systemd.UserDatabase.ServiceNotAvailable", NULL);
}
if ((uid_is_valid(p.uid) && hr->uid != p.uid) ||
(p.user_name && !streq(hr->user_name, p.user_name)))
return varlink_error(link, "io.systemd.UserDatabase.ConflictingRecordFound", NULL);
r = build_user_json(link, hr, &v);
if (r < 0)
return r;
return varlink_reply(link, v);
}
static int build_group_json(Varlink *link, GroupRecord *gr, JsonVariant **ret) {
_cleanup_(group_record_unrefp) GroupRecord *stripped = NULL;
_cleanup_(json_variant_unrefp) JsonVariant *v = NULL;
UserRecordLoadFlags flags;
uid_t peer_uid;
bool trusted;
int r;
assert(gr);
assert(ret);
r = varlink_get_peer_uid(link, &peer_uid);
if (r < 0) {
log_debug_errno(r, "Unable to query peer UID, ignoring: %m");
trusted = false;
} else
trusted = peer_uid == 0;
flags = USER_RECORD_REQUIRE_REGULAR|USER_RECORD_ALLOW_PER_MACHINE|USER_RECORD_ALLOW_BINDING|USER_RECORD_STRIP_SECRET|USER_RECORD_ALLOW_STATUS|USER_RECORD_ALLOW_SIGNATURE;
if (trusted)
flags |= USER_RECORD_ALLOW_PRIVILEGED;
else
flags |= USER_RECORD_STRIP_PRIVILEGED;
r = group_record_clone(gr, flags, &stripped);
if (r < 0)
return r;
stripped->incomplete =
gr->incomplete ||
(FLAGS_SET(gr->mask, USER_RECORD_PRIVILEGED) &&
!FLAGS_SET(stripped->mask, USER_RECORD_PRIVILEGED));
v = json_variant_ref(gr->json);
r = add_nss_service(&v);
if (r < 0)
return r;
return json_build(ret, JSON_BUILD_OBJECT(
JSON_BUILD_PAIR("record", JSON_BUILD_VARIANT(v)),
JSON_BUILD_PAIR("incomplete", JSON_BUILD_BOOLEAN(stripped->incomplete))));
}
static int vl_method_get_group_record(Varlink *link, JsonVariant *parameters, VarlinkMethodFlags flags, void *userdata) {
static const JsonDispatch dispatch_table[] = {
{ "gid", JSON_VARIANT_UNSIGNED, json_dispatch_uid_gid, offsetof(LookupParameters, gid), 0 },
{ "groupName", JSON_VARIANT_STRING, json_dispatch_const_string, offsetof(LookupParameters, group_name), 0 },
{ "service", JSON_VARIANT_STRING, json_dispatch_const_string, offsetof(LookupParameters, service), 0 },
{}
};
_cleanup_(json_variant_unrefp) JsonVariant *v = NULL;
_cleanup_(group_record_unrefp) GroupRecord *g = NULL;
LookupParameters p = {
.gid = GID_INVALID,
};
int r;
assert(parameters);
r = json_dispatch(parameters, dispatch_table, NULL, 0, &p);
if (r < 0)
return r;
if (streq_ptr(p.service, "io.systemd.NameServiceSwitch")) {
if (gid_is_valid(p.gid))
r = nss_group_record_by_gid(p.gid, &g);
else if (p.group_name)
r = nss_group_record_by_name(p.group_name, &g);
else {
_cleanup_(json_variant_unrefp) JsonVariant *last = NULL;
setgrent();
for (;;) {
_cleanup_(group_record_unrefp) GroupRecord *z = NULL;
_cleanup_free_ char *sbuf = NULL;
struct group *grp;
struct sgrp sgrp;
errno = 0;
grp = getgrent();
if (!grp) {
if (errno != 0)
log_debug_errno(errno, "Failure while iterating through NSS group database, ignoring: %m");
break;
}
r = nss_sgrp_for_group(grp, &sgrp, &sbuf);
if (r < 0)
log_debug_errno(r, "Failed to acquire shadow entry for group %s, ignoring: %m", grp->gr_name);
r = nss_group_to_group_record(grp, r >= 0 ? &sgrp : NULL, &z);
if (r < 0) {
endgrent();
return r;
}
if (last) {
r = varlink_notify(link, last);
if (r < 0) {
endgrent();
return r;
}
last = json_variant_unref(last);
}
r = build_group_json(link, z, &last);
if (r < 0) {
endgrent();
return r;
}
}
endgrent();
if (!last)
return varlink_error(link, "io.systemd.UserDatabase.NoRecordFound", NULL);
return varlink_reply(link, last);
}
} else if (streq_ptr(p.service, "io.systemd.Multiplexer")) {
if (gid_is_valid(p.gid))
r = groupdb_by_gid(p.gid, USERDB_AVOID_MULTIPLEXER, &g);
else if (p.group_name)
r = groupdb_by_name(p.group_name, USERDB_AVOID_MULTIPLEXER, &g);
else {
_cleanup_(userdb_iterator_freep) UserDBIterator *iterator = NULL;
_cleanup_(json_variant_unrefp) JsonVariant *last = NULL;
r = groupdb_all(USERDB_AVOID_MULTIPLEXER, &iterator);
if (r < 0)
return r;
for (;;) {
_cleanup_(group_record_unrefp) GroupRecord *z = NULL;
r = groupdb_iterator_get(iterator, &z);
if (r == -ESRCH)
break;
if (r < 0)
return r;
if (last) {
r = varlink_notify(link, last);
if (r < 0)
return r;
last = json_variant_unref(last);
}
r = build_group_json(link, z, &last);
if (r < 0)
return r;
}
if (!last)
return varlink_error(link, "io.systemd.UserDatabase.NoRecordFound", NULL);
return varlink_reply(link, last);
}
} else
return varlink_error(link, "io.systemd.UserDatabase.BadService", NULL);
if (r == -ESRCH)
return varlink_error(link, "io.systemd.UserDatabase.NoRecordFound", NULL);
if (r < 0) {
log_debug_errno(r, "Group lookup failed abnormally: %m");
return varlink_error(link, "io.systemd.UserDatabase.ServiceNotAvailable", NULL);
}
if ((gid_is_valid(p.gid) && g->gid != p.gid) ||
(p.group_name && !streq(g->group_name, p.group_name)))
return varlink_error(link, "io.systemd.UserDatabase.ConflictingRecordFound", NULL);
r = build_group_json(link, g, &v);
if (r < 0)
return r;
return varlink_reply(link, v);
}
static int vl_method_get_memberships(Varlink *link, JsonVariant *parameters, VarlinkMethodFlags flags, void *userdata) {
static const JsonDispatch dispatch_table[] = {
{ "userName", JSON_VARIANT_STRING, json_dispatch_const_string, offsetof(LookupParameters, user_name), 0 },
{ "groupName", JSON_VARIANT_STRING, json_dispatch_const_string, offsetof(LookupParameters, group_name), 0 },
{ "service", JSON_VARIANT_STRING, json_dispatch_const_string, offsetof(LookupParameters, service), 0 },
{}
};
LookupParameters p = {};
int r;
assert(parameters);
r = json_dispatch(parameters, dispatch_table, NULL, 0, &p);
if (r < 0)
return r;
if (streq_ptr(p.service, "io.systemd.NameServiceSwitch")) {
if (p.group_name) {
_cleanup_(group_record_unrefp) GroupRecord *g = NULL;
const char *last = NULL;
char **i;
r = nss_group_record_by_name(p.group_name, &g);
if (r == -ESRCH)
return varlink_error(link, "io.systemd.UserDatabase.NoRecordFound", NULL);
if (r < 0)
return r;
STRV_FOREACH(i, g->members) {
if (p.user_name && !streq_ptr(p.user_name, *i))
continue;
if (last) {
r = varlink_notifyb(link, JSON_BUILD_OBJECT(
JSON_BUILD_PAIR("userName", JSON_BUILD_STRING(last)),
JSON_BUILD_PAIR("groupName", JSON_BUILD_STRING(g->group_name))));
if (r < 0)
return r;
}
last = *i;
}
if (!last)
return varlink_error(link, "io.systemd.UserDatabase.NoRecordFound", NULL);
return varlink_replyb(link, JSON_BUILD_OBJECT(
JSON_BUILD_PAIR("userName", JSON_BUILD_STRING(last)),
JSON_BUILD_PAIR("groupName", JSON_BUILD_STRING(g->group_name))));
} else {
_cleanup_free_ char *last_user_name = NULL, *last_group_name = NULL;
setgrent();
for (;;) {
struct group *grp;
const char* two[2], **users, **i;
errno = 0;
grp = getgrent();
if (!grp) {
if (errno != 0)
log_debug_errno(errno, "Failure while iterating through NSS group database, ignoring: %m");
break;
}
if (p.user_name) {
if (!strv_contains(grp->gr_mem, p.user_name))
continue;
two[0] = p.user_name;
two[1] = NULL;
users = two;
} else
users = (const char**) grp->gr_mem;
STRV_FOREACH(i, users) {
if (last_user_name) {
assert(last_group_name);
r = varlink_notifyb(link, JSON_BUILD_OBJECT(
JSON_BUILD_PAIR("userName", JSON_BUILD_STRING(last_user_name)),
JSON_BUILD_PAIR("groupName", JSON_BUILD_STRING(last_group_name))));
if (r < 0) {
endgrent();
return r;
}
free(last_user_name);
free(last_group_name);
}
last_user_name = strdup(*i);
last_group_name = strdup(grp->gr_name);
if (!last_user_name || !last_group_name) {
endgrent();
return -ENOMEM;
}
}
}
endgrent();
if (!last_user_name) {
assert(!last_group_name);
return varlink_error(link, "io.systemd.UserDatabase.NoRecordFound", NULL);
}
assert(last_group_name);
return varlink_replyb(link, JSON_BUILD_OBJECT(
JSON_BUILD_PAIR("userName", JSON_BUILD_STRING(last_user_name)),
JSON_BUILD_PAIR("groupName", JSON_BUILD_STRING(last_group_name))));
}
} else if (streq_ptr(p.service, "io.systemd.Multiplexer")) {
_cleanup_free_ char *last_user_name = NULL, *last_group_name = NULL;
_cleanup_(userdb_iterator_freep) UserDBIterator *iterator = NULL;
if (p.group_name)
r = membershipdb_by_group(p.group_name, USERDB_AVOID_MULTIPLEXER, &iterator);
else if (p.user_name)
r = membershipdb_by_user(p.user_name, USERDB_AVOID_MULTIPLEXER, &iterator);
else
r = membershipdb_all(USERDB_AVOID_MULTIPLEXER, &iterator);
if (r < 0)
return r;
for (;;) {
_cleanup_free_ char *user_name = NULL, *group_name = NULL;
r = membershipdb_iterator_get(iterator, &user_name, &group_name);
if (r == -ESRCH)
break;
if (r < 0)
return r;
/* If both group + user are specified do a-posteriori filtering */
if (p.group_name && p.user_name && !streq(group_name, p.group_name))
continue;
if (last_user_name) {
assert(last_group_name);
r = varlink_notifyb(link, JSON_BUILD_OBJECT(
JSON_BUILD_PAIR("userName", JSON_BUILD_STRING(last_user_name)),
JSON_BUILD_PAIR("groupName", JSON_BUILD_STRING(last_group_name))));
if (r < 0)
return r;
free(last_user_name);
free(last_group_name);
}
last_user_name = TAKE_PTR(user_name);
last_group_name = TAKE_PTR(group_name);
}
if (!last_user_name) {
assert(!last_group_name);
return varlink_error(link, "io.systemd.UserDatabase.NoRecordFound", NULL);
}
assert(last_group_name);
return varlink_replyb(link, JSON_BUILD_OBJECT(
JSON_BUILD_PAIR("userName", JSON_BUILD_STRING(last_user_name)),
JSON_BUILD_PAIR("groupName", JSON_BUILD_STRING(last_group_name))));
}
return varlink_error(link, "io.systemd.UserDatabase.BadService", NULL);
}
static int process_connection(VarlinkServer *server, int fd) {
_cleanup_(varlink_close_unrefp) Varlink *vl = NULL;
int r;
r = varlink_server_add_connection(server, fd, &vl);
if (r < 0) {
fd = safe_close(fd);
return log_error_errno(r, "Failed to add connection: %m");
}
vl = varlink_ref(vl);
for (;;) {
r = varlink_process(vl);
if (r == -ENOTCONN) {
log_debug("Connection terminated.");
break;
}
if (r < 0)
return log_error_errno(r, "Failed to process connection: %m");
if (r > 0)
continue;
r = varlink_wait(vl, CONNECTION_IDLE_USEC);
if (r < 0)
return log_error_errno(r, "Failed to wait for connection events: %m");
if (r == 0)
break;
}
return 0;
}
static int run(int argc, char *argv[]) {
usec_t start_time, listen_idle_usec, last_busy_usec = USEC_INFINITY;
_cleanup_(varlink_server_unrefp) VarlinkServer *server = NULL;
_cleanup_close_ int lock = -1;
unsigned n_iterations = 0;
int m, listen_fd, r;
log_setup_service();
m = sd_listen_fds(false);
if (m < 0)
return log_error_errno(m, "Failed to determine number of listening fds: %m");
if (m == 0)
return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "No socket to listen on received.");
if (m > 1)
return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "Worker can only listen on a single socket at a time.");
listen_fd = SD_LISTEN_FDS_START;
r = fd_nonblock(listen_fd, false);
if (r < 0)
return log_error_errno(r, "Failed to turn off non-blocking mode for listening socket: %m");
r = varlink_server_new(&server, 0);
if (r < 0)
return log_error_errno(r, "Failed to allocate server: %m");
r = varlink_server_bind_method_many(
server,
"io.systemd.UserDatabase.GetUserRecord", vl_method_get_user_record,
"io.systemd.UserDatabase.GetGroupRecord", vl_method_get_group_record,
"io.systemd.UserDatabase.GetMemberships", vl_method_get_memberships);
if (r < 0)
return log_error_errno(r, "Failed to bind methods: %m");
r = getenv_bool("USERDB_FIXED_WORKER");
if (r < 0)
return log_error_errno(r, "Failed to parse USERDB_FIXED_WORKER: %m");
listen_idle_usec = r ? USEC_INFINITY : LISTEN_IDLE_USEC;
lock = userdb_nss_compat_disable();
if (lock < 0)
return log_error_errno(r, "Failed to disable userdb NSS compatibility: %m");
start_time = now(CLOCK_MONOTONIC);
for (;;) {
_cleanup_close_ int fd = -1;
usec_t n;
/* Exit the worker in regular intervals, to flush out all memory use */
if (n_iterations++ > ITERATIONS_MAX) {
log_debug("Exiting worker, processed %u iterations, that's enough.", n_iterations);
break;
}
n = now(CLOCK_MONOTONIC);
if (n >= usec_add(start_time, RUNTIME_MAX_USEC)) {
char buf[FORMAT_TIMESPAN_MAX];
log_debug("Exiting worker, ran for %s, that's enough.", format_timespan(buf, sizeof(buf), usec_sub_unsigned(n, start_time), 0));
break;
}
if (last_busy_usec == USEC_INFINITY)
last_busy_usec = n;
else if (listen_idle_usec != USEC_INFINITY && n >= usec_add(last_busy_usec, listen_idle_usec)) {
char buf[FORMAT_TIMESPAN_MAX];
log_debug("Exiting worker, been idle for %s, .", format_timespan(buf, sizeof(buf), usec_sub_unsigned(n, last_busy_usec), 0));
break;
}
(void) rename_process("systemd-userwork: waiting...");
fd = accept4(listen_fd, NULL, NULL, SOCK_NONBLOCK|SOCK_CLOEXEC);
if (fd < 0)
fd = -errno;
(void) rename_process("systemd-userwork: processing...");
if (fd == -EAGAIN)
continue; /* The listening socket has SO_RCVTIMEO set, hence a time-out is expected
* after a while, let's check if it's time to exit though. */
if (fd == -EINTR)
continue; /* Might be that somebody attached via strace, let's just continue in that
* case */
if (fd < 0)
return log_error_errno(fd, "Failed to accept() from listening socket: %m");
if (now(CLOCK_MONOTONIC) <= usec_add(n, PRESSURE_SLEEP_TIME_USEC)) {
struct pollfd pfd = {
.fd = listen_fd,
.events = POLLIN,
};
/* We only slept a very short time? If so, let's see if there are more sockets
* pending, and if so, let's ask our parent for more workers */
if (poll(&pfd, 1, 0) < 0)
return log_error_errno(errno, "Failed to test for POLLIN on listening socket: %m");
if (FLAGS_SET(pfd.revents, POLLIN)) {
pid_t parent;
parent = getppid();
if (parent <= 1)
return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "Parent already died?");
if (kill(parent, SIGUSR2) < 0)
return log_error_errno(errno, "Failed to kill our own parent: %m");
}
}
(void) process_connection(server, TAKE_FD(fd));
last_busy_usec = USEC_INFINITY;
}
return 0;
}
DEFINE_MAIN_FUNCTION(run);

units/meson.build

@ -96,6 +96,8 @@ units = [
'sockets.target.wants/'],
['systemd-journald.socket', '',
'sockets.target.wants/'],
['systemd-userdbd.socket', 'ENABLE_USERDB',
'sockets.target.wants/'],
['systemd-networkd.socket', 'ENABLE_NETWORKD'],
['systemd-poweroff.service', ''],
['systemd-reboot.service', ''],
@ -182,6 +184,7 @@ in_units = [
['systemd-nspawn@.service', ''],
['systemd-portabled.service', 'ENABLE_PORTABLED',
'dbus-org.freedesktop.portable1.service'],
['systemd-userdbd.service', 'ENABLE_USERDB'],
['systemd-quotacheck.service', 'ENABLE_QUOTACHECK'],
['systemd-random-seed.service', 'ENABLE_RANDOMSEED',
'sysinit.target.wants/'],

units/systemd-userdbd.service.in Normal file

@ -0,0 +1,41 @@
# SPDX-License-Identifier: LGPL-2.1+
#
# This file is part of systemd.
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
[Unit]
Description=User Database Manager
Documentation=man:systemd-userdbd.service(8)
Requires=systemd-userdbd.socket
After=systemd-userdbd.socket
Before=sysinit.target
DefaultDependencies=no
[Service]
CapabilityBoundingSet=CAP_DAC_READ_SEARCH
ExecStart=@rootlibexecdir@/systemd-userdbd
IPAddressDeny=any
LimitNOFILE=@HIGH_RLIMIT_NOFILE@
LockPersonality=yes
MemoryDenyWriteExecute=yes
NoNewPrivileges=yes
PrivateDevices=yes
ProtectControlGroups=yes
ProtectHome=yes
ProtectHostname=yes
ProtectKernelLogs=yes
ProtectKernelModules=yes
ProtectSystem=strict
RestrictAddressFamilies=AF_UNIX AF_NETLINK AF_INET AF_INET6
RestrictNamespaces=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
SystemCallArchitectures=native
SystemCallErrorNumber=EPERM
SystemCallFilter=@system-service
Type=notify
@SERVICE_WATCHDOG@

units/systemd-userdbd.socket Normal file

@ -0,0 +1,19 @@
# SPDX-License-Identifier: LGPL-2.1+
#
# This file is part of systemd.
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
[Unit]
Description=User Database Manager Socket
Documentation=man:systemd-userdbd.service(8)
DefaultDependencies=no
Before=sockets.target
[Socket]
ListenStream=/run/systemd/userdb/io.systemd.Multiplexer
Symlinks=/run/systemd/userdb/io.systemd.NameServiceSwitch
SocketMode=0666