2011-10-07 21:06:39 +02:00
|
|
|
/***
|
|
|
|
This file is part of systemd.
|
|
|
|
|
|
|
|
Copyright 2011 Lennart Poettering
|
|
|
|
|
|
|
|
systemd is free software; you can redistribute it and/or modify it
|
2012-04-12 00:20:58 +02:00
|
|
|
under the terms of the GNU Lesser General Public License as published by
|
|
|
|
the Free Software Foundation; either version 2.1 of the License, or
|
2011-10-07 21:06:39 +02:00
|
|
|
(at your option) any later version.
|
|
|
|
|
|
|
|
systemd is distributed in the hope that it will be useful, but
|
|
|
|
WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
2012-04-12 00:20:58 +02:00
|
|
|
Lesser General Public License for more details.
|
2011-10-07 21:06:39 +02:00
|
|
|
|
2012-04-12 00:20:58 +02:00
|
|
|
You should have received a copy of the GNU Lesser General Public License
|
2011-10-07 21:06:39 +02:00
|
|
|
along with systemd; If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
***/
|
|
|
|
|
2015-09-23 03:01:06 +02:00
|
|
|
#include <errno.h>
|
2011-10-07 21:06:39 +02:00
|
|
|
#include <fcntl.h>
|
2013-12-29 01:47:36 +01:00
|
|
|
#include <fnmatch.h>
|
2015-09-23 03:01:06 +02:00
|
|
|
#include <getopt.h>
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <locale.h>
|
|
|
|
#include <poll.h>
|
|
|
|
#include <signal.h>
|
2011-10-07 21:06:39 +02:00
|
|
|
#include <stddef.h>
|
2011-10-14 04:44:50 +02:00
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
2015-09-23 03:01:06 +02:00
|
|
|
#include <string.h>
|
2014-10-23 00:28:17 +02:00
|
|
|
#include <sys/inotify.h>
|
2015-09-23 03:01:06 +02:00
|
|
|
#include <sys/stat.h>
|
|
|
|
#include <unistd.h>
|
2011-10-07 21:06:39 +02:00
|
|
|
|
2014-10-23 00:28:17 +02:00
|
|
|
#include "sd-bus.h"
|
2015-09-23 03:01:06 +02:00
|
|
|
#include "sd-journal.h"
|
|
|
|
|
2015-01-18 05:27:39 +01:00
|
|
|
#include "acl-util.h"
|
2015-10-27 03:01:06 +01:00
|
|
|
#include "alloc-util.h"
|
2015-09-23 03:01:06 +02:00
|
|
|
#include "bus-error.h"
|
|
|
|
#include "bus-util.h"
|
|
|
|
#include "catalog.h"
|
2015-10-26 20:39:23 +01:00
|
|
|
#include "chattr-util.h"
|
2015-10-25 13:14:12 +01:00
|
|
|
#include "fd-util.h"
|
2013-07-19 10:02:50 +02:00
|
|
|
#include "fileio.h"
|
2015-10-26 21:16:26 +01:00
|
|
|
#include "fs-util.h"
|
2015-09-23 03:01:06 +02:00
|
|
|
#include "fsprg.h"
|
2015-10-27 01:48:17 +01:00
|
|
|
#include "glob-util.h"
|
2015-09-23 03:01:06 +02:00
|
|
|
#include "hostname-util.h"
|
2015-10-25 14:08:25 +01:00
|
|
|
#include "io-util.h"
|
2012-08-13 20:31:10 +02:00
|
|
|
#include "journal-def.h"
|
2015-09-23 03:01:06 +02:00
|
|
|
#include "journal-internal.h"
|
2012-08-20 22:02:19 +02:00
|
|
|
#include "journal-qrcode.h"
|
2014-11-03 23:08:33 +01:00
|
|
|
#include "journal-vacuum.h"
|
2015-09-23 03:01:06 +02:00
|
|
|
#include "journal-verify.h"
|
2015-10-26 23:01:30 +01:00
|
|
|
#include "locale-util.h"
|
2015-09-23 03:01:06 +02:00
|
|
|
#include "log.h"
|
|
|
|
#include "logs-show.h"
|
2014-10-23 00:28:17 +02:00
|
|
|
#include "mkdir.h"
|
2015-09-23 03:01:06 +02:00
|
|
|
#include "pager.h"
|
2015-10-26 16:18:16 +01:00
|
|
|
#include "parse-util.h"
|
2015-09-23 03:01:06 +02:00
|
|
|
#include "path-util.h"
|
2015-10-26 19:40:43 +01:00
|
|
|
#include "rlimit-util.h"
|
2015-09-23 03:01:06 +02:00
|
|
|
#include "set.h"
|
|
|
|
#include "sigbus.h"
|
|
|
|
#include "strv.h"
|
2015-10-27 00:40:25 +01:00
|
|
|
#include "syslog-util.h"
|
2015-04-10 23:15:59 +02:00
|
|
|
#include "terminal-util.h"
|
2016-02-01 10:44:58 +01:00
|
|
|
#include "udev.h"
|
|
|
|
#include "udev-util.h"
|
2015-09-23 03:01:06 +02:00
|
|
|
#include "unit-name.h"
|
2015-10-25 22:32:30 +01:00
|
|
|
#include "user-util.h"
|
2012-08-13 20:31:10 +02:00
|
|
|
|
2012-08-17 00:45:18 +02:00
|
|
|
#define DEFAULT_FSS_INTERVAL_USEC (15*USEC_PER_MINUTE)
|
2011-10-12 05:29:08 +02:00
|
|
|
|
2014-10-22 20:23:45 +02:00
|
|
|
enum {
        /* Special (negative) sentinel values for arg_lines; any value >= 0 is a
         * literal line count requested with --lines=N. */
        ARG_LINES_DEFAULT = -2,  /* no --lines option given at all */
        ARG_LINES_ALL = -1,      /* --lines=all: show everything, no tail limit */
};
|
|
|
|
|
2012-01-04 18:33:36 +01:00
|
|
|
/* Command-line configuration. All of these are set exclusively by parse_argv()
 * before any journal access happens, and read by the rest of the program. */
static OutputMode arg_output = OUTPUT_SHORT;
static bool arg_utc = false;                        /* --utc: print timestamps in UTC */
static bool arg_pager_end = false;                  /* -e: jump to end of pager */
static bool arg_follow = false;                     /* -f: keep following new entries */
static bool arg_full = true;                        /* show full fields; --no-full ellipsizes */
static bool arg_all = false;                        /* -a: include long/unprintable fields */
static bool arg_no_pager = false;
static int arg_lines = ARG_LINES_DEFAULT;           /* >= 0, or one of the ARG_LINES_* sentinels */
static bool arg_no_tail = false;
static bool arg_quiet = false;
static bool arg_merge = false;                      /* -m: interleave all available journals */
static bool arg_boot = false;                       /* -b/-k: restrict to one boot */
static sd_id128_t arg_boot_id = {};                 /* boot ID parsed from -b, or null ID */
static int arg_boot_offset = 0;                     /* relative boot offset from -b (e.g. -1) */
static bool arg_dmesg = false;                      /* -k: kernel messages only */
static bool arg_no_hostname = false;                /* --no-hostname: suppress hostname column */
static const char *arg_cursor = NULL;
static const char *arg_after_cursor = NULL;
static bool arg_show_cursor = false;
static const char *arg_directory = NULL;            /* -D: read journal files from this directory */
static char **arg_file = NULL;                      /* --file: explicit journal file paths (globbed) */
static bool arg_file_stdin = false;                 /* --file=-: read journal data from stdin */
static int arg_priorities = 0xFF;                   /* bitmask of syslog priorities (0..7) to show */
static const char *arg_verify_key = NULL;           /* --verify-key: FSS verification key */
#ifdef HAVE_GCRYPT
static usec_t arg_interval = DEFAULT_FSS_INTERVAL_USEC;  /* --interval: FSS key change interval */
static bool arg_force = false;                      /* --force: overwrite existing FSS key pair */
#endif
static usec_t arg_since, arg_until;                 /* valid only if the matching *_set flag is true */
static bool arg_since_set = false, arg_until_set = false;
static char **arg_syslog_identifier = NULL;         /* -t: syslog identifiers to match */
static char **arg_system_units = NULL;              /* -u: system units to match */
static char **arg_user_units = NULL;                /* --user-unit: user units to match */
static const char *arg_field = NULL;                /* -F: field whose values should be listed */
static bool arg_catalog = false;                    /* -x: augment output with catalog entries */
static bool arg_reverse = false;                    /* -r: newest entries first */
static int arg_journal_type = 0;                    /* SD_JOURNAL_SYSTEM and/or SD_JOURNAL_CURRENT_USER */
static char *arg_root = NULL;                       /* --root: alternate root for catalog operations */
static const char *arg_machine = NULL;              /* -M: name of local container to operate on */
static uint64_t arg_vacuum_size = 0;                /* --vacuum-size in bytes; 0 = unset */
static uint64_t arg_vacuum_n_files = 0;             /* --vacuum-files; 0 = unset */
static usec_t arg_vacuum_time = 0;                  /* --vacuum-time; 0 = unset */
|
2011-12-19 22:35:46 +01:00
|
|
|
|
2012-08-13 20:31:10 +02:00
|
|
|
/* The single operation this invocation should perform; parse_argv() maps each
 * command option (e.g. --header, --vacuum-size=...) to one of these. */
static enum {
        ACTION_SHOW,              /* default: display journal entries */
        ACTION_NEW_ID128,         /* --new-id128 */
        ACTION_PRINT_HEADER,      /* --header */
        ACTION_SETUP_KEYS,        /* --setup-keys (FSS) */
        ACTION_VERIFY,            /* --verify / --verify-key */
        ACTION_DISK_USAGE,        /* --disk-usage */
        ACTION_LIST_CATALOG,      /* --list-catalog */
        ACTION_DUMP_CATALOG,      /* --dump-catalog */
        ACTION_UPDATE_CATALOG,    /* --update-catalog */
        ACTION_LIST_BOOTS,        /* --list-boots */
        ACTION_FLUSH,             /* --flush */
        ACTION_SYNC,              /* --sync */
        ACTION_ROTATE,            /* --rotate */
        ACTION_VACUUM,            /* any --vacuum-* option */
        ACTION_LIST_FIELDS,       /* -F/--field */
        ACTION_LIST_FIELD_NAMES,  /* -N/--fields */
} arg_action = ACTION_SHOW;
|
|
|
|
|
2015-05-19 00:24:27 +02:00
|
|
|
/* One boot recorded in the journal, chained into a linked list via boot_list. */
typedef struct BootId {
        sd_id128_t id;       /* the boot's 128-bit ID (_BOOT_ID=) */
        uint64_t first;      /* timestamp of the boot's first entry — presumably usec realtime; confirm against producer */
        uint64_t last;       /* timestamp of the boot's last entry */
        LIST_FIELDS(struct BootId, boot_list);
} BootId;
|
2013-06-28 17:26:30 +02:00
|
|
|
|
2016-02-01 10:44:58 +01:00
|
|
|
/* Add journal matches for all messages pertaining to the device at 'devpath'
 * (a /dev/ node) and to each of its udev ancestors, matched both by
 * subsystem:sysname (_KERNEL_DEVICE=+subsys:sysname) and, where a device node
 * exists, by devnum (_KERNEL_DEVICE=[bc]major:minor). The matches are further
 * restricted to the current boot. Returns 0 on success, negative errno on
 * failure. */
static int add_matches_for_device(sd_journal *j, const char *devpath) {
        int r;
        _cleanup_udev_unref_ struct udev *udev = NULL;
        _cleanup_udev_device_unref_ struct udev_device *device = NULL;
        struct udev_device *d = NULL;
        struct stat st;

        assert(j);
        assert(devpath);

        if (!path_startswith(devpath, "/dev/")) {
                log_error("Devpath does not start with /dev/");
                return -EINVAL;
        }

        udev = udev_new();
        if (!udev)
                return log_oom();

        r = stat(devpath, &st);
        if (r < 0)
                /* Must bail out here: continuing would read the uninitialized
                 * 'st' below (this used to only log and fall through). */
                return log_error_errno(errno, "Couldn't stat file: %m");

        d = device = udev_device_new_from_devnum(udev, S_ISBLK(st.st_mode) ? 'b' : 'c', st.st_rdev);
        if (!device)
                return log_error_errno(errno, "Failed to get udev device from devnum %u:%u: %m", major(st.st_rdev), minor(st.st_rdev));

        /* Walk up the udev device hierarchy, adding a match for every ancestor
         * that has both a subsystem and a sysname. */
        while (d) {
                _cleanup_free_ char *match = NULL;
                const char *subsys, *sysname, *devnode;

                subsys = udev_device_get_subsystem(d);
                if (!subsys) {
                        d = udev_device_get_parent(d);
                        continue;
                }

                sysname = udev_device_get_sysname(d);
                if (!sysname) {
                        d = udev_device_get_parent(d);
                        continue;
                }

                match = strjoin("_KERNEL_DEVICE=+", subsys, ":", sysname, NULL);
                if (!match)
                        return log_oom();

                r = sd_journal_add_match(j, match, 0);
                if (r < 0)
                        return log_error_errno(r, "Failed to add match: %m");

                devnode = udev_device_get_devnode(d);
                if (devnode) {
                        _cleanup_free_ char *match1 = NULL;

                        r = stat(devnode, &st);
                        if (r < 0)
                                /* stat() reports failure via errno, not via its
                                 * return value — pass errno, not r (= -1). */
                                return log_error_errno(errno, "Failed to stat() device node \"%s\": %m", devnode);

                        r = asprintf(&match1, "_KERNEL_DEVICE=%c%u:%u", S_ISBLK(st.st_mode) ? 'b' : 'c', major(st.st_rdev), minor(st.st_rdev));
                        if (r < 0)
                                return log_oom();

                        r = sd_journal_add_match(j, match1, 0);
                        if (r < 0)
                                return log_error_errno(r, "Failed to add match: %m");
                }

                d = udev_device_get_parent(d);
        }

        r = add_match_this_boot(j, arg_machine);
        if (r < 0)
                return log_error_errno(r, "Failed to add match for the current boot: %m");

        return 0;
}
|
|
|
|
|
2014-10-08 22:37:45 +02:00
|
|
|
/* Render timestamp 't' into 'buf' (of size 'l'), honoring the --utc switch:
 * UTC formatting if arg_utc is set, local time otherwise. Returns 'buf'. */
static char *format_timestamp_maybe_utc(char *buf, size_t l, usec_t t) {
        return arg_utc ? format_timestamp_utc(buf, l, t)
                       : format_timestamp(buf, l, t);
}
|
|
|
|
|
2013-12-26 01:52:01 +01:00
|
|
|
/* Parse a boot descriptor as accepted by -b/--boot: either a plain signed
 * offset ("-1", "+2"), a full 128-bit boot ID, or a boot ID immediately
 * followed by a signed offset ("<32-hex-chars>-1"). On success stores the
 * parsed ID (null ID when absent) and offset (0 when absent) into the
 * optional output pointers and returns 0; returns a negative errno-style
 * value on malformed input. */
static int parse_boot_descriptor(const char *x, sd_id128_t *boot_id, int *offset) {
        sd_id128_t id = SD_ID128_NULL;
        int off = 0, k;

        if (strlen(x) < 32) {
                /* Too short to carry a boot ID: the whole string is an offset. */
                k = safe_atoi(x, &off);
                if (k < 0)
                        return k;
        } else {
                const char *rest = x;

                /* Try the first 32 characters as a 128-bit ID; on success the
                 * remainder (if any) is the offset. */
                if (sd_id128_from_string(strndupa(x, 32), &id) >= 0)
                        rest += 32;

                if (*rest != '-' && *rest != '+' && *rest != 0)
                        return -EINVAL;

                if (*rest != 0) {
                        k = safe_atoi(rest, &off);
                        if (k < 0)
                                return k;
                }
        }

        if (boot_id)
                *boot_id = id;
        if (offset)
                *offset = off;

        return 0;
}
|
|
|
|
|
2014-08-02 17:12:21 +02:00
|
|
|
/* Print the command-line usage summary to stdout, routed through the pager
 * (respecting --no-pager / --pager-end). Option help text is kept in the same
 * order the options are declared in parse_argv(). */
static void help(void) {

        pager_open(arg_no_pager, arg_pager_end);

        printf("%s [OPTIONS...] [MATCHES...]\n\n"
               "Query the journal.\n\n"
               "Options:\n"
               "     --system              Show the system journal\n"
               "     --user                Show the user journal for the current user\n"
               "  -M --machine=CONTAINER   Operate on local container\n"
               "  -S --since=DATE          Show entries not older than the specified date\n"
               "  -U --until=DATE          Show entries not newer than the specified date\n"
               "  -c --cursor=CURSOR       Show entries starting at the specified cursor\n"
               "     --after-cursor=CURSOR Show entries after the specified cursor\n"
               "     --show-cursor         Print the cursor after all the entries\n"
               "  -b --boot[=ID]           Show current boot or the specified boot\n"
               "     --list-boots          Show terse information about recorded boots\n"
               "  -k --dmesg               Show kernel message log from the current boot\n"
               "  -u --unit=UNIT           Show logs from the specified unit\n"
               "     --user-unit=UNIT      Show logs from the specified user unit\n"
               "  -t --identifier=STRING   Show entries with the specified syslog identifier\n"
               "  -p --priority=RANGE      Show entries with the specified priority\n"
               "  -e --pager-end           Immediately jump to the end in the pager\n"
               "  -f --follow              Follow the journal\n"
               "  -n --lines[=INTEGER]     Number of journal entries to show\n"
               "     --no-tail             Show all lines, even in follow mode\n"
               "  -r --reverse             Show the newest entries first\n"
               "  -o --output=STRING       Change journal output mode (short, short-iso,\n"
               "                                   short-precise, short-monotonic, verbose,\n"
               "                                   export, json, json-pretty, json-sse, cat)\n"
               "     --utc                 Express time in Coordinated Universal Time (UTC)\n"
               "  -x --catalog             Add message explanations where available\n"
               "     --no-full             Ellipsize fields\n"
               "  -a --all                 Show all fields, including long and unprintable\n"
               "  -q --quiet               Do not show info messages and privilege warning\n"
               "     --no-pager            Do not pipe output into a pager\n"
               "     --no-hostname         Suppress output of hostname field\n"
               "  -m --merge               Show entries from all available journals\n"
               "  -D --directory=PATH      Show journal files from directory\n"
               "     --file=PATH           Show journal file\n"
               "     --root=ROOT           Operate on catalog files below a root directory\n"
#ifdef HAVE_GCRYPT
               "     --interval=TIME       Time interval for changing the FSS sealing key\n"
               "     --verify-key=KEY      Specify FSS verification key\n"
               "     --force               Override of the FSS key pair with --setup-keys\n"
#endif
               "\nCommands:\n"
               "  -h --help                Show this help text\n"
               "     --version             Show package version\n"
               "  -N --fields              List all field names currently used\n"
               "  -F --field=FIELD         List all values that a specified field takes\n"
               "     --disk-usage          Show total disk usage of all journal files\n"
               "     --vacuum-size=BYTES   Reduce disk usage below specified size\n"
               "     --vacuum-files=INT    Leave only the specified number of journal files\n"
               "     --vacuum-time=TIME    Remove journal files older than specified time\n"
               "     --verify              Verify journal file consistency\n"
               "     --sync                Synchronize unwritten journal messages to disk\n"
               "     --flush               Flush all journal data from /run into /var\n"
               "     --rotate              Request immediate rotation of the journal files\n"
               "     --header              Show journal header information\n"
               "     --list-catalog        Show all message IDs in the catalog\n"
               "     --dump-catalog        Show entries in the message catalog\n"
               "     --update-catalog      Update the message catalog database\n"
               "     --new-id128           Generate a new 128-bit ID\n"
#ifdef HAVE_GCRYPT
               "     --setup-keys          Generate a new FSS key pair\n"
#endif
               , program_invocation_short_name);
}
|
|
|
|
|
|
|
|
static int parse_argv(int argc, char *argv[]) {
|
|
|
|
|
|
|
|
enum {
|
|
|
|
ARG_VERSION = 0x100,
|
2012-01-04 15:27:31 +01:00
|
|
|
ARG_NO_PAGER,
|
2013-10-07 03:55:18 +02:00
|
|
|
ARG_NO_FULL,
|
2012-01-05 16:28:17 +01:00
|
|
|
ARG_NO_TAIL,
|
2012-07-16 22:24:02 +02:00
|
|
|
ARG_NEW_ID128,
|
2013-10-29 04:43:57 +01:00
|
|
|
ARG_LIST_BOOTS,
|
2013-06-05 01:33:34 +02:00
|
|
|
ARG_USER,
|
|
|
|
ARG_SYSTEM,
|
2013-03-29 02:44:00 +01:00
|
|
|
ARG_ROOT,
|
2012-08-13 20:31:10 +02:00
|
|
|
ARG_HEADER,
|
2012-08-15 01:54:09 +02:00
|
|
|
ARG_SETUP_KEYS,
|
2013-06-06 01:30:17 +02:00
|
|
|
ARG_FILE,
|
2012-08-17 00:45:18 +02:00
|
|
|
ARG_INTERVAL,
|
2012-08-16 02:14:34 +02:00
|
|
|
ARG_VERIFY,
|
2012-09-07 23:20:28 +02:00
|
|
|
ARG_VERIFY_KEY,
|
2012-10-11 16:42:46 +02:00
|
|
|
ARG_DISK_USAGE,
|
2013-07-16 16:21:18 +02:00
|
|
|
ARG_AFTER_CURSOR,
|
|
|
|
ARG_SHOW_CURSOR,
|
2013-03-01 14:39:04 +01:00
|
|
|
ARG_USER_UNIT,
|
2012-11-15 23:03:31 +01:00
|
|
|
ARG_LIST_CATALOG,
|
2013-03-20 01:54:04 +01:00
|
|
|
ARG_DUMP_CATALOG,
|
2013-06-05 01:33:34 +02:00
|
|
|
ARG_UPDATE_CATALOG,
|
2013-07-15 05:13:09 +02:00
|
|
|
ARG_FORCE,
|
2014-10-02 14:39:29 +02:00
|
|
|
ARG_UTC,
|
2015-11-11 12:59:09 +01:00
|
|
|
ARG_SYNC,
|
2014-10-23 00:28:17 +02:00
|
|
|
ARG_FLUSH,
|
2015-09-30 21:54:58 +02:00
|
|
|
ARG_ROTATE,
|
2014-11-03 23:08:33 +01:00
|
|
|
ARG_VACUUM_SIZE,
|
2015-10-02 23:21:59 +02:00
|
|
|
ARG_VACUUM_FILES,
|
2014-11-03 23:08:33 +01:00
|
|
|
ARG_VACUUM_TIME,
|
2016-04-20 20:09:57 +02:00
|
|
|
ARG_NO_HOSTNAME,
|
2011-12-21 18:59:56 +01:00
|
|
|
};
|
|
|
|
|
|
|
|
static const struct option options[] = {
|
2013-07-16 16:21:18 +02:00
|
|
|
{ "help", no_argument, NULL, 'h' },
|
|
|
|
{ "version" , no_argument, NULL, ARG_VERSION },
|
|
|
|
{ "no-pager", no_argument, NULL, ARG_NO_PAGER },
|
|
|
|
{ "pager-end", no_argument, NULL, 'e' },
|
|
|
|
{ "follow", no_argument, NULL, 'f' },
|
|
|
|
{ "force", no_argument, NULL, ARG_FORCE },
|
|
|
|
{ "output", required_argument, NULL, 'o' },
|
|
|
|
{ "all", no_argument, NULL, 'a' },
|
|
|
|
{ "full", no_argument, NULL, 'l' },
|
2013-10-07 03:55:18 +02:00
|
|
|
{ "no-full", no_argument, NULL, ARG_NO_FULL },
|
2013-07-16 16:21:18 +02:00
|
|
|
{ "lines", optional_argument, NULL, 'n' },
|
|
|
|
{ "no-tail", no_argument, NULL, ARG_NO_TAIL },
|
|
|
|
{ "new-id128", no_argument, NULL, ARG_NEW_ID128 },
|
|
|
|
{ "quiet", no_argument, NULL, 'q' },
|
|
|
|
{ "merge", no_argument, NULL, 'm' },
|
|
|
|
{ "boot", optional_argument, NULL, 'b' },
|
2013-10-29 04:43:57 +01:00
|
|
|
{ "list-boots", no_argument, NULL, ARG_LIST_BOOTS },
|
2013-07-16 16:21:18 +02:00
|
|
|
{ "this-boot", optional_argument, NULL, 'b' }, /* deprecated */
|
|
|
|
{ "dmesg", no_argument, NULL, 'k' },
|
|
|
|
{ "system", no_argument, NULL, ARG_SYSTEM },
|
|
|
|
{ "user", no_argument, NULL, ARG_USER },
|
|
|
|
{ "directory", required_argument, NULL, 'D' },
|
|
|
|
{ "file", required_argument, NULL, ARG_FILE },
|
|
|
|
{ "root", required_argument, NULL, ARG_ROOT },
|
|
|
|
{ "header", no_argument, NULL, ARG_HEADER },
|
2014-08-19 11:27:34 +02:00
|
|
|
{ "identifier", required_argument, NULL, 't' },
|
2013-07-16 16:21:18 +02:00
|
|
|
{ "priority", required_argument, NULL, 'p' },
|
|
|
|
{ "setup-keys", no_argument, NULL, ARG_SETUP_KEYS },
|
|
|
|
{ "interval", required_argument, NULL, ARG_INTERVAL },
|
|
|
|
{ "verify", no_argument, NULL, ARG_VERIFY },
|
|
|
|
{ "verify-key", required_argument, NULL, ARG_VERIFY_KEY },
|
|
|
|
{ "disk-usage", no_argument, NULL, ARG_DISK_USAGE },
|
|
|
|
{ "cursor", required_argument, NULL, 'c' },
|
|
|
|
{ "after-cursor", required_argument, NULL, ARG_AFTER_CURSOR },
|
|
|
|
{ "show-cursor", no_argument, NULL, ARG_SHOW_CURSOR },
|
2015-10-13 10:50:49 +02:00
|
|
|
{ "since", required_argument, NULL, 'S' },
|
|
|
|
{ "until", required_argument, NULL, 'U' },
|
2013-07-16 16:21:18 +02:00
|
|
|
{ "unit", required_argument, NULL, 'u' },
|
|
|
|
{ "user-unit", required_argument, NULL, ARG_USER_UNIT },
|
|
|
|
{ "field", required_argument, NULL, 'F' },
|
2016-01-27 19:01:42 +01:00
|
|
|
{ "fields", no_argument, NULL, 'N' },
|
2013-07-16 16:21:18 +02:00
|
|
|
{ "catalog", no_argument, NULL, 'x' },
|
|
|
|
{ "list-catalog", no_argument, NULL, ARG_LIST_CATALOG },
|
|
|
|
{ "dump-catalog", no_argument, NULL, ARG_DUMP_CATALOG },
|
|
|
|
{ "update-catalog", no_argument, NULL, ARG_UPDATE_CATALOG },
|
|
|
|
{ "reverse", no_argument, NULL, 'r' },
|
2013-12-11 22:04:03 +01:00
|
|
|
{ "machine", required_argument, NULL, 'M' },
|
2014-10-02 14:39:29 +02:00
|
|
|
{ "utc", no_argument, NULL, ARG_UTC },
|
2014-10-23 00:28:17 +02:00
|
|
|
{ "flush", no_argument, NULL, ARG_FLUSH },
|
2015-11-11 12:59:09 +01:00
|
|
|
{ "sync", no_argument, NULL, ARG_SYNC },
|
2015-09-30 21:54:58 +02:00
|
|
|
{ "rotate", no_argument, NULL, ARG_ROTATE },
|
2014-11-03 23:08:33 +01:00
|
|
|
{ "vacuum-size", required_argument, NULL, ARG_VACUUM_SIZE },
|
2015-10-02 23:21:59 +02:00
|
|
|
{ "vacuum-files", required_argument, NULL, ARG_VACUUM_FILES },
|
2014-11-03 23:08:33 +01:00
|
|
|
{ "vacuum-time", required_argument, NULL, ARG_VACUUM_TIME },
|
2016-04-20 20:09:57 +02:00
|
|
|
{ "no-hostname", no_argument, NULL, ARG_NO_HOSTNAME },
|
2013-11-06 18:28:39 +01:00
|
|
|
{}
|
2011-12-21 18:59:56 +01:00
|
|
|
};
|
|
|
|
|
2012-01-04 02:14:42 +01:00
|
|
|
int c, r;
|
2011-12-21 18:59:56 +01:00
|
|
|
|
|
|
|
assert(argc >= 0);
|
|
|
|
assert(argv);
|
|
|
|
|
2016-01-27 19:01:42 +01:00
|
|
|
while ((c = getopt_long(argc, argv, "hefo:aln::qmb::kD:p:c:S:U:t:u:NF:xrM:", options, NULL)) >= 0)
|
2011-12-21 18:59:56 +01:00
|
|
|
|
|
|
|
switch (c) {
|
|
|
|
|
|
|
|
case 'h':
|
2014-08-02 17:12:21 +02:00
|
|
|
help();
|
|
|
|
return 0;
|
2011-12-21 18:59:56 +01:00
|
|
|
|
|
|
|
case ARG_VERSION:
|
2015-09-23 03:01:06 +02:00
|
|
|
return version();
|
2011-12-21 18:59:56 +01:00
|
|
|
|
|
|
|
case ARG_NO_PAGER:
|
|
|
|
arg_no_pager = true;
|
|
|
|
break;
|
|
|
|
|
2013-03-07 20:44:35 +01:00
|
|
|
case 'e':
|
|
|
|
arg_pager_end = true;
|
2013-03-07 21:49:12 +01:00
|
|
|
|
2014-10-22 20:23:45 +02:00
|
|
|
if (arg_lines == ARG_LINES_DEFAULT)
|
2013-03-07 21:49:12 +01:00
|
|
|
arg_lines = 1000;
|
|
|
|
|
2013-03-07 20:44:35 +01:00
|
|
|
break;
|
|
|
|
|
2011-12-21 18:59:56 +01:00
|
|
|
case 'f':
|
|
|
|
arg_follow = true;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case 'o':
|
2012-09-21 22:33:02 +02:00
|
|
|
arg_output = output_mode_from_string(optarg);
|
2012-01-04 18:33:36 +01:00
|
|
|
if (arg_output < 0) {
|
2012-10-15 18:14:09 +02:00
|
|
|
log_error("Unknown output format '%s'.", optarg);
|
2011-12-21 18:59:56 +01:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
2012-01-04 18:33:36 +01:00
|
|
|
|
2012-10-15 18:14:09 +02:00
|
|
|
if (arg_output == OUTPUT_EXPORT ||
|
|
|
|
arg_output == OUTPUT_JSON ||
|
|
|
|
arg_output == OUTPUT_JSON_PRETTY ||
|
|
|
|
arg_output == OUTPUT_JSON_SSE ||
|
|
|
|
arg_output == OUTPUT_CAT)
|
|
|
|
arg_quiet = true;
|
|
|
|
|
2011-12-21 18:59:56 +01:00
|
|
|
break;
|
|
|
|
|
2013-06-17 11:36:35 +02:00
|
|
|
case 'l':
|
2012-11-17 15:27:59 +01:00
|
|
|
arg_full = true;
|
|
|
|
break;
|
|
|
|
|
2013-10-07 03:55:18 +02:00
|
|
|
case ARG_NO_FULL:
|
|
|
|
arg_full = false;
|
|
|
|
break;
|
|
|
|
|
2011-12-21 18:59:56 +01:00
|
|
|
case 'a':
|
2012-10-18 23:22:56 +02:00
|
|
|
arg_all = true;
|
2011-12-21 18:59:56 +01:00
|
|
|
break;
|
|
|
|
|
2012-01-04 02:14:42 +01:00
|
|
|
case 'n':
|
2012-09-21 22:33:02 +02:00
|
|
|
if (optarg) {
|
2014-08-31 11:12:22 +02:00
|
|
|
if (streq(optarg, "all"))
|
2014-10-22 20:23:45 +02:00
|
|
|
arg_lines = ARG_LINES_ALL;
|
2014-08-31 11:12:22 +02:00
|
|
|
else {
|
|
|
|
r = safe_atoi(optarg, &arg_lines);
|
|
|
|
if (r < 0 || arg_lines < 0) {
|
|
|
|
log_error("Failed to parse lines '%s'", optarg);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2012-09-21 22:33:02 +02:00
|
|
|
}
|
2013-02-12 00:31:13 +01:00
|
|
|
} else {
|
2014-08-31 11:12:22 +02:00
|
|
|
arg_lines = 10;
|
2013-02-12 00:31:13 +01:00
|
|
|
|
|
|
|
/* Hmm, no argument? Maybe the next
|
|
|
|
* word on the command line is
|
|
|
|
* supposed to be the argument? Let's
|
|
|
|
* see if there is one, and is
|
2014-08-31 11:12:22 +02:00
|
|
|
* parsable. */
|
|
|
|
if (optind < argc) {
|
|
|
|
int n;
|
|
|
|
if (streq(argv[optind], "all")) {
|
2014-10-22 20:23:45 +02:00
|
|
|
arg_lines = ARG_LINES_ALL;
|
2014-08-31 11:12:22 +02:00
|
|
|
optind++;
|
|
|
|
} else if (safe_atoi(argv[optind], &n) >= 0 && n >= 0) {
|
|
|
|
arg_lines = n;
|
|
|
|
optind++;
|
|
|
|
}
|
|
|
|
}
|
2013-02-12 00:31:13 +01:00
|
|
|
}
|
2012-09-21 22:33:02 +02:00
|
|
|
|
2012-01-04 02:14:42 +01:00
|
|
|
break;
|
|
|
|
|
2012-01-04 15:27:31 +01:00
|
|
|
case ARG_NO_TAIL:
|
|
|
|
arg_no_tail = true;
|
|
|
|
break;
|
|
|
|
|
2012-01-07 01:37:15 +01:00
|
|
|
case ARG_NEW_ID128:
|
2012-08-13 20:31:10 +02:00
|
|
|
arg_action = ACTION_NEW_ID128;
|
2012-01-05 16:28:17 +01:00
|
|
|
break;
|
|
|
|
|
2012-03-14 19:54:22 +01:00
|
|
|
case 'q':
|
|
|
|
arg_quiet = true;
|
2012-03-15 07:26:55 +01:00
|
|
|
break;
|
2012-03-14 19:54:22 +01:00
|
|
|
|
2012-09-06 01:49:00 +02:00
|
|
|
case 'm':
|
|
|
|
arg_merge = true;
|
2012-03-27 00:14:29 +02:00
|
|
|
break;
|
|
|
|
|
2012-07-01 18:47:40 +02:00
|
|
|
case 'b':
|
2013-07-16 21:56:22 +02:00
|
|
|
arg_boot = true;
|
2013-12-25 19:17:10 +01:00
|
|
|
|
2013-12-26 01:52:01 +01:00
|
|
|
if (optarg) {
|
2015-05-18 23:50:34 +02:00
|
|
|
r = parse_boot_descriptor(optarg, &arg_boot_id, &arg_boot_offset);
|
2013-12-26 01:52:01 +01:00
|
|
|
if (r < 0) {
|
|
|
|
log_error("Failed to parse boot descriptor '%s'", optarg);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
} else {
|
2013-12-25 19:17:10 +01:00
|
|
|
|
2013-12-26 01:52:01 +01:00
|
|
|
/* Hmm, no argument? Maybe the next
|
|
|
|
* word on the command line is
|
|
|
|
* supposed to be the argument? Let's
|
|
|
|
* see if there is one and is parsable
|
|
|
|
* as a boot descriptor... */
|
|
|
|
|
|
|
|
if (optind < argc &&
|
|
|
|
parse_boot_descriptor(argv[optind], &arg_boot_id, &arg_boot_offset) >= 0)
|
2013-12-25 19:17:10 +01:00
|
|
|
optind++;
|
|
|
|
}
|
2013-07-16 21:56:22 +02:00
|
|
|
|
2012-07-01 18:47:40 +02:00
|
|
|
break;
|
|
|
|
|
2013-10-29 04:43:57 +01:00
|
|
|
case ARG_LIST_BOOTS:
|
|
|
|
arg_action = ACTION_LIST_BOOTS;
|
|
|
|
break;
|
|
|
|
|
2013-05-15 05:08:00 +02:00
|
|
|
case 'k':
|
2013-07-16 21:56:22 +02:00
|
|
|
arg_boot = arg_dmesg = true;
|
2013-05-15 05:08:00 +02:00
|
|
|
break;
|
|
|
|
|
2013-06-05 01:33:34 +02:00
|
|
|
case ARG_SYSTEM:
|
|
|
|
arg_journal_type |= SD_JOURNAL_SYSTEM;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case ARG_USER:
|
|
|
|
arg_journal_type |= SD_JOURNAL_CURRENT_USER;
|
|
|
|
break;
|
|
|
|
|
2013-12-11 22:04:03 +01:00
|
|
|
case 'M':
|
|
|
|
arg_machine = optarg;
|
|
|
|
break;
|
|
|
|
|
2012-07-11 01:08:38 +02:00
|
|
|
case 'D':
|
|
|
|
arg_directory = optarg;
|
|
|
|
break;
|
|
|
|
|
2013-06-06 01:30:17 +02:00
|
|
|
case ARG_FILE:
|
2016-04-25 00:31:24 +02:00
|
|
|
if (streq(optarg, "-"))
|
|
|
|
/* An undocumented feature: we can read journal files from STDIN. We don't document
|
|
|
|
* this though, since after all we only support this for mmap-able, seekable files, and
|
|
|
|
* not for example pipes which are probably the primary usecase for reading things from
|
|
|
|
* STDIN. To avoid confusion we hence don't document this feature. */
|
|
|
|
arg_file_stdin = true;
|
|
|
|
else {
|
|
|
|
r = glob_extend(&arg_file, optarg);
|
|
|
|
if (r < 0)
|
|
|
|
return log_error_errno(r, "Failed to add paths: %m");
|
|
|
|
}
|
2013-06-06 01:30:17 +02:00
|
|
|
break;
|
|
|
|
|
2013-03-29 02:44:00 +01:00
|
|
|
case ARG_ROOT:
|
2015-10-22 19:54:29 +02:00
|
|
|
r = parse_path_argument_and_warn(optarg, true, &arg_root);
|
|
|
|
if (r < 0)
|
|
|
|
return r;
|
2013-03-29 02:44:00 +01:00
|
|
|
break;
|
|
|
|
|
2012-09-27 23:25:23 +02:00
|
|
|
case 'c':
|
|
|
|
arg_cursor = optarg;
|
|
|
|
break;
|
|
|
|
|
2013-07-16 16:21:18 +02:00
|
|
|
case ARG_AFTER_CURSOR:
|
|
|
|
arg_after_cursor = optarg;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case ARG_SHOW_CURSOR:
|
|
|
|
arg_show_cursor = true;
|
|
|
|
break;
|
|
|
|
|
2012-07-16 22:24:02 +02:00
|
|
|
case ARG_HEADER:
|
2012-08-13 20:31:10 +02:00
|
|
|
arg_action = ACTION_PRINT_HEADER;
|
|
|
|
break;
|
|
|
|
|
2012-08-20 16:51:46 +02:00
|
|
|
case ARG_VERIFY:
|
|
|
|
arg_action = ACTION_VERIFY;
|
|
|
|
break;
|
|
|
|
|
2012-09-07 23:20:28 +02:00
|
|
|
case ARG_DISK_USAGE:
|
|
|
|
arg_action = ACTION_DISK_USAGE;
|
|
|
|
break;
|
|
|
|
|
2014-11-03 23:08:33 +01:00
|
|
|
case ARG_VACUUM_SIZE:
|
|
|
|
r = parse_size(optarg, 1024, &arg_vacuum_size);
|
|
|
|
if (r < 0) {
|
|
|
|
log_error("Failed to parse vacuum size: %s", optarg);
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
arg_action = ACTION_VACUUM;
|
|
|
|
break;
|
|
|
|
|
2015-10-02 23:21:59 +02:00
|
|
|
case ARG_VACUUM_FILES:
|
|
|
|
r = safe_atou64(optarg, &arg_vacuum_n_files);
|
|
|
|
if (r < 0) {
|
|
|
|
log_error("Failed to parse vacuum files: %s", optarg);
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
arg_action = ACTION_VACUUM;
|
|
|
|
break;
|
|
|
|
|
2014-11-03 23:08:33 +01:00
|
|
|
case ARG_VACUUM_TIME:
|
|
|
|
r = parse_sec(optarg, &arg_vacuum_time);
|
|
|
|
if (r < 0) {
|
|
|
|
log_error("Failed to parse vacuum time: %s", optarg);
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
arg_action = ACTION_VACUUM;
|
|
|
|
break;
|
|
|
|
|
2012-08-20 16:51:46 +02:00
|
|
|
#ifdef HAVE_GCRYPT
|
2013-07-15 05:13:09 +02:00
|
|
|
case ARG_FORCE:
|
|
|
|
arg_force = true;
|
|
|
|
break;
|
|
|
|
|
2012-08-13 20:31:10 +02:00
|
|
|
case ARG_SETUP_KEYS:
|
|
|
|
arg_action = ACTION_SETUP_KEYS;
|
2012-07-16 22:24:02 +02:00
|
|
|
break;
|
|
|
|
|
2012-08-15 01:54:09 +02:00
|
|
|
|
2012-08-17 00:45:18 +02:00
|
|
|
case ARG_VERIFY_KEY:
|
2012-08-16 02:14:34 +02:00
|
|
|
arg_action = ACTION_VERIFY;
|
2012-08-17 00:45:18 +02:00
|
|
|
arg_verify_key = optarg;
|
2012-09-06 01:49:00 +02:00
|
|
|
arg_merge = false;
|
2012-08-16 02:14:34 +02:00
|
|
|
break;
|
|
|
|
|
2012-08-17 00:45:18 +02:00
|
|
|
case ARG_INTERVAL:
|
2013-04-02 20:38:16 +02:00
|
|
|
r = parse_sec(optarg, &arg_interval);
|
2012-08-17 00:45:18 +02:00
|
|
|
if (r < 0 || arg_interval <= 0) {
|
|
|
|
log_error("Failed to parse sealing key change interval: %s", optarg);
|
2012-08-16 23:58:14 +02:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
break;
|
2012-08-20 16:51:46 +02:00
|
|
|
#else
|
|
|
|
case ARG_SETUP_KEYS:
|
|
|
|
case ARG_VERIFY_KEY:
|
|
|
|
case ARG_INTERVAL:
|
2013-07-15 05:13:09 +02:00
|
|
|
case ARG_FORCE:
|
2012-08-20 16:51:46 +02:00
|
|
|
log_error("Forward-secure sealing not available.");
|
2015-03-13 14:08:00 +01:00
|
|
|
return -EOPNOTSUPP;
|
2012-08-20 16:51:46 +02:00
|
|
|
#endif
|
2012-08-16 23:58:14 +02:00
|
|
|
|
2012-07-27 10:31:33 +02:00
|
|
|
case 'p': {
|
|
|
|
const char *dots;
|
|
|
|
|
|
|
|
dots = strstr(optarg, "..");
|
|
|
|
if (dots) {
|
|
|
|
char *a;
|
|
|
|
int from, to, i;
|
|
|
|
|
|
|
|
/* a range */
|
|
|
|
a = strndup(optarg, dots - optarg);
|
|
|
|
if (!a)
|
|
|
|
return log_oom();
|
|
|
|
|
|
|
|
from = log_level_from_string(a);
|
|
|
|
to = log_level_from_string(dots + 2);
|
|
|
|
free(a);
|
|
|
|
|
|
|
|
if (from < 0 || to < 0) {
|
|
|
|
log_error("Failed to parse log level range %s", optarg);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
arg_priorities = 0;
|
|
|
|
|
|
|
|
if (from < to) {
|
|
|
|
for (i = from; i <= to; i++)
|
|
|
|
arg_priorities |= 1 << i;
|
|
|
|
} else {
|
|
|
|
for (i = to; i <= from; i++)
|
|
|
|
arg_priorities |= 1 << i;
|
|
|
|
}
|
|
|
|
|
|
|
|
} else {
|
|
|
|
int p, i;
|
|
|
|
|
|
|
|
p = log_level_from_string(optarg);
|
|
|
|
if (p < 0) {
|
|
|
|
log_error("Unknown log level %s", optarg);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
arg_priorities = 0;
|
|
|
|
|
|
|
|
for (i = 0; i <= p; i++)
|
|
|
|
arg_priorities |= 1 << i;
|
|
|
|
}
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2015-10-13 10:50:49 +02:00
|
|
|
case 'S':
|
2012-10-11 16:42:46 +02:00
|
|
|
r = parse_timestamp(optarg, &arg_since);
|
|
|
|
if (r < 0) {
|
|
|
|
log_error("Failed to parse timestamp: %s", optarg);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
arg_since_set = true;
|
|
|
|
break;
|
|
|
|
|
2015-10-13 10:50:49 +02:00
|
|
|
case 'U':
|
2012-10-11 16:42:46 +02:00
|
|
|
r = parse_timestamp(optarg, &arg_until);
|
|
|
|
if (r < 0) {
|
|
|
|
log_error("Failed to parse timestamp: %s", optarg);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
arg_until_set = true;
|
|
|
|
break;
|
|
|
|
|
2014-08-19 11:27:34 +02:00
|
|
|
case 't':
|
|
|
|
r = strv_extend(&arg_syslog_identifier, optarg);
|
|
|
|
if (r < 0)
|
|
|
|
return log_oom();
|
|
|
|
break;
|
|
|
|
|
2013-03-14 00:30:05 +01:00
|
|
|
case 'u':
|
2013-04-12 09:14:43 +02:00
|
|
|
r = strv_extend(&arg_system_units, optarg);
|
|
|
|
if (r < 0)
|
|
|
|
return log_oom();
|
2013-03-01 14:39:04 +01:00
|
|
|
break;
|
|
|
|
|
2013-03-14 00:30:05 +01:00
|
|
|
case ARG_USER_UNIT:
|
2013-04-12 09:14:43 +02:00
|
|
|
r = strv_extend(&arg_user_units, optarg);
|
|
|
|
if (r < 0)
|
|
|
|
return log_oom();
|
2012-10-16 02:59:27 +02:00
|
|
|
break;
|
|
|
|
|
2012-10-18 03:33:44 +02:00
|
|
|
case 'F':
|
2016-01-27 19:01:42 +01:00
|
|
|
arg_action = ACTION_LIST_FIELDS;
|
2012-10-18 03:33:44 +02:00
|
|
|
arg_field = optarg;
|
|
|
|
break;
|
|
|
|
|
2016-01-27 19:01:42 +01:00
|
|
|
case 'N':
|
|
|
|
arg_action = ACTION_LIST_FIELD_NAMES;
|
|
|
|
break;
|
|
|
|
|
2016-04-20 20:09:57 +02:00
|
|
|
case ARG_NO_HOSTNAME:
|
|
|
|
arg_no_hostname = true;
|
|
|
|
break;
|
|
|
|
|
2012-11-15 23:03:31 +01:00
|
|
|
case 'x':
|
|
|
|
arg_catalog = true;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case ARG_LIST_CATALOG:
|
|
|
|
arg_action = ACTION_LIST_CATALOG;
|
|
|
|
break;
|
|
|
|
|
2013-03-20 01:54:04 +01:00
|
|
|
case ARG_DUMP_CATALOG:
|
|
|
|
arg_action = ACTION_DUMP_CATALOG;
|
|
|
|
break;
|
|
|
|
|
2012-11-15 23:03:31 +01:00
|
|
|
case ARG_UPDATE_CATALOG:
|
|
|
|
arg_action = ACTION_UPDATE_CATALOG;
|
|
|
|
break;
|
|
|
|
|
2013-03-01 10:27:10 +01:00
|
|
|
case 'r':
|
|
|
|
arg_reverse = true;
|
|
|
|
break;
|
|
|
|
|
2014-10-02 14:39:29 +02:00
|
|
|
case ARG_UTC:
|
|
|
|
arg_utc = true;
|
|
|
|
break;
|
|
|
|
|
2014-10-23 00:28:17 +02:00
|
|
|
case ARG_FLUSH:
|
|
|
|
arg_action = ACTION_FLUSH;
|
|
|
|
break;
|
|
|
|
|
2015-09-30 21:54:58 +02:00
|
|
|
case ARG_ROTATE:
|
|
|
|
arg_action = ACTION_ROTATE;
|
|
|
|
break;
|
|
|
|
|
2015-11-11 12:59:09 +01:00
|
|
|
case ARG_SYNC:
|
|
|
|
arg_action = ACTION_SYNC;
|
|
|
|
break;
|
|
|
|
|
2013-11-06 18:28:39 +01:00
|
|
|
case '?':
|
2011-12-21 18:59:56 +01:00
|
|
|
return -EINVAL;
|
2013-11-06 18:28:39 +01:00
|
|
|
|
|
|
|
default:
|
|
|
|
assert_not_reached("Unhandled option");
|
2011-12-21 18:59:56 +01:00
|
|
|
}
|
|
|
|
|
2014-11-25 20:47:49 +01:00
|
|
|
if (arg_follow && !arg_no_tail && !arg_since && arg_lines == ARG_LINES_DEFAULT)
|
2012-01-04 15:27:31 +01:00
|
|
|
arg_lines = 10;
|
|
|
|
|
2013-12-11 22:04:03 +01:00
|
|
|
if (!!arg_directory + !!arg_file + !!arg_machine > 1) {
|
|
|
|
log_error("Please specify either -D/--directory= or --file= or -M/--machine=, not more than one.");
|
2013-06-06 01:30:17 +02:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2013-02-24 15:27:51 +01:00
|
|
|
if (arg_since_set && arg_until_set && arg_since > arg_until) {
|
2012-10-11 16:42:46 +02:00
|
|
|
log_error("--since= must be before --until=.");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2013-07-16 16:21:18 +02:00
|
|
|
if (!!arg_cursor + !!arg_after_cursor + !!arg_since_set > 1) {
|
|
|
|
log_error("Please specify only one of --since=, --cursor=, and --after-cursor.");
|
2012-10-11 16:42:46 +02:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2013-03-01 10:27:10 +01:00
|
|
|
if (arg_follow && arg_reverse) {
|
|
|
|
log_error("Please specify either --reverse= or --follow=, not both.");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2015-10-13 08:04:11 +02:00
|
|
|
if (!IN_SET(arg_action, ACTION_SHOW, ACTION_DUMP_CATALOG, ACTION_LIST_CATALOG) && optind < argc) {
|
2014-02-27 05:01:43 +01:00
|
|
|
log_error("Extraneous arguments starting with '%s'", argv[optind]);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
if ((arg_boot || arg_action == ACTION_LIST_BOOTS) && (arg_file || arg_directory || arg_merge)) {
|
|
|
|
log_error("Using --boot or --list-boots with --file, --directory or --merge is not supported.");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2011-12-21 18:59:56 +01:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2012-01-07 01:37:15 +01:00
|
|
|
static int generate_new_id128(void) {
|
2012-01-05 16:28:17 +01:00
|
|
|
sd_id128_t id;
|
|
|
|
int r;
|
|
|
|
unsigned i;
|
|
|
|
|
|
|
|
r = sd_id128_randomize(&id);
|
2014-11-28 18:23:20 +01:00
|
|
|
if (r < 0)
|
|
|
|
return log_error_errno(r, "Failed to generate ID: %m");
|
2012-01-05 16:28:17 +01:00
|
|
|
|
|
|
|
printf("As string:\n"
|
|
|
|
SD_ID128_FORMAT_STR "\n\n"
|
|
|
|
"As UUID:\n"
|
|
|
|
"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\n\n"
|
|
|
|
"As macro:\n"
|
2013-02-11 04:47:14 +01:00
|
|
|
"#define MESSAGE_XYZ SD_ID128_MAKE(",
|
2012-01-05 16:28:17 +01:00
|
|
|
SD_ID128_FORMAT_VAL(id),
|
|
|
|
SD_ID128_FORMAT_VAL(id));
|
|
|
|
for (i = 0; i < 16; i++)
|
|
|
|
printf("%02x%s", id.bytes[i], i != 15 ? "," : "");
|
2013-02-11 04:47:14 +01:00
|
|
|
fputs(")\n\n", stdout);
|
2012-01-05 16:28:17 +01:00
|
|
|
|
2013-02-11 04:47:14 +01:00
|
|
|
printf("As Python constant:\n"
|
|
|
|
">>> import uuid\n"
|
|
|
|
">>> MESSAGE_XYZ = uuid.UUID('" SD_ID128_FORMAT_STR "')\n",
|
|
|
|
SD_ID128_FORMAT_VAL(id));
|
2012-01-05 16:28:17 +01:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-07-11 01:08:38 +02:00
|
|
|
/* Translate the positional command-line arguments into journal matches.
 *
 * Three argument forms are recognized:
 *   "+"              - a disjunction separator, only valid between terms;
 *   an absolute path - executables/scripts become _COMM=/_EXE= matches,
 *                      device nodes are handled by add_matches_for_device();
 *   anything else    - passed through verbatim as a FIELD=value match.
 *
 * Returns 0 on success, a negative errno-style error otherwise. */
static int add_matches(sd_journal *j, char **args) {
        bool seen_term = false;
        char **arg;

        assert(j);

        STRV_FOREACH(arg, args) {
                int r;

                if (streq(*arg, "+")) {
                        /* A "+" with no preceding term is rejected below,
                         * after the loop. */
                        if (!seen_term)
                                break;

                        r = sd_journal_add_disjunction(j);
                        seen_term = false;

                } else if (path_is_absolute(*arg)) {
                        _cleanup_free_ char *p = NULL, *t = NULL, *t2 = NULL, *interpreter = NULL;
                        const char *path;
                        struct stat st;

                        /* Prefer the fully resolved path, fall back to the
                         * argument as given if resolution fails. */
                        p = canonicalize_file_name(*arg);
                        path = p ?: *arg;

                        if (lstat(path, &st) < 0)
                                return log_error_errno(errno, "Couldn't stat file: %m");

                        if (S_ISREG(st.st_mode) && (st.st_mode & 0111)) {

                                if (executable_is_script(path, &interpreter) > 0) {
                                        _cleanup_free_ char *comm = NULL;

                                        /* _COMM is limited to 15 characters. */
                                        comm = strndup(basename(path), 15);
                                        if (!comm)
                                                return log_oom();

                                        t = strappend("_COMM=", comm);
                                        if (!t)
                                                return log_oom();

                                        /* Append _EXE only if the interpreter is not a link.
                                           Otherwise, it might be outdated often. */
                                        if (lstat(interpreter, &st) == 0 && !S_ISLNK(st.st_mode)) {
                                                t2 = strappend("_EXE=", interpreter);
                                                if (!t2)
                                                        return log_oom();
                                        }
                                } else {
                                        t = strappend("_EXE=", path);
                                        if (!t)
                                                return log_oom();
                                }

                                r = sd_journal_add_match(j, t, 0);
                                if (r >= 0 && t2)
                                        r = sd_journal_add_match(j, t2, 0);

                        } else if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) {
                                r = add_matches_for_device(j, path);
                                if (r < 0)
                                        return r;

                        } else {
                                log_error("File is neither a device node, nor regular file, nor executable: %s", *arg);
                                return -EINVAL;
                        }

                        seen_term = true;

                } else {
                        /* Plain FIELD=value match, taken verbatim. */
                        r = sd_journal_add_match(j, *arg, 0);
                        seen_term = true;
                }

                if (r < 0)
                        return log_error_errno(r, "Failed to add match '%s': %m", *arg);
        }

        if (!strv_isempty(args) && !seen_term) {
                log_error("\"+\" can only be used between terms");
                return -EINVAL;
        }

        return 0;
}
|
|
|
|
|
2015-05-19 00:35:02 +02:00
|
|
|
/* Free every BootId entry of the linked list 'l'. */
static void boot_id_free_all(BootId *l) {

        while (l) {
                BootId *head = l;

                /* Unlink the current head, then release it. */
                LIST_REMOVE(boot_list, l, head);
                free(head);
        }
}
|
|
|
|
|
2015-05-19 00:24:27 +02:00
|
|
|
static int discover_next_boot(
|
|
|
|
sd_journal *j,
|
|
|
|
BootId **boot,
|
|
|
|
bool advance_older,
|
|
|
|
bool read_realtime) {
|
|
|
|
|
2013-10-29 04:43:57 +01:00
|
|
|
int r;
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
char match[9+32+1] = "_BOOT_ID=";
|
2015-05-19 00:24:27 +02:00
|
|
|
_cleanup_free_ BootId *next_boot = NULL;
|
2014-10-23 19:37:29 +02:00
|
|
|
|
|
|
|
assert(j);
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
assert(boot);
|
|
|
|
|
|
|
|
/* We expect the journal to be on the last position of a boot
|
|
|
|
* (in relation to the direction we are going), so that the next
|
|
|
|
* invocation of sd_journal_next/previous will be from a different
|
|
|
|
* boot. We then collect any information we desire and then jump
|
|
|
|
* to the last location of the new boot by using a _BOOT_ID match
|
|
|
|
* coming from the other journal direction. */
|
|
|
|
|
|
|
|
/* Make sure we aren't restricted by any _BOOT_ID matches, so that
|
|
|
|
* we can actually advance to a *different* boot. */
|
|
|
|
sd_journal_flush_matches(j);
|
|
|
|
|
|
|
|
if (advance_older)
|
|
|
|
r = sd_journal_previous(j);
|
|
|
|
else
|
|
|
|
r = sd_journal_next(j);
|
|
|
|
if (r < 0)
|
|
|
|
return r;
|
|
|
|
else if (r == 0)
|
|
|
|
return 0; /* End of journal, yay. */
|
|
|
|
|
2015-05-19 00:24:27 +02:00
|
|
|
next_boot = new0(BootId, 1);
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
if (!next_boot)
|
2015-05-19 00:25:45 +02:00
|
|
|
return -ENOMEM;
|
2013-10-29 04:43:57 +01:00
|
|
|
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
r = sd_journal_get_monotonic_usec(j, NULL, &next_boot->id);
|
2013-10-29 04:43:57 +01:00
|
|
|
if (r < 0)
|
|
|
|
return r;
|
|
|
|
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
if (read_realtime) {
|
|
|
|
r = sd_journal_get_realtime_usec(j, &next_boot->first);
|
|
|
|
if (r < 0)
|
|
|
|
return r;
|
|
|
|
}
|
2014-10-23 19:37:29 +02:00
|
|
|
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
/* Now seek to the last occurrence of this boot ID. */
|
|
|
|
sd_id128_to_string(next_boot->id, match + 9);
|
|
|
|
r = sd_journal_add_match(j, match, sizeof(match) - 1);
|
|
|
|
if (r < 0)
|
|
|
|
return r;
|
2013-10-29 04:43:57 +01:00
|
|
|
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
if (advance_older)
|
|
|
|
r = sd_journal_seek_head(j);
|
|
|
|
else
|
|
|
|
r = sd_journal_seek_tail(j);
|
|
|
|
if (r < 0)
|
|
|
|
return r;
|
2013-10-29 04:43:57 +01:00
|
|
|
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
if (advance_older)
|
|
|
|
r = sd_journal_next(j);
|
|
|
|
else
|
|
|
|
r = sd_journal_previous(j);
|
|
|
|
if (r < 0)
|
|
|
|
return r;
|
|
|
|
else if (r == 0)
|
|
|
|
return -ENODATA; /* This shouldn't happen. We just came from this very boot ID. */
|
2013-10-29 04:43:57 +01:00
|
|
|
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
if (read_realtime) {
|
|
|
|
r = sd_journal_get_realtime_usec(j, &next_boot->last);
|
2013-10-29 04:43:57 +01:00
|
|
|
if (r < 0)
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
|
|
|
*boot = next_boot;
|
|
|
|
next_boot = NULL;
|
2015-05-19 00:35:02 +02:00
|
|
|
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-05-19 00:24:27 +02:00
|
|
|
static int get_boots(
|
|
|
|
sd_journal *j,
|
|
|
|
BootId **boots,
|
|
|
|
BootId *query_ref_boot,
|
|
|
|
int ref_boot_offset) {
|
|
|
|
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
bool skip_once;
|
|
|
|
int r, count = 0;
|
2015-05-19 00:24:27 +02:00
|
|
|
BootId *head = NULL, *tail = NULL;
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
const bool advance_older = query_ref_boot && ref_boot_offset <= 0;
|
|
|
|
|
|
|
|
assert(j);
|
2013-10-29 04:43:57 +01:00
|
|
|
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
/* Adjust for the asymmetry that offset 0 is
|
|
|
|
* the last (and current) boot, while 1 is considered the
|
|
|
|
* (chronological) first boot in the journal. */
|
|
|
|
skip_once = query_ref_boot && sd_id128_is_null(query_ref_boot->id) && ref_boot_offset < 0;
|
|
|
|
|
|
|
|
/* Advance to the earliest/latest occurrence of our reference
|
|
|
|
* boot ID (taking our lookup direction into account), so that
|
|
|
|
* discover_next_boot() can do its job.
|
|
|
|
* If no reference is given, the journal head/tail will do,
|
|
|
|
* they're "virtual" boots after all. */
|
|
|
|
if (query_ref_boot && !sd_id128_is_null(query_ref_boot->id)) {
|
|
|
|
char match[9+32+1] = "_BOOT_ID=";
|
|
|
|
|
|
|
|
sd_journal_flush_matches(j);
|
|
|
|
|
|
|
|
sd_id128_to_string(query_ref_boot->id, match + 9);
|
|
|
|
r = sd_journal_add_match(j, match, sizeof(match) - 1);
|
2013-10-29 04:43:57 +01:00
|
|
|
if (r < 0)
|
|
|
|
return r;
|
|
|
|
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
if (advance_older)
|
|
|
|
r = sd_journal_seek_head(j);
|
|
|
|
else
|
|
|
|
r = sd_journal_seek_tail(j);
|
2013-10-29 04:43:57 +01:00
|
|
|
if (r < 0)
|
|
|
|
return r;
|
|
|
|
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
if (advance_older)
|
|
|
|
r = sd_journal_next(j);
|
|
|
|
else
|
|
|
|
r = sd_journal_previous(j);
|
2013-10-29 04:43:57 +01:00
|
|
|
if (r < 0)
|
|
|
|
return r;
|
|
|
|
else if (r == 0)
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
goto finish;
|
|
|
|
else if (ref_boot_offset == 0) {
|
|
|
|
count = 1;
|
|
|
|
goto finish;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (advance_older)
|
|
|
|
r = sd_journal_seek_tail(j);
|
|
|
|
else
|
|
|
|
r = sd_journal_seek_head(j);
|
2013-10-29 04:43:57 +01:00
|
|
|
if (r < 0)
|
|
|
|
return r;
|
|
|
|
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
/* No sd_journal_next/previous here. */
|
|
|
|
}
|
2013-10-29 04:43:57 +01:00
|
|
|
|
2015-05-19 00:24:27 +02:00
|
|
|
for (;;) {
|
|
|
|
_cleanup_free_ BootId *current = NULL;
|
2013-10-29 04:43:57 +01:00
|
|
|
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
r = discover_next_boot(j, ¤t, advance_older, !query_ref_boot);
|
|
|
|
if (r < 0) {
|
2015-05-19 00:35:02 +02:00
|
|
|
boot_id_free_all(head);
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
return r;
|
2014-10-23 19:37:29 +02:00
|
|
|
}
|
2013-10-29 04:43:57 +01:00
|
|
|
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
if (!current)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (query_ref_boot) {
|
|
|
|
if (!skip_once)
|
|
|
|
ref_boot_offset += advance_older ? 1 : -1;
|
|
|
|
skip_once = false;
|
|
|
|
|
|
|
|
if (ref_boot_offset == 0) {
|
|
|
|
count = 1;
|
|
|
|
query_ref_boot->id = current->id;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
LIST_INSERT_AFTER(boot_list, head, tail, current);
|
|
|
|
tail = current;
|
|
|
|
current = NULL;
|
|
|
|
count++;
|
|
|
|
}
|
2013-10-29 04:43:57 +01:00
|
|
|
}
|
|
|
|
|
journalctl: Improve boot ID lookup
This method should greatly improve offset based lookup, by simply jumping
from one boot to the next boot. It starts at the journal head to get the
a boot ID, makes a _BOOT_ID match and then comes from the opposite
journal direction (tail) to get to the end that boot. After flushing the matches
and advancing the journal from that exact position, we arrive at the start
of next boot. Rinse and repeat.
This is faster than the old method of aggregating the full boot listing just
so we can jump to a specific boot, which can be a real pain on big journals
just for a mere "-b -1" case.
As an additional benefit --list-boots should improve slightly too, because
it does less seeking.
Note that there can be a change in boot order with this lookup method
because it will use the order of boots in the journal, not the realtime stamp
stored in them. That's arguably better, though.
Another deficiency is that it will get confused with boots interleaving in the
journal, therefore, it will refuse operation in --merge, --file and --directory mode.
https://bugs.freedesktop.org/show_bug.cgi?id=72601
2015-05-01 15:15:16 +02:00
|
|
|
finish:
|
|
|
|
if (boots)
|
|
|
|
*boots = head;
|
|
|
|
|
|
|
|
sd_journal_flush_matches(j);
|
|
|
|
|
|
|
|
return count;
|
2014-10-23 19:37:29 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Print one line per boot found in the journal: a signed relative index
 * (0 = current boot, negative = older boots), the 128-bit boot ID, and the
 * timestamps of the boot's first and last journal entries.
 * Returns 0 on success, a negative errno-style code on failure, and the
 * (non-positive) result of get_boots() when no boots were found. */
static int list_boots(sd_journal *j) {
        int w, i, count;
        BootId *id, *all_ids;

        assert(j);

        /* Collect the full boot list; no reference boot, no offset. */
        count = get_boots(j, &all_ids, NULL, 0);
        if (count < 0)
                return log_error_errno(count, "Failed to determine boots: %m");
        if (count == 0)
                return count;

        pager_open(arg_no_pager, arg_pager_end);

        /* numbers are one less, but we need an extra char for the sign */
        w = DECIMAL_STR_WIDTH(count - 1) + 1;

        i = 0;
        LIST_FOREACH(boot_list, id, all_ids) {
                char a[FORMAT_TIMESTAMP_MAX], b[FORMAT_TIMESTAMP_MAX];

                /* i - count + 1 maps the list position to a relative offset:
                 * the first (oldest) boot prints as -(count-1), the last
                 * (current) boot prints as 0. */
                printf("% *i " SD_ID128_FORMAT_STR " %s—%s\n",
                       w, i - count + 1,
                       SD_ID128_FORMAT_VAL(id->id),
                       format_timestamp_maybe_utc(a, sizeof(a), id->first),
                       format_timestamp_maybe_utc(b, sizeof(b), id->last));
                i++;
        }

        boot_id_free_all(all_ids);

        return 0;
}
|
|
|
|
|
|
|
|
/* Restrict the journal query to a single boot, selected either by explicit
 * boot ID (arg_boot_id) or by relative offset (arg_boot_offset), and AND the
 * resulting _BOOT_ID= match with any subsequent match groups.
 * Returns 0 on success (or when -b was not given at all), -ENODATA when the
 * requested boot is not present in the journal, or another negative
 * errno-style code on failure. */
static int add_boot(sd_journal *j) {
        /* "_BOOT_ID=" (9 chars) + 32 hex digits + NUL; the ID is written
         * into the tail of this buffer below. */
        char match[9+32+1] = "_BOOT_ID=";
        int r;
        BootId ref_boot_id = {};

        assert(j);

        if (!arg_boot)
                return 0;

        /* Plain "-b" with no offset and no explicit ID means the current
         * boot; that has a dedicated, cheaper code path. */
        if (arg_boot_offset == 0 && sd_id128_equal(arg_boot_id, SD_ID128_NULL))
                return add_match_this_boot(j, arg_machine);

        /* Resolve ID+offset to a concrete boot ID via get_boots(); it
         * returns 1 when found, 0 when not found, negative on error. */
        ref_boot_id.id = arg_boot_id;
        r = get_boots(j, NULL, &ref_boot_id, arg_boot_offset);
        assert(r <= 1);
        if (r <= 0) {
                const char *reason = (r == 0) ? "No such boot ID in journal" : strerror(-r);

                /* A null arg_boot_id means the user selected by offset only,
                 * so phrase the error accordingly. */
                if (sd_id128_is_null(arg_boot_id))
                        log_error("Data from the specified boot (%+i) is not available: %s",
                                  arg_boot_offset, reason);
                else
                        log_error("Data from the specified boot ("SD_ID128_FORMAT_STR") is not available: %s",
                                  SD_ID128_FORMAT_VAL(arg_boot_id), reason);

                return r == 0 ? -ENODATA : r;
        }

        /* Append the 32-char hex representation right after "_BOOT_ID=". */
        sd_id128_to_string(ref_boot_id.id, match + 9);

        r = sd_journal_add_match(j, match, sizeof(match) - 1);
        if (r < 0)
                return log_error_errno(r, "Failed to add match: %m");

        r = sd_journal_add_conjunction(j);
        if (r < 0)
                return log_error_errno(r, "Failed to add conjunction: %m");

        return 0;
}
|
|
|
|
|
2013-05-15 05:08:00 +02:00
|
|
|
/* When --dmesg/-k was given, restrict the query to kernel-transport messages
 * (_TRANSPORT=kernel) and AND that with the following match groups.
 * Returns 0 on success or when arg_dmesg is unset, negative errno-style
 * code on failure. */
static int add_dmesg(sd_journal *j) {
        int r;
        assert(j);

        if (!arg_dmesg)
                return 0;

        r = sd_journal_add_match(j, "_TRANSPORT=kernel", strlen("_TRANSPORT=kernel"));
        if (r < 0)
                return log_error_errno(r, "Failed to add match: %m");

        r = sd_journal_add_conjunction(j);
        if (r < 0)
                return log_error_errno(r, "Failed to add conjunction: %m");

        return 0;
}
|
|
|
|
|
2015-05-19 00:25:45 +02:00
|
|
|
static int get_possible_units(
|
|
|
|
sd_journal *j,
|
|
|
|
const char *fields,
|
|
|
|
char **patterns,
|
|
|
|
Set **units) {
|
|
|
|
|
2013-12-29 01:47:36 +01:00
|
|
|
_cleanup_set_free_free_ Set *found;
|
|
|
|
const char *field;
|
2012-10-16 02:59:27 +02:00
|
|
|
int r;
|
2013-12-29 01:47:36 +01:00
|
|
|
|
2014-08-13 01:00:18 +02:00
|
|
|
found = set_new(&string_hash_ops);
|
2013-12-29 01:47:36 +01:00
|
|
|
if (!found)
|
2015-05-19 00:25:45 +02:00
|
|
|
return -ENOMEM;
|
2013-12-29 01:47:36 +01:00
|
|
|
|
|
|
|
NULSTR_FOREACH(field, fields) {
|
|
|
|
const void *data;
|
|
|
|
size_t size;
|
|
|
|
|
|
|
|
r = sd_journal_query_unique(j, field);
|
|
|
|
if (r < 0)
|
|
|
|
return r;
|
|
|
|
|
|
|
|
SD_JOURNAL_FOREACH_UNIQUE(j, data, size) {
|
|
|
|
char **pattern, *eq;
|
|
|
|
size_t prefix;
|
|
|
|
_cleanup_free_ char *u = NULL;
|
|
|
|
|
|
|
|
eq = memchr(data, '=', size);
|
|
|
|
if (eq)
|
|
|
|
prefix = eq - (char*) data + 1;
|
|
|
|
else
|
|
|
|
prefix = 0;
|
|
|
|
|
|
|
|
u = strndup((char*) data + prefix, size - prefix);
|
|
|
|
if (!u)
|
2015-05-19 00:25:45 +02:00
|
|
|
return -ENOMEM;
|
2013-12-29 01:47:36 +01:00
|
|
|
|
|
|
|
STRV_FOREACH(pattern, patterns)
|
|
|
|
if (fnmatch(*pattern, u, FNM_NOESCAPE) == 0) {
|
|
|
|
log_debug("Matched %s with pattern %s=%s", u, field, *pattern);
|
|
|
|
|
|
|
|
r = set_consume(found, u);
|
|
|
|
u = NULL;
|
|
|
|
if (r < 0 && r != -EEXIST)
|
|
|
|
return r;
|
|
|
|
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
*units = found;
|
|
|
|
found = NULL;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* This list is supposed to return the superset of unit names
 * possibly matched by rules added with add_matches_for_unit... */
#define SYSTEM_UNITS \
        "_SYSTEMD_UNIT\0" \
        "COREDUMP_UNIT\0" \
        "UNIT\0" \
        "OBJECT_SYSTEMD_UNIT\0" \
        "_SYSTEMD_SLICE\0"

/* ... and add_matches_for_user_unit */
#define USER_UNITS \
        "_SYSTEMD_USER_UNIT\0" \
        "USER_UNIT\0" \
        "COREDUMP_USER_UNIT\0" \
        "OBJECT_SYSTEMD_USER_UNIT\0"
|
|
|
|
|
|
|
|
/* Add journal matches for the units requested via --unit/--user-unit.
 * Literal unit names get matches added directly; glob patterns are first
 * expanded against the unit names actually present in the journal (via
 * get_possible_units()). All per-unit match groups are OR-ed together,
 * then AND-ed with subsequent match groups.
 * Returns 0 on success, -ENODATA when units were requested but none
 * matched anything in the journal, or another negative errno-style code. */
static int add_units(sd_journal *j) {
        _cleanup_strv_free_ char **patterns = NULL;
        int r, count = 0;
        char **i;

        assert(j);

        /* --- System units: literals now, globs collected for later. --- */
        STRV_FOREACH(i, arg_system_units) {
                _cleanup_free_ char *u = NULL;

                r = unit_name_mangle(*i, UNIT_NAME_GLOB, &u);
                if (r < 0)
                        return r;

                if (string_is_glob(u)) {
                        /* strv_push() takes ownership on success; disarm
                         * the cleanup handler by clearing u. */
                        r = strv_push(&patterns, u);
                        if (r < 0)
                                return r;
                        u = NULL;
                } else {
                        r = add_matches_for_unit(j, u);
                        if (r < 0)
                                return r;
                        r = sd_journal_add_disjunction(j);
                        if (r < 0)
                                return r;
                        count++;
                }
        }

        /* Expand collected system-unit globs against the journal contents. */
        if (!strv_isempty(patterns)) {
                _cleanup_set_free_free_ Set *units = NULL;
                Iterator it;
                char *u;

                r = get_possible_units(j, SYSTEM_UNITS, patterns, &units);
                if (r < 0)
                        return r;

                SET_FOREACH(u, units, it) {
                        r = add_matches_for_unit(j, u);
                        if (r < 0)
                                return r;
                        r = sd_journal_add_disjunction(j);
                        if (r < 0)
                                return r;
                        count++;
                }
        }

        /* Reset the pattern list before processing user units. */
        patterns = strv_free(patterns);

        /* --- User units: same scheme as above. --- */
        STRV_FOREACH(i, arg_user_units) {
                _cleanup_free_ char *u = NULL;

                r = unit_name_mangle(*i, UNIT_NAME_GLOB, &u);
                if (r < 0)
                        return r;

                if (string_is_glob(u)) {
                        r = strv_push(&patterns, u);
                        if (r < 0)
                                return r;
                        u = NULL;
                } else {
                        r = add_matches_for_user_unit(j, u, getuid());
                        if (r < 0)
                                return r;
                        r = sd_journal_add_disjunction(j);
                        if (r < 0)
                                return r;
                        count++;
                }
        }

        if (!strv_isempty(patterns)) {
                _cleanup_set_free_free_ Set *units = NULL;
                Iterator it;
                char *u;

                r = get_possible_units(j, USER_UNITS, patterns, &units);
                if (r < 0)
                        return r;

                SET_FOREACH(u, units, it) {
                        r = add_matches_for_user_unit(j, u, getuid());
                        if (r < 0)
                                return r;
                        r = sd_journal_add_disjunction(j);
                        if (r < 0)
                                return r;
                        count++;
                }
        }

        /* Complain if the user request matches but nothing whatsoever was
         * found, since otherwise everything would be matched. */
        if (!(strv_isempty(arg_system_units) && strv_isempty(arg_user_units)) && count == 0)
                return -ENODATA;

        r = sd_journal_add_conjunction(j);
        if (r < 0)
                return r;

        return 0;
}
|
|
|
|
|
2012-07-27 10:31:33 +02:00
|
|
|
/* Add a PRIORITY= match for every syslog priority level enabled in the
 * arg_priorities bitmask, OR-ed together implicitly (multiple matches on
 * the same field), then AND-ed with subsequent match groups.
 * Returns 0 on success (or immediately when all levels are enabled, since
 * that is equivalent to no filter), negative errno-style code on failure. */
static int add_priorities(sd_journal *j) {
        /* Template; the final digit is overwritten per level below. */
        char match[] = "PRIORITY=0";
        int i, r;
        assert(j);

        /* 0xFF = all 8 priority bits set = no filtering needed. */
        if (arg_priorities == 0xFF)
                return 0;

        for (i = LOG_EMERG; i <= LOG_DEBUG; i++)
                if (arg_priorities & (1 << i)) {
                        /* Patch the digit slot (last char before the NUL). */
                        match[sizeof(match)-2] = '0' + i;

                        r = sd_journal_add_match(j, match, strlen(match));
                        if (r < 0)
                                return log_error_errno(r, "Failed to add match: %m");
                }

        r = sd_journal_add_conjunction(j);
        if (r < 0)
                return log_error_errno(r, "Failed to add conjunction: %m");

        return 0;
}
|
|
|
|
|
2014-08-19 11:27:34 +02:00
|
|
|
|
|
|
|
/* Add a SYSLOG_IDENTIFIER= match for every identifier given via
 * --identifier/-t, OR-ed together, then AND-ed with subsequent match
 * groups. Returns 0 on success, negative errno-style code on failure. */
static int add_syslog_identifier(sd_journal *j) {
        int r;
        char **i;

        assert(j);

        STRV_FOREACH(i, arg_syslog_identifier) {
                char *u;

                /* strjoina() allocates on the stack — no free needed. */
                u = strjoina("SYSLOG_IDENTIFIER=", *i);
                /* Size 0 lets sd_journal_add_match() use strlen(u). */
                r = sd_journal_add_match(j, u, 0);
                if (r < 0)
                        return r;
                r = sd_journal_add_disjunction(j);
                if (r < 0)
                        return r;
        }

        r = sd_journal_add_conjunction(j);
        if (r < 0)
                return r;

        return 0;
}
|
|
|
|
|
2012-08-13 20:31:10 +02:00
|
|
|
/* Implements "journalctl --setup-keys": generates a new Forward-Secure
 * Sealing (FSS) key pair. The secret sealing key is persisted under
 * /var/log/journal/<machine-id>/fss (written atomically via a temp file
 * plus link()), while the verification key is printed to stdout for the
 * user to write down. Requires persistent journal storage and gcrypt
 * support; returns 0 on success, negative errno-style code on failure. */
static int setup_keys(void) {
#ifdef HAVE_GCRYPT
        size_t mpk_size, seed_size, state_size, i;
        uint8_t *mpk, *seed, *state;
        int fd = -1, r;
        sd_id128_t machine, boot;
        char *p = NULL, *k = NULL;
        struct FSSHeader h;
        uint64_t n;
        struct stat st;

        /* FSS only makes sense with persistent logging, so insist on
         * /var/log/journal existing as a directory. */
        r = stat("/var/log/journal", &st);
        if (r < 0 && errno != ENOENT && errno != ENOTDIR)
                return log_error_errno(errno, "stat(\"%s\") failed: %m", "/var/log/journal");

        if (r < 0 || !S_ISDIR(st.st_mode)) {
                log_error("%s is not a directory, must be using persistent logging for FSS.",
                          "/var/log/journal");
                return r < 0 ? -errno : -ENOTDIR;
        }

        r = sd_id128_get_machine(&machine);
        if (r < 0)
                return log_error_errno(r, "Failed to get machine ID: %m");

        r = sd_id128_get_boot(&boot);
        if (r < 0)
                return log_error_errno(r, "Failed to get boot ID: %m");

        /* p: final destination of the sealing key file. */
        if (asprintf(&p, "/var/log/journal/" SD_ID128_FORMAT_STR "/fss",
                     SD_ID128_FORMAT_VAL(machine)) < 0)
                return log_oom();

        if (arg_force) {
                r = unlink(p);
                if (r < 0 && errno != ENOENT) {
                        r = log_error_errno(errno, "unlink(\"%s\") failed: %m", p);
                        goto finish;
                }
        } else if (access(p, F_OK) >= 0) {
                log_error("Sealing key file %s exists already. Use --force to recreate.", p);
                r = -EEXIST;
                goto finish;
        }

        /* k: temporary file in the same directory, linked into place later
         * so the key file appears atomically. */
        if (asprintf(&k, "/var/log/journal/" SD_ID128_FORMAT_STR "/fss.tmp.XXXXXX",
                     SD_ID128_FORMAT_VAL(machine)) < 0) {
                r = log_oom();
                goto finish;
        }

        /* Key material buffers live on the stack; sizes come from the FSPRG
         * library for the recommended security parameter. */
        mpk_size = FSPRG_mskinbytes(FSPRG_RECOMMENDED_SECPAR);
        mpk = alloca(mpk_size);

        seed_size = FSPRG_RECOMMENDED_SEEDLEN;
        seed = alloca(seed_size);

        state_size = FSPRG_stateinbytes(FSPRG_RECOMMENDED_SECPAR);
        state = alloca(state_size);

        /* /dev/random (not urandom): key generation is rare and the seed
         * must be of high quality. */
        fd = open("/dev/random", O_RDONLY|O_CLOEXEC|O_NOCTTY);
        if (fd < 0) {
                r = log_error_errno(errno, "Failed to open /dev/random: %m");
                goto finish;
        }

        log_info("Generating seed...");
        r = loop_read_exact(fd, seed, seed_size, true);
        if (r < 0) {
                log_error_errno(r, "Failed to read random seed: %m");
                goto finish;
        }

        log_info("Generating key pair...");
        FSPRG_GenMK(NULL, mpk, seed, seed_size, FSPRG_RECOMMENDED_SECPAR);

        log_info("Generating sealing key...");
        FSPRG_GenState0(state, mpk, seed, seed_size);

        assert(arg_interval > 0);

        /* n: index of the current sealing epoch (wallclock / interval). */
        n = now(CLOCK_REALTIME);
        n /= arg_interval;

        /* Done with /dev/random; reuse fd for the temp key file. */
        safe_close(fd);
        fd = mkostemp_safe(k, O_WRONLY|O_CLOEXEC);
        if (fd < 0) {
                r = log_error_errno(fd, "Failed to open %s: %m", k);
                goto finish;
        }

        /* Enable secure remove, exclusion from dump, synchronous
         * writing and in-place updating */
        r = chattr_fd(fd, FS_SECRM_FL|FS_NODUMP_FL|FS_SYNC_FL|FS_NOCOW_FL, FS_SECRM_FL|FS_NODUMP_FL|FS_SYNC_FL|FS_NOCOW_FL);
        if (r < 0)
                /* Best effort only — some filesystems don't support chattr. */
                log_warning_errno(r, "Failed to set file attributes: %m");

        /* Fill in the FSS key file header (all multi-byte fields are
         * little-endian on disk). */
        zero(h);
        memcpy(h.signature, "KSHHRHLP", 8);
        h.machine_id = machine;
        h.boot_id = boot;
        h.header_size = htole64(sizeof(h));
        h.start_usec = htole64(n * arg_interval);
        h.interval_usec = htole64(arg_interval);
        h.fsprg_secpar = htole16(FSPRG_RECOMMENDED_SECPAR);
        h.fsprg_state_size = htole64(state_size);

        r = loop_write(fd, &h, sizeof(h), false);
        if (r < 0) {
                log_error_errno(r, "Failed to write header: %m");
                goto finish;
        }

        r = loop_write(fd, state, state_size, false);
        if (r < 0) {
                log_error_errno(r, "Failed to write state: %m");
                goto finish;
        }

        /* link() (not rename()) so that we fail if the destination appeared
         * concurrently, instead of overwriting it. */
        if (link(k, p) < 0) {
                r = log_error_errno(errno, "Failed to link file: %m");
                goto finish;
        }

        /* Human-readable explanation goes to stderr; the machine-readable
         * verification key itself goes to stdout below, so it can be piped. */
        if (on_tty()) {
                fprintf(stderr,
                        "\n"
                        "The new key pair has been generated. The " ANSI_HIGHLIGHT "secret sealing key" ANSI_NORMAL " has been written to\n"
                        "the following local file. This key file is automatically updated when the\n"
                        "sealing key is advanced. It should not be used on multiple hosts.\n"
                        "\n"
                        "\t%s\n"
                        "\n"
                        "Please write down the following " ANSI_HIGHLIGHT "secret verification key" ANSI_NORMAL ". It should be stored\n"
                        "at a safe location and should not be saved locally on disk.\n"
                        "\n\t" ANSI_HIGHLIGHT_RED, p);
                fflush(stderr);
        }

        /* Print the seed as hex, in groups of three bytes separated by '-'. */
        for (i = 0; i < seed_size; i++) {
                if (i > 0 && i % 3 == 0)
                        putchar('-');
                printf("%02x", ((uint8_t*) seed)[i]);
        }

        /* Append epoch index and interval so the verification key encodes
         * the full sealing schedule. */
        printf("/%llx-%llx\n", (unsigned long long) n, (unsigned long long) arg_interval);

        if (on_tty()) {
                char tsb[FORMAT_TIMESPAN_MAX], *hn;

                fprintf(stderr,
                        ANSI_NORMAL "\n"
                        "The sealing key is automatically changed every %s.\n",
                        format_timespan(tsb, sizeof(tsb), arg_interval, 0));

                hn = gethostname_malloc();

                if (hn) {
                        hostname_cleanup(hn);
                        fprintf(stderr, "\nThe keys have been generated for host %s/" SD_ID128_FORMAT_STR ".\n", hn, SD_ID128_FORMAT_VAL(machine));
                } else
                        fprintf(stderr, "\nThe keys have been generated for host " SD_ID128_FORMAT_STR ".\n", SD_ID128_FORMAT_VAL(machine));

#ifdef HAVE_QRENCODE
                /* If this is not an UTF-8 system don't print any QR codes */
                if (is_locale_utf8()) {
                        fputs("\nTo transfer the verification key to your phone please scan the QR code below:\n\n", stderr);
                        print_qr_code(stderr, seed, seed_size, n, arg_interval, hn, machine);
                }
#endif
                free(hn);
        }

        r = 0;

finish:
        safe_close(fd);

        /* On any exit path remove the temp file; after a successful link()
         * the data survives under p, so unlinking k is always correct. */
        if (k) {
                unlink(k);
                free(k);
        }

        free(p);

        return r;
#else
        log_error("Forward-secure sealing not available.");
        return -EOPNOTSUPP;
#endif
}
|
|
|
|
|
2012-08-15 01:54:09 +02:00
|
|
|
/* Implements "journalctl --verify": checks the integrity (and, if a key was
 * supplied via --verify-key=, the FSS seal) of every journal file currently
 * open in the journal context. Logs PASS/FAIL per file and returns 0 if all
 * files verified, or the last failing file's error code otherwise. An
 * invalid key (-EINVAL) aborts immediately. */
static int verify(sd_journal *j) {
        int r = 0;
        Iterator i;
        JournalFile *f;

        assert(j);

        log_show_color(true);

        ORDERED_HASHMAP_FOREACH(f, j->files, i) {
                int k;
                usec_t first = 0, validated = 0, last = 0;

#ifdef HAVE_GCRYPT
                /* Warn when a sealed file is being checked without a key:
                 * only the plain checksums can be verified then. */
                if (!arg_verify_key && JOURNAL_HEADER_SEALED(f->header))
                        log_notice("Journal file %s has sealing enabled but verification key has not been passed using --verify-key=.", f->path);
#endif

                k = journal_file_verify(f, arg_verify_key, &first, &validated, &last, true);
                if (k == -EINVAL) {
                        /* If the key was invalid give up right-away. */
                        return k;
                } else if (k < 0) {
                        /* Remember the failure but keep checking the
                         * remaining files. */
                        log_warning_errno(k, "FAIL: %s (%m)", f->path);
                        r = k;
                } else {
                        char a[FORMAT_TIMESTAMP_MAX], b[FORMAT_TIMESTAMP_MAX], c[FORMAT_TIMESPAN_MAX];
                        log_info("PASS: %s", f->path);

                        /* With a key at hand, also report how far the seal
                         * reaches into the file. */
                        if (arg_verify_key && JOURNAL_HEADER_SEALED(f->header)) {
                                if (validated > 0) {
                                        log_info("=> Validated from %s to %s, final %s entries not sealed.",
                                                 format_timestamp_maybe_utc(a, sizeof(a), first),
                                                 format_timestamp_maybe_utc(b, sizeof(b), validated),
                                                 format_timespan(c, sizeof(c), last > validated ? last - validated : 0, 0));
                                } else if (last > 0)
                                        log_info("=> No sealing yet, %s of entries not sealed.",
                                                 format_timespan(c, sizeof(c), last - first, 0));
                                else
                                        log_info("=> No sealing yet, no entries in file.");
                        }
                }
        }

        return r;
}
|
|
|
|
|
2013-03-11 23:03:13 +01:00
|
|
|
/* Print a hint when the invoking (non-root) user most likely lacks access
 * to the system journal. Checks membership in 'systemd-journal' and, when
 * ACL support is compiled in, any groups granted access via the journal
 * directory's default ACL. Returns 0 if access looks fine (or -q was
 * given), 1 if a hint was printed, negative errno-style code on error. */
static int access_check_var_log_journal(sd_journal *j) {
#ifdef HAVE_ACL
        _cleanup_strv_free_ char **g = NULL;
        const char* dir;
#endif
        int r;

        assert(j);

        if (arg_quiet)
                return 0;

        /* If we are root, we should have access, don't warn. */
        if (getuid() == 0)
                return 0;

        /* If we are in the 'systemd-journal' group, we should have
         * access too. */
        r = in_group("systemd-journal");
        if (r < 0)
                return log_error_errno(r, "Failed to check if we are in the 'systemd-journal' group: %m");
        if (r > 0)
                return 0;

#ifdef HAVE_ACL
        /* Prefer the runtime journal directory if it exists, otherwise the
         * persistent one. */
        if (laccess("/run/log/journal", F_OK) >= 0)
                dir = "/run/log/journal";
        else
                dir = "/var/log/journal";

        /* If we are in any of the groups listed in the journal ACLs,
         * then all is good, too. Let's enumerate all groups from the
         * default ACL of the directory, which generally should allow
         * access to most journal files too. */
        r = acl_search_groups(dir, &g);
        if (r < 0)
                return log_error_errno(r, "Failed to search journal ACL: %m");
        if (r > 0)
                return 0;

        /* Print a pretty list, if there were ACLs set. */
        if (!strv_isempty(g)) {
                _cleanup_free_ char *s = NULL;

                /* There are groups in the ACL, let's list them */
                r = strv_extend(&g, "systemd-journal");
                if (r < 0)
                        return log_oom();

                strv_sort(g);
                strv_uniq(g);

                s = strv_join(g, "', '");
                if (!s)
                        return log_oom();

                log_notice("Hint: You are currently not seeing messages from other users and the system.\n"
                           "      Users in groups '%s' can see all messages.\n"
                           "      Pass -q to turn off this notice.", s);
                return 1;
        }
#endif

        /* If no ACLs were found, print a short version of the message. */
        log_notice("Hint: You are currently not seeing messages from other users and the system.\n"
                   "      Users in the 'systemd-journal' group can see all messages. Pass -q to\n"
                   "      turn off this notice.");

        return 1;
}
|
2013-03-22 17:44:15 +01:00
|
|
|
|
2013-03-11 23:03:13 +01:00
|
|
|
/* Inspect the per-path error map that sd_journal collected while opening
 * journal files, print appropriate hints/warnings, and decide whether the
 * run can continue. Returns 0 normally, or a negative EACCES-based error
 * if permissions prevented opening any file at all. */
static int access_check(sd_journal *j) {
        Iterator it;
        void *code;      /* map value: negative errno packed as a pointer */
        char *path;      /* map key: journal file or directory path */
        int r = 0;

        assert(j);

        if (hashmap_isempty(j->errors)) {
                /* No open errors — but warn if nothing was found either. */
                if (ordered_hashmap_isempty(j->files))
                        log_notice("No journal files were found.");

                return 0;
        }

        if (hashmap_contains(j->errors, INT_TO_PTR(-EACCES))) {
                /* Best-effort hint about group membership/ACLs; its own
                 * failure is deliberately ignored. */
                (void) access_check_var_log_journal(j);

                /* Only fatal when we could open nothing at all. */
                if (ordered_hashmap_isempty(j->files))
                        r = log_error_errno(EACCES, "No journal files were opened due to insufficient permissions.");
        }

        HASHMAP_FOREACH_KEY(path, code, j->errors, it) {
                int err;

                err = abs(PTR_TO_INT(code));

                switch (err) {
                case EACCES:
                        /* Already summarized above. */
                        continue;

                case ENODATA:
                        log_warning_errno(err, "Journal file %s is truncated, ignoring file.", path);
                        break;

                case EPROTONOSUPPORT:
                        log_warning_errno(err, "Journal file %s uses an unsupported feature, ignoring file.", path);
                        break;

                case EBADMSG:
                        log_warning_errno(err, "Journal file %s corrupted, ignoring file.", path);
                        break;

                default:
                        log_warning_errno(err, "An error was encountered while opening journal file or directory %s, ignoring file: %m", path);
                        break;
                }
        }

        return r;
}
|
|
|
|
|
2014-10-23 00:28:17 +02:00
|
|
|
/* Implements "journalctl --flush": asks journald (via SIGUSR1 delivered
 * through the systemd KillUnit D-Bus call) to flush the runtime journal
 * from /run to /var, then waits — using inotify on /run/systemd/journal —
 * until the "flushed" flag file appears. Returns 0 on success, negative
 * errno-style code on failure. Not supported with --machine=. */
static int flush_to_var(void) {
        _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
        _cleanup_(sd_bus_flush_close_unrefp) sd_bus *bus = NULL;
        _cleanup_close_ int watch_fd = -1;
        int r;

        if (arg_machine) {
                log_error("--flush is not supported in conjunction with --machine=.");
                return -EOPNOTSUPP;
        }

        /* Quick exit */
        if (access("/run/systemd/journal/flushed", F_OK) >= 0)
                return 0;

        /* OK, let's actually do the full logic, send SIGUSR1 to the
         * daemon and set up inotify to wait for the flushed file to appear */
        r = bus_connect_system_systemd(&bus);
        if (r < 0)
                return log_error_errno(r, "Failed to get D-Bus connection: %m");

        r = sd_bus_call_method(
                        bus,
                        "org.freedesktop.systemd1",
                        "/org/freedesktop/systemd1",
                        "org.freedesktop.systemd1.Manager",
                        "KillUnit",
                        &error,
                        NULL,
                        "ssi", "systemd-journald.service", "main", SIGUSR1);
        if (r < 0)
                return log_error_errno(r, "Failed to kill journal service: %s", bus_error_message(&error, r));

        /* Make sure the directory we watch exists; errors intentionally
         * ignored (it normally exists already). */
        mkdir_p("/run/systemd/journal", 0755);

        watch_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
        if (watch_fd < 0)
                return log_error_errno(errno, "Failed to create inotify watch: %m");

        r = inotify_add_watch(watch_fd, "/run/systemd/journal", IN_CREATE|IN_DONT_FOLLOW|IN_ONLYDIR);
        if (r < 0)
                return log_error_errno(errno, "Failed to watch journal directory: %m");

        for (;;) {
                /* Check first, then wait — so an event that fired before the
                 * watch was installed is not missed. */
                if (access("/run/systemd/journal/flushed", F_OK) >= 0)
                        break;

                /* errno was set by the failing access() call just above. */
                if (errno != ENOENT)
                        return log_error_errno(errno, "Failed to check for existence of /run/systemd/journal/flushed: %m");

                r = fd_wait_for_event(watch_fd, POLLIN, USEC_INFINITY);
                if (r < 0)
                        return log_error_errno(r, "Failed to wait for event: %m");

                /* Drain queued inotify events, then re-check the flag file. */
                r = flush_fd(watch_fd);
                if (r < 0)
                        return log_error_errno(r, "Failed to flush inotify events: %m");
        }

        return 0;
}
|
|
|
|
|
2015-11-11 13:56:54 +01:00
|
|
|
static int send_signal_and_wait(int sig, const char *watch_path) {
|
tree-wide: expose "p"-suffix unref calls in public APIs to make gcc cleanup easy
GLIB has recently started to officially support the gcc cleanup
attribute in its public API, hence let's do the same for our APIs.
With this patch we'll define an xyz_unrefp() call for each public
xyz_unref() call, to make it easy to use inside a
__attribute__((cleanup())) expression. Then, all code is ported over to
make use of this.
The new calls are also documented in the man pages, with examples how to
use them (well, I only added docs where the _unref() call itself already
had docs, and the examples, only cover sd_bus_unrefp() and
sd_event_unrefp()).
This also renames sd_lldp_free() to sd_lldp_unref(), since that's how we
tend to call our destructors these days.
Note that this defines no public macro that wraps gcc's attribute and
makes it easier to use. While I think it's our duty in the library to
make our stuff easy to use, I figure it's not our duty to make gcc's own
features easy to use on its own. Most likely, client code which wants to
make use of this should define its own:
#define _cleanup_(function) __attribute__((cleanup(function)))
Or similar, to make the gcc feature easier to use.
Making this logic public has the benefit that we can remove three header
files whose only purpose was to define these functions internally.
See #2008.
2015-11-27 19:13:45 +01:00
|
|
|
_cleanup_(sd_bus_flush_close_unrefp) sd_bus *bus = NULL;
|
2015-11-11 12:59:09 +01:00
|
|
|
_cleanup_close_ int watch_fd = -1;
|
|
|
|
usec_t start;
|
|
|
|
int r;
|
|
|
|
|
2015-11-11 16:04:29 +01:00
|
|
|
if (arg_machine) {
|
|
|
|
log_error("--sync and --rotate are not supported in conjunction with --machine=.");
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
2015-11-12 11:17:01 +01:00
|
|
|
start = now(CLOCK_MONOTONIC);
|
2015-11-11 12:59:09 +01:00
|
|
|
|
2015-11-11 13:56:54 +01:00
|
|
|
/* This call sends the specified signal to journald, and waits
|
|
|
|
* for acknowledgment by watching the mtime of the specified
|
|
|
|
* flag file. This is used to trigger syncing or rotation and
|
|
|
|
* then wait for the operation to complete. */
|
2015-11-11 12:59:09 +01:00
|
|
|
|
|
|
|
for (;;) {
|
2015-11-12 11:17:01 +01:00
|
|
|
usec_t tstamp;
|
2015-11-11 12:59:09 +01:00
|
|
|
|
|
|
|
/* See if a sync happened by now. */
|
2015-11-12 11:17:01 +01:00
|
|
|
r = read_timestamp_file(watch_path, &tstamp);
|
|
|
|
if (r < 0 && r != -ENOENT)
|
|
|
|
return log_error_errno(errno, "Failed to read %s: %m", watch_path);
|
|
|
|
if (r >= 0 && tstamp >= start)
|
|
|
|
return 0;
|
2015-11-11 12:59:09 +01:00
|
|
|
|
|
|
|
/* Let's ask for a sync, but only once. */
|
|
|
|
if (!bus) {
|
tree-wide: expose "p"-suffix unref calls in public APIs to make gcc cleanup easy
GLIB has recently started to officially support the gcc cleanup
attribute in its public API, hence let's do the same for our APIs.
With this patch we'll define an xyz_unrefp() call for each public
xyz_unref() call, to make it easy to use inside a
__attribute__((cleanup())) expression. Then, all code is ported over to
make use of this.
The new calls are also documented in the man pages, with examples how to
use them (well, I only added docs where the _unref() call itself already
had docs, and the examples, only cover sd_bus_unrefp() and
sd_event_unrefp()).
This also renames sd_lldp_free() to sd_lldp_unref(), since that's how we
tend to call our destructors these days.
Note that this defines no public macro that wraps gcc's attribute and
makes it easier to use. While I think it's our duty in the library to
make our stuff easy to use, I figure it's not our duty to make gcc's own
features easy to use on its own. Most likely, client code which wants to
make use of this should define its own:
#define _cleanup_(function) __attribute__((cleanup(function)))
Or similar, to make the gcc feature easier to use.
Making this logic public has the benefit that we can remove three header
files whose only purpose was to define these functions internally.
See #2008.
2015-11-27 19:13:45 +01:00
|
|
|
_cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
|
2015-11-11 12:59:09 +01:00
|
|
|
|
|
|
|
r = bus_connect_system_systemd(&bus);
|
|
|
|
if (r < 0)
|
|
|
|
return log_error_errno(r, "Failed to get D-Bus connection: %m");
|
|
|
|
|
|
|
|
r = sd_bus_call_method(
|
|
|
|
bus,
|
|
|
|
"org.freedesktop.systemd1",
|
|
|
|
"/org/freedesktop/systemd1",
|
|
|
|
"org.freedesktop.systemd1.Manager",
|
|
|
|
"KillUnit",
|
|
|
|
&error,
|
|
|
|
NULL,
|
2015-11-11 13:56:54 +01:00
|
|
|
"ssi", "systemd-journald.service", "main", sig);
|
2015-11-11 12:59:09 +01:00
|
|
|
if (r < 0)
|
|
|
|
return log_error_errno(r, "Failed to kill journal service: %s", bus_error_message(&error, r));
|
|
|
|
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Let's install the inotify watch, if we didn't do that yet. */
|
|
|
|
if (watch_fd < 0) {
|
|
|
|
|
|
|
|
mkdir_p("/run/systemd/journal", 0755);
|
|
|
|
|
|
|
|
watch_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
|
|
|
|
if (watch_fd < 0)
|
|
|
|
return log_error_errno(errno, "Failed to create inotify watch: %m");
|
|
|
|
|
2015-11-12 11:17:01 +01:00
|
|
|
r = inotify_add_watch(watch_fd, "/run/systemd/journal", IN_MOVED_TO|IN_DONT_FOLLOW|IN_ONLYDIR);
|
2015-11-11 12:59:09 +01:00
|
|
|
if (r < 0)
|
|
|
|
return log_error_errno(errno, "Failed to watch journal directory: %m");
|
|
|
|
|
|
|
|
/* Recheck the flag file immediately, so that we don't miss any event since the last check. */
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* OK, all preparatory steps done, let's wait until
|
|
|
|
* inotify reports an event. */
|
|
|
|
|
|
|
|
r = fd_wait_for_event(watch_fd, POLLIN, USEC_INFINITY);
|
|
|
|
if (r < 0)
|
|
|
|
return log_error_errno(r, "Failed to wait for event: %m");
|
|
|
|
|
|
|
|
r = flush_fd(watch_fd);
|
|
|
|
if (r < 0)
|
|
|
|
return log_error_errno(r, "Failed to flush inotify events: %m");
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-11-11 13:56:54 +01:00
|
|
|
/* Implements "journalctl --rotate": SIGUSR2 tells journald to rotate all
 * journal files; completion is acknowledged via the "rotated" flag file. */
static int rotate(void) {
        return send_signal_and_wait(SIGUSR2, "/run/systemd/journal/rotated");
}
|
|
|
|
|
|
|
|
/* Implements "journalctl --sync": SIGRTMIN+1 tells journald to write all
 * pending log data to disk; completion is acknowledged via the "synced"
 * flag file. */
static int sync_journal(void) {
        return send_signal_and_wait(SIGRTMIN+1, "/run/systemd/journal/synced");
}
|
|
|
|
|
2012-07-11 01:08:38 +02:00
|
|
|
int main(int argc, char *argv[]) {
|
|
|
|
int r;
|
tree-wide: expose "p"-suffix unref calls in public APIs to make gcc cleanup easy
GLIB has recently started to officially support the gcc cleanup
attribute in its public API, hence let's do the same for our APIs.
With this patch we'll define an xyz_unrefp() call for each public
xyz_unref() call, to make it easy to use inside a
__attribute__((cleanup())) expression. Then, all code is ported over to
make use of this.
The new calls are also documented in the man pages, with examples how to
use them (well, I only added docs where the _unref() call itself already
had docs, and the examples, only cover sd_bus_unrefp() and
sd_event_unrefp()).
This also renames sd_lldp_free() to sd_lldp_unref(), since that's how we
tend to call our destructors these days.
Note that this defines no public macro that wraps gcc's attribute and
makes it easier to use. While I think it's our duty in the library to
make our stuff easy to use, I figure it's not our duty to make gcc's own
features easy to use on its own. Most likely, client code which wants to
make use of this should define its own:
#define _cleanup_(function) __attribute__((cleanup(function)))
Or similar, to make the gcc feature easier to use.
Making this logic public has the benefit that we can remove three header
files whose only purpose was to define these functions internally.
See #2008.
2015-11-27 19:13:45 +01:00
|
|
|
_cleanup_(sd_journal_closep) sd_journal *j = NULL;
|
2012-07-11 01:08:38 +02:00
|
|
|
bool need_seek = false;
|
2012-07-11 01:36:55 +02:00
|
|
|
sd_id128_t previous_boot_id;
|
2013-01-28 05:53:52 +01:00
|
|
|
bool previous_boot_id_valid = false, first_line = true;
|
|
|
|
int n_shown = 0;
|
2013-08-04 01:38:13 +02:00
|
|
|
bool ellipsized = false;
|
2012-07-11 01:08:38 +02:00
|
|
|
|
2012-11-12 20:16:07 +01:00
|
|
|
setlocale(LC_ALL, "");
|
2012-07-11 01:08:38 +02:00
|
|
|
log_parse_environment();
|
|
|
|
log_open();
|
|
|
|
|
|
|
|
r = parse_argv(argc, argv);
|
|
|
|
if (r <= 0)
|
|
|
|
goto finish;
|
|
|
|
|
2012-10-19 00:06:47 +02:00
|
|
|
signal(SIGWINCH, columns_lines_cache_reset);
|
2015-01-05 00:52:47 +01:00
|
|
|
sigbus_install();
|
2012-10-19 00:06:47 +02:00
|
|
|
|
2015-01-08 03:17:50 +01:00
|
|
|
/* Increase max number of open files to 16K if we can, we
|
|
|
|
* might needs this when browsing journal files, which might
|
|
|
|
* be split up into many files. */
|
|
|
|
setrlimit_closest(RLIMIT_NOFILE, &RLIMIT_MAKE_CONST(16384));
|
|
|
|
|
2015-11-11 16:21:30 +01:00
|
|
|
switch (arg_action) {
|
2015-11-11 12:59:09 +01:00
|
|
|
|
2015-11-11 16:21:30 +01:00
|
|
|
case ACTION_NEW_ID128:
|
|
|
|
r = generate_new_id128();
|
2015-09-30 21:54:58 +02:00
|
|
|
goto finish;
|
|
|
|
|
2015-11-11 16:21:30 +01:00
|
|
|
case ACTION_SETUP_KEYS:
|
2012-08-13 20:31:10 +02:00
|
|
|
r = setup_keys();
|
|
|
|
goto finish;
|
2013-03-29 01:17:24 +01:00
|
|
|
|
2015-11-11 16:21:30 +01:00
|
|
|
case ACTION_LIST_CATALOG:
|
|
|
|
case ACTION_DUMP_CATALOG:
|
|
|
|
case ACTION_UPDATE_CATALOG: {
|
2014-07-26 20:47:31 +02:00
|
|
|
_cleanup_free_ char *database;
|
|
|
|
|
|
|
|
database = path_join(arg_root, CATALOG_DATABASE, NULL);
|
|
|
|
if (!database) {
|
|
|
|
r = log_oom();
|
|
|
|
goto finish;
|
2013-03-29 02:44:00 +01:00
|
|
|
}
|
|
|
|
|
2013-03-29 01:17:24 +01:00
|
|
|
if (arg_action == ACTION_UPDATE_CATALOG) {
|
2013-03-29 02:44:00 +01:00
|
|
|
r = catalog_update(database, arg_root, catalog_file_dirs);
|
2013-03-29 01:17:24 +01:00
|
|
|
if (r < 0)
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to list catalog: %m");
|
2013-03-29 01:17:24 +01:00
|
|
|
} else {
|
|
|
|
bool oneline = arg_action == ACTION_LIST_CATALOG;
|
|
|
|
|
2016-02-19 19:25:13 +01:00
|
|
|
pager_open(arg_no_pager, arg_pager_end);
|
2015-11-11 16:21:30 +01:00
|
|
|
|
2013-03-29 01:17:24 +01:00
|
|
|
if (optind < argc)
|
2015-11-11 16:21:30 +01:00
|
|
|
r = catalog_list_items(stdout, database, oneline, argv + optind);
|
2013-03-29 01:17:24 +01:00
|
|
|
else
|
2013-03-29 02:44:00 +01:00
|
|
|
r = catalog_list(stdout, database, oneline);
|
2013-03-29 01:17:24 +01:00
|
|
|
if (r < 0)
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to list catalog: %m");
|
2013-03-29 01:17:24 +01:00
|
|
|
}
|
2012-11-15 23:03:31 +01:00
|
|
|
|
|
|
|
goto finish;
|
|
|
|
}
|
|
|
|
|
2015-11-11 16:21:30 +01:00
|
|
|
case ACTION_FLUSH:
|
|
|
|
r = flush_to_var();
|
|
|
|
goto finish;
|
|
|
|
|
|
|
|
case ACTION_SYNC:
|
|
|
|
r = sync_journal();
|
|
|
|
goto finish;
|
|
|
|
|
|
|
|
case ACTION_ROTATE:
|
|
|
|
r = rotate();
|
|
|
|
goto finish;
|
|
|
|
|
|
|
|
case ACTION_SHOW:
|
|
|
|
case ACTION_PRINT_HEADER:
|
|
|
|
case ACTION_VERIFY:
|
|
|
|
case ACTION_DISK_USAGE:
|
|
|
|
case ACTION_LIST_BOOTS:
|
|
|
|
case ACTION_VACUUM:
|
2016-01-27 19:01:42 +01:00
|
|
|
case ACTION_LIST_FIELDS:
|
|
|
|
case ACTION_LIST_FIELD_NAMES:
|
2015-11-11 16:21:30 +01:00
|
|
|
/* These ones require access to the journal files, continue below. */
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
assert_not_reached("Unknown action");
|
|
|
|
}
|
|
|
|
|
2012-07-11 01:08:38 +02:00
|
|
|
if (arg_directory)
|
2013-06-05 01:33:34 +02:00
|
|
|
r = sd_journal_open_directory(&j, arg_directory, arg_journal_type);
|
2016-04-25 00:31:24 +02:00
|
|
|
else if (arg_file_stdin) {
|
|
|
|
int ifd = STDIN_FILENO;
|
|
|
|
r = sd_journal_open_files_fd(&j, &ifd, 1, 0);
|
|
|
|
} else if (arg_file)
|
2013-06-06 01:30:17 +02:00
|
|
|
r = sd_journal_open_files(&j, (const char**) arg_file, 0);
|
2013-12-11 22:04:03 +01:00
|
|
|
else if (arg_machine)
|
|
|
|
r = sd_journal_open_container(&j, arg_machine, 0);
|
2012-07-11 01:08:38 +02:00
|
|
|
else
|
2013-06-05 01:33:34 +02:00
|
|
|
r = sd_journal_open(&j, !arg_merge*SD_JOURNAL_LOCAL_ONLY + arg_journal_type);
|
2012-07-11 01:08:38 +02:00
|
|
|
if (r < 0) {
|
2015-11-11 16:21:30 +01:00
|
|
|
log_error_errno(r, "Failed to open %s: %m", arg_directory ?: arg_file ? "files" : "journal");
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2012-07-11 01:08:38 +02:00
|
|
|
}
|
|
|
|
|
2013-03-11 23:03:13 +01:00
|
|
|
r = access_check(j);
|
|
|
|
if (r < 0)
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2013-03-11 23:03:13 +01:00
|
|
|
|
2015-11-11 16:21:30 +01:00
|
|
|
switch (arg_action) {
|
2012-08-15 01:54:09 +02:00
|
|
|
|
2015-11-11 16:21:30 +01:00
|
|
|
case ACTION_NEW_ID128:
|
|
|
|
case ACTION_SETUP_KEYS:
|
|
|
|
case ACTION_LIST_CATALOG:
|
|
|
|
case ACTION_DUMP_CATALOG:
|
|
|
|
case ACTION_UPDATE_CATALOG:
|
|
|
|
case ACTION_FLUSH:
|
|
|
|
case ACTION_SYNC:
|
|
|
|
case ACTION_ROTATE:
|
|
|
|
assert_not_reached("Unexpected action.");
|
|
|
|
|
|
|
|
case ACTION_PRINT_HEADER:
|
2012-07-16 22:24:02 +02:00
|
|
|
journal_print_header(j);
|
2015-05-18 23:50:34 +02:00
|
|
|
r = 0;
|
|
|
|
goto finish;
|
2012-07-16 22:24:02 +02:00
|
|
|
|
2015-11-11 16:21:30 +01:00
|
|
|
case ACTION_VERIFY:
|
|
|
|
r = verify(j);
|
|
|
|
goto finish;
|
|
|
|
|
|
|
|
case ACTION_DISK_USAGE: {
|
2014-02-19 17:47:11 +01:00
|
|
|
uint64_t bytes = 0;
|
2012-09-07 23:20:28 +02:00
|
|
|
char sbytes[FORMAT_BYTES_MAX];
|
|
|
|
|
|
|
|
r = sd_journal_get_usage(j, &bytes);
|
|
|
|
if (r < 0)
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2012-09-07 23:20:28 +02:00
|
|
|
|
2014-11-03 23:08:33 +01:00
|
|
|
printf("Archived and active journals take up %s on disk.\n",
|
2013-03-18 04:36:25 +01:00
|
|
|
format_bytes(sbytes, sizeof(sbytes), bytes));
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2012-09-07 23:20:28 +02:00
|
|
|
}
|
|
|
|
|
2015-11-11 16:21:30 +01:00
|
|
|
case ACTION_LIST_BOOTS:
|
|
|
|
r = list_boots(j);
|
|
|
|
goto finish;
|
|
|
|
|
|
|
|
case ACTION_VACUUM: {
|
2014-11-03 23:08:33 +01:00
|
|
|
Directory *d;
|
|
|
|
Iterator i;
|
|
|
|
|
|
|
|
HASHMAP_FOREACH(d, j->directories_by_path, i) {
|
|
|
|
int q;
|
|
|
|
|
|
|
|
if (d->is_root)
|
|
|
|
continue;
|
|
|
|
|
2015-10-02 23:21:59 +02:00
|
|
|
q = journal_directory_vacuum(d->path, arg_vacuum_size, arg_vacuum_n_files, arg_vacuum_time, NULL, true);
|
2014-11-03 23:08:33 +01:00
|
|
|
if (q < 0) {
|
2015-10-02 23:21:59 +02:00
|
|
|
log_error_errno(q, "Failed to vacuum %s: %m", d->path);
|
2014-11-03 23:08:33 +01:00
|
|
|
r = q;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2014-11-03 23:08:33 +01:00
|
|
|
}
|
|
|
|
|
2016-01-27 19:01:42 +01:00
|
|
|
case ACTION_LIST_FIELD_NAMES: {
|
|
|
|
const char *field;
|
|
|
|
|
|
|
|
SD_JOURNAL_FOREACH_FIELD(j, field) {
|
|
|
|
printf("%s\n", field);
|
2016-02-23 05:32:04 +01:00
|
|
|
n_shown++;
|
2016-01-27 19:01:42 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
r = 0;
|
|
|
|
goto finish;
|
|
|
|
}
|
|
|
|
|
2015-11-11 16:21:30 +01:00
|
|
|
case ACTION_SHOW:
|
2016-01-27 19:01:42 +01:00
|
|
|
case ACTION_LIST_FIELDS:
|
2015-11-11 16:21:30 +01:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
assert_not_reached("Unknown action");
|
2013-10-29 04:43:57 +01:00
|
|
|
}
|
|
|
|
|
2016-02-01 09:25:22 +01:00
|
|
|
if (arg_boot_offset != 0 &&
|
|
|
|
sd_journal_has_runtime_files(j) > 0 &&
|
|
|
|
sd_journal_has_persistent_files(j) == 0) {
|
|
|
|
log_info("Specifying boot ID has no effect, no persistent journal was found");
|
|
|
|
r = 0;
|
|
|
|
goto finish;
|
|
|
|
}
|
2013-06-28 17:26:30 +02:00
|
|
|
/* add_boot() must be called first!
|
|
|
|
* It may need to seek the journal to find parent boot IDs. */
|
|
|
|
r = add_boot(j);
|
2012-07-11 01:08:38 +02:00
|
|
|
if (r < 0)
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2012-07-11 01:08:38 +02:00
|
|
|
|
2013-05-15 05:08:00 +02:00
|
|
|
r = add_dmesg(j);
|
|
|
|
if (r < 0)
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2013-05-15 05:08:00 +02:00
|
|
|
|
2013-04-12 09:14:43 +02:00
|
|
|
r = add_units(j);
|
2013-12-29 01:47:36 +01:00
|
|
|
if (r < 0) {
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to add filter for units: %m");
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2013-12-29 01:47:36 +01:00
|
|
|
}
|
2012-10-16 02:59:27 +02:00
|
|
|
|
2014-08-19 11:27:34 +02:00
|
|
|
r = add_syslog_identifier(j);
|
|
|
|
if (r < 0) {
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to add filter for syslog identifiers: %m");
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2014-08-19 11:27:34 +02:00
|
|
|
}
|
|
|
|
|
2013-04-11 15:27:55 +02:00
|
|
|
r = add_priorities(j);
|
2015-05-19 00:25:45 +02:00
|
|
|
if (r < 0)
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2012-07-11 01:08:38 +02:00
|
|
|
|
2013-04-11 15:27:55 +02:00
|
|
|
r = add_matches(j, argv + optind);
|
2015-05-19 00:25:45 +02:00
|
|
|
if (r < 0)
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2012-07-27 10:31:33 +02:00
|
|
|
|
2015-01-06 06:29:40 +01:00
|
|
|
if (_unlikely_(log_get_max_level() >= LOG_DEBUG)) {
|
2013-07-16 20:45:28 +02:00
|
|
|
_cleanup_free_ char *filter;
|
|
|
|
|
|
|
|
filter = journal_make_match_string(j);
|
2015-05-19 00:25:45 +02:00
|
|
|
if (!filter)
|
|
|
|
return log_oom();
|
|
|
|
|
2013-07-16 20:45:28 +02:00
|
|
|
log_debug("Journal filter: %s", filter);
|
|
|
|
}
|
2013-01-28 05:53:52 +01:00
|
|
|
|
2016-01-27 19:01:42 +01:00
|
|
|
if (arg_action == ACTION_LIST_FIELDS) {
|
2012-10-18 03:33:44 +02:00
|
|
|
const void *data;
|
|
|
|
size_t size;
|
|
|
|
|
2016-01-27 19:01:42 +01:00
|
|
|
assert(arg_field);
|
|
|
|
|
2013-04-17 05:07:45 +02:00
|
|
|
r = sd_journal_set_data_threshold(j, 0);
|
|
|
|
if (r < 0) {
|
2015-05-19 00:25:45 +02:00
|
|
|
log_error_errno(r, "Failed to unset data size threshold: %m");
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2013-04-17 05:07:45 +02:00
|
|
|
}
|
|
|
|
|
2012-10-18 03:33:44 +02:00
|
|
|
r = sd_journal_query_unique(j, arg_field);
|
|
|
|
if (r < 0) {
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to query unique data objects: %m");
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2012-10-18 03:33:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
SD_JOURNAL_FOREACH_UNIQUE(j, data, size) {
|
|
|
|
const void *eq;
|
|
|
|
|
2013-01-28 05:53:52 +01:00
|
|
|
if (arg_lines >= 0 && n_shown >= arg_lines)
|
2012-10-18 22:55:12 +02:00
|
|
|
break;
|
|
|
|
|
2012-10-18 03:33:44 +02:00
|
|
|
eq = memchr(data, '=', size);
|
|
|
|
if (eq)
|
|
|
|
printf("%.*s\n", (int) (size - ((const uint8_t*) eq - (const uint8_t*) data + 1)), (const char*) eq + 1);
|
|
|
|
else
|
|
|
|
printf("%.*s\n", (int) size, (const char*) data);
|
2012-10-18 22:55:12 +02:00
|
|
|
|
2016-02-23 05:32:04 +01:00
|
|
|
n_shown++;
|
2012-10-18 03:33:44 +02:00
|
|
|
}
|
|
|
|
|
2015-05-18 23:50:34 +02:00
|
|
|
r = 0;
|
|
|
|
goto finish;
|
2012-10-18 03:33:44 +02:00
|
|
|
}
|
|
|
|
|
2013-06-06 01:30:17 +02:00
|
|
|
/* Opening the fd now means the first sd_journal_wait() will actually wait */
|
|
|
|
if (arg_follow) {
|
|
|
|
r = sd_journal_get_fd(j);
|
2016-04-25 00:31:24 +02:00
|
|
|
if (r == -EMEDIUMTYPE) {
|
|
|
|
log_error_errno(r, "The --follow switch is not supported in conjunction with reading from STDIN.");
|
|
|
|
goto finish;
|
|
|
|
}
|
2015-05-19 00:25:45 +02:00
|
|
|
if (r < 0) {
|
|
|
|
log_error_errno(r, "Failed to get journal fd: %m");
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2015-05-19 00:25:45 +02:00
|
|
|
}
|
2013-06-06 01:30:17 +02:00
|
|
|
}
|
|
|
|
|
2013-07-16 16:21:18 +02:00
|
|
|
if (arg_cursor || arg_after_cursor) {
|
2014-03-29 05:37:25 +01:00
|
|
|
r = sd_journal_seek_cursor(j, arg_cursor ?: arg_after_cursor);
|
2012-06-09 10:32:38 +02:00
|
|
|
if (r < 0) {
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to seek to cursor: %m");
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2012-06-09 10:32:38 +02:00
|
|
|
}
|
2015-05-18 23:50:34 +02:00
|
|
|
|
2013-03-01 10:27:10 +01:00
|
|
|
if (!arg_reverse)
|
2013-07-16 16:21:18 +02:00
|
|
|
r = sd_journal_next_skip(j, 1 + !!arg_after_cursor);
|
2013-03-01 10:27:10 +01:00
|
|
|
else
|
2013-07-16 16:21:18 +02:00
|
|
|
r = sd_journal_previous_skip(j, 1 + !!arg_after_cursor);
|
|
|
|
|
2014-12-01 08:27:00 +01:00
|
|
|
if (arg_after_cursor && r < 2) {
|
2013-07-16 16:21:18 +02:00
|
|
|
/* We couldn't find the next entry after the cursor. */
|
2014-12-01 08:27:00 +01:00
|
|
|
if (arg_follow)
|
|
|
|
need_seek = true;
|
|
|
|
else
|
|
|
|
arg_lines = 0;
|
|
|
|
}
|
2012-06-09 10:32:38 +02:00
|
|
|
|
2013-03-01 10:27:10 +01:00
|
|
|
} else if (arg_since_set && !arg_reverse) {
|
2012-10-11 16:42:46 +02:00
|
|
|
r = sd_journal_seek_realtime_usec(j, arg_since);
|
2012-09-27 23:25:23 +02:00
|
|
|
if (r < 0) {
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to seek to date: %m");
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2012-09-27 23:25:23 +02:00
|
|
|
}
|
|
|
|
r = sd_journal_next(j);
|
|
|
|
|
2013-03-01 10:27:10 +01:00
|
|
|
} else if (arg_until_set && arg_reverse) {
|
|
|
|
r = sd_journal_seek_realtime_usec(j, arg_until);
|
|
|
|
if (r < 0) {
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to seek to date: %m");
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2013-03-01 10:27:10 +01:00
|
|
|
}
|
|
|
|
r = sd_journal_previous(j);
|
|
|
|
|
2013-01-28 05:53:52 +01:00
|
|
|
} else if (arg_lines >= 0) {
|
2012-01-04 02:14:42 +01:00
|
|
|
r = sd_journal_seek_tail(j);
|
|
|
|
if (r < 0) {
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to seek to tail: %m");
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2012-01-04 02:14:42 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
r = sd_journal_previous_skip(j, arg_lines);
|
2012-09-27 23:25:23 +02:00
|
|
|
|
2013-03-01 10:27:10 +01:00
|
|
|
} else if (arg_reverse) {
|
|
|
|
r = sd_journal_seek_tail(j);
|
|
|
|
if (r < 0) {
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to seek to tail: %m");
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2013-03-01 10:27:10 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
r = sd_journal_previous(j);
|
|
|
|
|
2012-01-04 02:14:42 +01:00
|
|
|
} else {
|
|
|
|
r = sd_journal_seek_head(j);
|
|
|
|
if (r < 0) {
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to seek to head: %m");
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2012-01-04 02:14:42 +01:00
|
|
|
}
|
2012-01-04 04:00:14 +01:00
|
|
|
|
|
|
|
r = sd_journal_next(j);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (r < 0) {
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to iterate through journal: %m");
|
2015-05-18 23:50:34 +02:00
|
|
|
goto finish;
|
2011-12-19 22:35:46 +01:00
|
|
|
}
|
2015-07-24 02:10:32 +02:00
|
|
|
if (r == 0) {
|
2015-08-14 16:38:41 +02:00
|
|
|
if (arg_follow)
|
|
|
|
need_seek = true;
|
|
|
|
else {
|
2015-11-17 07:06:52 +01:00
|
|
|
if (!arg_quiet)
|
|
|
|
printf("-- No entries --\n");
|
2015-08-14 16:38:41 +02:00
|
|
|
goto finish;
|
|
|
|
}
|
2015-07-24 02:10:32 +02:00
|
|
|
}
|
2011-10-07 21:06:39 +02:00
|
|
|
|
2013-12-12 00:22:48 +01:00
|
|
|
if (!arg_follow)
|
2016-02-19 19:25:13 +01:00
|
|
|
pager_open(arg_no_pager, arg_pager_end);
|
2011-12-21 18:59:56 +01:00
|
|
|
|
2012-10-11 16:42:46 +02:00
|
|
|
if (!arg_quiet) {
|
|
|
|
usec_t start, end;
|
|
|
|
char start_buf[FORMAT_TIMESTAMP_MAX], end_buf[FORMAT_TIMESTAMP_MAX];
|
|
|
|
|
|
|
|
r = sd_journal_get_cutoff_realtime_usec(j, &start, &end);
|
|
|
|
if (r < 0) {
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to get cutoff: %m");
|
2012-10-11 16:42:46 +02:00
|
|
|
goto finish;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (r > 0) {
|
|
|
|
if (arg_follow)
|
2012-10-16 01:09:09 +02:00
|
|
|
printf("-- Logs begin at %s. --\n",
|
2014-10-08 22:37:45 +02:00
|
|
|
format_timestamp_maybe_utc(start_buf, sizeof(start_buf), start));
|
2012-10-11 16:42:46 +02:00
|
|
|
else
|
2012-10-16 01:09:09 +02:00
|
|
|
printf("-- Logs begin at %s, end at %s. --\n",
|
2014-10-08 22:37:45 +02:00
|
|
|
format_timestamp_maybe_utc(start_buf, sizeof(start_buf), start),
|
|
|
|
format_timestamp_maybe_utc(end_buf, sizeof(end_buf), end));
|
2012-10-11 16:42:46 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-12-19 22:35:46 +01:00
|
|
|
for (;;) {
|
2013-01-28 05:53:52 +01:00
|
|
|
while (arg_lines < 0 || n_shown < arg_lines || (arg_follow && !first_line)) {
|
2012-10-11 16:42:46 +02:00
|
|
|
int flags;
|
|
|
|
|
2012-01-04 04:00:14 +01:00
|
|
|
if (need_seek) {
|
2013-03-07 06:40:30 +01:00
|
|
|
if (!arg_reverse)
|
2013-03-01 10:27:10 +01:00
|
|
|
r = sd_journal_next(j);
|
|
|
|
else
|
|
|
|
r = sd_journal_previous(j);
|
2012-01-04 04:00:14 +01:00
|
|
|
if (r < 0) {
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to iterate through journal: %m");
|
2012-01-04 04:00:14 +01:00
|
|
|
goto finish;
|
|
|
|
}
|
2013-06-10 03:50:56 +02:00
|
|
|
if (r == 0)
|
|
|
|
break;
|
2011-12-21 18:59:56 +01:00
|
|
|
}
|
|
|
|
|
2013-03-01 10:27:10 +01:00
|
|
|
if (arg_until_set && !arg_reverse) {
|
2012-10-11 16:42:46 +02:00
|
|
|
usec_t usec;
|
|
|
|
|
|
|
|
r = sd_journal_get_realtime_usec(j, &usec);
|
|
|
|
if (r < 0) {
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to determine timestamp: %m");
|
2012-10-11 16:42:46 +02:00
|
|
|
goto finish;
|
|
|
|
}
|
2013-02-24 15:27:51 +01:00
|
|
|
if (usec > arg_until)
|
|
|
|
goto finish;
|
2012-10-11 16:42:46 +02:00
|
|
|
}
|
|
|
|
|
2013-03-01 10:27:10 +01:00
|
|
|
if (arg_since_set && arg_reverse) {
|
|
|
|
usec_t usec;
|
|
|
|
|
|
|
|
r = sd_journal_get_realtime_usec(j, &usec);
|
|
|
|
if (r < 0) {
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to determine timestamp: %m");
|
2013-03-01 10:27:10 +01:00
|
|
|
goto finish;
|
|
|
|
}
|
|
|
|
if (usec < arg_since)
|
|
|
|
goto finish;
|
|
|
|
}
|
|
|
|
|
2014-09-26 16:49:55 +02:00
|
|
|
if (!arg_merge && !arg_quiet) {
|
2012-09-06 01:52:46 +02:00
|
|
|
sd_id128_t boot_id;
|
2012-07-11 01:36:55 +02:00
|
|
|
|
2012-09-06 01:52:46 +02:00
|
|
|
r = sd_journal_get_monotonic_usec(j, NULL, &boot_id);
|
|
|
|
if (r >= 0) {
|
|
|
|
if (previous_boot_id_valid &&
|
|
|
|
!sd_id128_equal(boot_id, previous_boot_id))
|
2013-08-02 07:59:02 +02:00
|
|
|
printf("%s-- Reboot --%s\n",
|
2015-09-19 00:45:05 +02:00
|
|
|
ansi_highlight(), ansi_normal());
|
2012-09-06 01:52:46 +02:00
|
|
|
|
|
|
|
previous_boot_id = boot_id;
|
|
|
|
previous_boot_id_valid = true;
|
|
|
|
}
|
2012-07-11 01:36:55 +02:00
|
|
|
}
|
|
|
|
|
2012-10-11 16:42:46 +02:00
|
|
|
flags =
|
2012-10-18 23:22:56 +02:00
|
|
|
arg_all * OUTPUT_SHOW_ALL |
|
2013-10-07 03:55:18 +02:00
|
|
|
arg_full * OUTPUT_FULL_WIDTH |
|
2016-01-19 10:17:19 +01:00
|
|
|
colors_enabled() * OUTPUT_COLOR |
|
2014-10-02 14:39:29 +02:00
|
|
|
arg_catalog * OUTPUT_CATALOG |
|
2016-04-20 20:09:57 +02:00
|
|
|
arg_utc * OUTPUT_UTC |
|
|
|
|
arg_no_hostname * OUTPUT_NO_HOSTNAME;
|
2012-10-11 16:42:46 +02:00
|
|
|
|
2013-08-04 01:38:13 +02:00
|
|
|
r = output_journal(stdout, j, arg_output, 0, flags, &ellipsized);
|
2013-06-10 03:50:56 +02:00
|
|
|
need_seek = true;
|
|
|
|
if (r == -EADDRNOTAVAIL)
|
|
|
|
break;
|
|
|
|
else if (r < 0 || ferror(stdout))
|
2011-12-21 18:17:22 +01:00
|
|
|
goto finish;
|
2012-01-04 04:00:14 +01:00
|
|
|
|
2012-10-11 16:42:46 +02:00
|
|
|
n_shown++;
|
2011-10-07 21:06:39 +02:00
|
|
|
}
|
|
|
|
|
2013-07-16 16:21:18 +02:00
|
|
|
if (!arg_follow) {
|
|
|
|
if (arg_show_cursor) {
|
|
|
|
_cleanup_free_ char *cursor = NULL;
|
|
|
|
|
|
|
|
r = sd_journal_get_cursor(j, &cursor);
|
|
|
|
if (r < 0 && r != -EADDRNOTAVAIL)
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Failed to get cursor: %m");
|
2013-07-16 16:21:18 +02:00
|
|
|
else if (r >= 0)
|
|
|
|
printf("-- cursor: %s\n", cursor);
|
|
|
|
}
|
|
|
|
|
2011-12-19 22:35:46 +01:00
|
|
|
break;
|
2013-07-16 16:21:18 +02:00
|
|
|
}
|
2011-12-19 22:35:46 +01:00
|
|
|
|
2012-07-10 21:46:11 +02:00
|
|
|
r = sd_journal_wait(j, (uint64_t) -1);
|
2011-12-19 22:35:46 +01:00
|
|
|
if (r < 0) {
|
2014-11-28 13:19:16 +01:00
|
|
|
log_error_errno(r, "Couldn't wait for journal event: %m");
|
2011-12-19 22:35:46 +01:00
|
|
|
goto finish;
|
|
|
|
}
|
2013-01-28 05:53:52 +01:00
|
|
|
|
|
|
|
first_line = false;
|
2011-11-08 18:20:03 +01:00
|
|
|
}
|
2011-10-07 21:06:39 +02:00
|
|
|
|
|
|
|
finish:
|
2011-12-21 18:59:56 +01:00
|
|
|
pager_close();
|
|
|
|
|
2014-03-29 16:58:32 +01:00
|
|
|
strv_free(arg_file);
|
|
|
|
|
2015-05-18 23:54:05 +02:00
|
|
|
strv_free(arg_syslog_identifier);
|
|
|
|
strv_free(arg_system_units);
|
|
|
|
strv_free(arg_user_units);
|
|
|
|
|
2015-10-22 19:54:29 +02:00
|
|
|
free(arg_root);
|
|
|
|
|
2011-10-14 04:44:50 +02:00
|
|
|
return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
|
2011-10-07 21:06:39 +02:00
|
|
|
}
|