journalctl: port JSON output mode to new JSON API

Also, while we are at it, beef it up, by adding json-seq support (i.e.
https://tools.ietf.org/html/rfc7464). This is particularly useful in
conjunction with jq's --seq switch.
This commit is contained in:
Lennart Poettering 2018-07-23 20:22:30 +02:00
parent 02619c033f
commit 8e04444385
12 changed files with 232 additions and 180 deletions

View File

@ -316,10 +316,23 @@
<option>json</option> <option>json</option>
</term> </term>
<listitem> <listitem>
<para>formats entries as JSON data structures, one per <para>formats entries as JSON objects, separated by newline characters (see <ulink
line (see url="https://www.freedesktop.org/wiki/Software/systemd/json">Journal JSON Format</ulink> for more
<ulink url="https://www.freedesktop.org/wiki/Software/systemd/json">Journal JSON Format</ulink> information). Field values are generally encoded as JSON strings, with three exceptions:
for more information).</para> <orderedlist>
<listitem><para>Fields larger than 4096 bytes are encoded as <constant>null</constant> values. (This
may be turned off by passing <option>--all</option>, but be aware that this may allocate overly long
JSON objects.) </para></listitem>
<listitem><para>Journal entries permit non-unique fields within the same log entry. JSON does not allow
non-unique fields within objects. Due to this, if a non-unique field is encountered a JSON array is
used as field value, listing all field values as elements.</para></listitem>
<listitem><para>Fields containing non-printable or non-UTF-8 bytes are encoded as arrays containing
the raw bytes individually formatted as unsigned numbers.</para></listitem>
</orderedlist>
Note that this encoding is reversible (with the exception of the size limit).</para>
</listitem> </listitem>
</varlistentry> </varlistentry>
@ -346,6 +359,19 @@
</listitem> </listitem>
</varlistentry> </varlistentry>
<varlistentry>
<term>
<option>json-seq</option>
</term>
<listitem>
<para>formats entries as JSON data structures, but prefixes them with an ASCII Record Separator
character (0x1E) and suffixes them with an ASCII Line Feed character (0x0A), in accordance with <ulink
url="https://tools.ietf.org/html/rfc7464">JavaScript Object Notation (JSON) Text Sequences</ulink>
(<literal>application/json-seq</literal>).
</para>
</listitem>
</varlistentry>
<varlistentry> <varlistentry>
<term> <term>
<option>cat</option> <option>cat</option>
@ -375,14 +401,11 @@
<varlistentry> <varlistentry>
<term><option>--output-fields=</option></term> <term><option>--output-fields=</option></term>
<listitem><para>A comma separated list of the fields which should <listitem><para>A comma separated list of the fields which should be included in the output. This only has an
be included in the output. This only has an effect for the output modes effect for the output modes which would normally show all fields (<option>verbose</option>,
which would normally show all fields (<option>verbose</option>, <option>export</option>, <option>json</option>, <option>json-pretty</option>, <option>json-sse</option> and
<option>export</option>, <option>json</option>, <option>json-seq</option>). The <literal>__CURSOR</literal>, <literal>__REALTIME_TIMESTAMP</literal>,
<option>json-pretty</option>, and <option>json-sse</option>). The <literal>__MONOTONIC_TIMESTAMP</literal>, and <literal>_BOOT_ID</literal> fields are always
<literal>__CURSOR</literal>, <literal>__REALTIME_TIMESTAMP</literal>,
<literal>__MONOTONIC_TIMESTAMP</literal>, and
<literal>_BOOT_ID</literal> fields are always
printed.</para></listitem> printed.</para></listitem>
</varlistentry> </varlistentry>

View File

@ -66,7 +66,7 @@ _journalctl() {
compopt -o filenames compopt -o filenames
;; ;;
--output|-o) --output|-o)
comps='short short-full short-iso short-iso-precise short-precise short-monotonic short-unix verbose export json json-pretty json-sse cat with-unit' comps='short short-full short-iso short-iso-precise short-precise short-monotonic short-unix verbose export json json-pretty json-sse json-seq cat with-unit'
;; ;;
--field|-F) --field|-F)
comps=$(journalctl --fields | sort 2>/dev/null) comps=$(journalctl --fields | sort 2>/dev/null)

View File

@ -77,7 +77,7 @@ _machinectl() {
comps='' comps=''
;; ;;
--output|-o) --output|-o)
comps='short short-full short-iso short-iso-precise short-precise short-monotonic short-unix verbose export json json-pretty json-sse cat' comps='short short-full short-iso short-iso-precise short-precise short-monotonic short-unix verbose export json json-pretty json-sse json-seq cat with-unit'
;; ;;
esac esac
COMPREPLY=( $(compgen -W '$comps' -- "$cur") ) COMPREPLY=( $(compgen -W '$comps' -- "$cur") )

View File

@ -169,7 +169,7 @@ _systemctl () {
;; ;;
--output|-o) --output|-o)
comps='short short-full short-iso short-iso-precise short-precise short-monotonic short-unix verbose export json comps='short short-full short-iso short-iso-precise short-precise short-monotonic short-unix verbose export json
json-pretty json-sse cat' json-pretty json-sse json-seq cat with-unit'
;; ;;
--machine|-M) --machine|-M)
comps=$( __get_machines ) comps=$( __get_machines )

View File

@ -2,5 +2,5 @@
# SPDX-License-Identifier: LGPL-2.1+ # SPDX-License-Identifier: LGPL-2.1+
local -a _output_opts local -a _output_opts
_output_opts=(short short-full short-iso short-iso-precise short-precise short-monotonic short-unix verbose export json json-pretty json-sse cat with-unit) _output_opts=(short short-full short-iso short-iso-precise short-precise short-monotonic short-unix verbose export json json-pretty json-sse json-seq cat with-unit)
_describe -t output 'output mode' _output_opts || compadd "$@" _describe -t output 'output mode' _output_opts || compadd "$@"

View File

@ -58,6 +58,7 @@ static const char* const mime_types[_OUTPUT_MODE_MAX] = {
[OUTPUT_SHORT] = "text/plain", [OUTPUT_SHORT] = "text/plain",
[OUTPUT_JSON] = "application/json", [OUTPUT_JSON] = "application/json",
[OUTPUT_JSON_SSE] = "text/event-stream", [OUTPUT_JSON_SSE] = "text/event-stream",
[OUTPUT_JSON_SEQ] = "application/json-seq",
[OUTPUT_EXPORT] = "application/vnd.fdo.journal", [OUTPUT_EXPORT] = "application/vnd.fdo.journal",
}; };
@ -267,6 +268,8 @@ static int request_parse_accept(
m->mode = OUTPUT_JSON; m->mode = OUTPUT_JSON;
else if (streq(header, mime_types[OUTPUT_JSON_SSE])) else if (streq(header, mime_types[OUTPUT_JSON_SSE]))
m->mode = OUTPUT_JSON_SSE; m->mode = OUTPUT_JSON_SSE;
else if (streq(header, mime_types[OUTPUT_JSON_SEQ]))
m->mode = OUTPUT_JSON_SEQ;
else if (streq(header, mime_types[OUTPUT_EXPORT])) else if (streq(header, mime_types[OUTPUT_EXPORT]))
m->mode = OUTPUT_EXPORT; m->mode = OUTPUT_EXPORT;
else else

View File

@ -330,7 +330,8 @@ static int help(void) {
" -o --output=STRING Change journal output mode (short, short-precise,\n" " -o --output=STRING Change journal output mode (short, short-precise,\n"
" short-iso, short-iso-precise, short-full,\n" " short-iso, short-iso-precise, short-full,\n"
" short-monotonic, short-unix, verbose, export,\n" " short-monotonic, short-unix, verbose, export,\n"
" json, json-pretty, json-sse, cat, with-unit)\n" " json, json-pretty, json-sse, json-seq, cat,\n"
" with-unit)\n"
" --output-fields=LIST Select fields to print in verbose/export/json modes\n" " --output-fields=LIST Select fields to print in verbose/export/json modes\n"
" --utc Express time in Coordinated Universal Time (UTC)\n" " --utc Express time in Coordinated Universal Time (UTC)\n"
" -x --catalog Add message explanations where available\n" " -x --catalog Add message explanations where available\n"
@ -516,7 +517,7 @@ static int parse_argv(int argc, char *argv[]) {
return -EINVAL; return -EINVAL;
} }
if (IN_SET(arg_output, OUTPUT_EXPORT, OUTPUT_JSON, OUTPUT_JSON_PRETTY, OUTPUT_JSON_SSE, OUTPUT_CAT)) if (IN_SET(arg_output, OUTPUT_EXPORT, OUTPUT_JSON, OUTPUT_JSON_PRETTY, OUTPUT_JSON_SSE, OUTPUT_JSON_SEQ, OUTPUT_CAT))
arg_quiet = true; arg_quiet = true;
break; break;

View File

@ -1308,7 +1308,8 @@ static int help(int argc, char *argv[], void *userdata) {
" -o --output=STRING Change journal output mode (short, short-precise,\n" " -o --output=STRING Change journal output mode (short, short-precise,\n"
" short-iso, short-iso-precise, short-full,\n" " short-iso, short-iso-precise, short-full,\n"
" short-monotonic, short-unix, verbose, export,\n" " short-monotonic, short-unix, verbose, export,\n"
" json, json-pretty, json-sse, cat)\n" " json, json-pretty, json-sse, json-seq, cat,\n"
" with-unit)\n"
"Session Commands:\n" "Session Commands:\n"
" list-sessions List sessions\n" " list-sessions List sessions\n"
" session-status [ID...] Show session status\n" " session-status [ID...] Show session status\n"

View File

@ -2642,7 +2642,8 @@ static int help(int argc, char *argv[], void *userdata) {
" -o --output=STRING Change journal output mode (short, short-precise,\n" " -o --output=STRING Change journal output mode (short, short-precise,\n"
" short-iso, short-iso-precise, short-full,\n" " short-iso, short-iso-precise, short-full,\n"
" short-monotonic, short-unix, verbose, export,\n" " short-monotonic, short-unix, verbose, export,\n"
" json, json-pretty, json-sse, cat)\n" " json, json-pretty, json-sse, json-seq, cat,\n"
" with-unit)\n"
" --verify=MODE Verification mode for downloaded images (no,\n" " --verify=MODE Verification mode for downloaded images (no,\n"
" checksum, signature)\n" " checksum, signature)\n"
" --force Download image even if already exists\n\n" " --force Download image even if already exists\n\n"

View File

@ -21,6 +21,7 @@
#include "hostname-util.h" #include "hostname-util.h"
#include "io-util.h" #include "io-util.h"
#include "journal-internal.h" #include "journal-internal.h"
#include "json.h"
#include "log.h" #include "log.h"
#include "logs-show.h" #include "logs-show.h"
#include "macro.h" #include "macro.h"
@ -41,7 +42,7 @@
#define PRINT_LINE_THRESHOLD 3 #define PRINT_LINE_THRESHOLD 3
#define PRINT_CHAR_THRESHOLD 300 #define PRINT_CHAR_THRESHOLD 300
#define JSON_THRESHOLD 4096 #define JSON_THRESHOLD 4096U
static int print_catalog(FILE *f, sd_journal *j) { static int print_catalog(FILE *f, sd_journal *j) {
int r; int r;
@ -747,6 +748,96 @@ void json_escape(
} }
} }
/* Per-field accumulator: the field name (as a JSON string variant) plus one
 * JsonVariant per occurrence of that field. Journal entries may carry the same
 * field several times, hence the flexible array of values; it is grown with
 * realloc() as further occurrences are seen. */
struct json_data {
JsonVariant* name;
size_t n_values;
JsonVariant* values[];
};
/* Converts one field value into a JsonVariant and appends it to the per-name
 * entry in hashmap 'h' (keyed by field name, value: struct json_data),
 * creating the entry on first occurrence.
 *
 * Encoding rules (see the man page text above): oversized fields become JSON
 * null unless OUTPUT_SHOW_ALL is set; printable UTF-8 becomes a JSON string;
 * anything else becomes an array of the raw bytes as unsigned numbers.
 *
 * Returns 0 on success, negative errno-style error (already logged) on failure. */
static int update_json_data(
Hashmap *h,
OutputFlags flags,
const char *name,
const void *value,
size_t size) {
_cleanup_(json_variant_unrefp) JsonVariant *v = NULL;
struct json_data *d;
int r;
/* "name=value" longer than JSON_THRESHOLD → encode as null, unless --all */
if (!(flags & OUTPUT_SHOW_ALL) && strlen(name) + 1 + size >= JSON_THRESHOLD)
r = json_variant_new_null(&v);
else if (utf8_is_printable(value, size))
r = json_variant_new_stringn(&v, value, size);
else
r = json_variant_new_array_bytes(&v, value, size);
if (r < 0)
return log_error_errno(r, "Failed to allocate JSON data: %m");
d = hashmap_get(h, name);
if (d) {
struct json_data *w;
/* Field seen before: grow the flexible value array by one slot. */
w = realloc(d, offsetof(struct json_data, values) + sizeof(JsonVariant*) * (d->n_values + 1));
if (!w)
return log_oom();
d = w;
/* realloc() may have moved the struct — re-point the hashmap entry at it.
 * The key (the name string owned by d->name) is unchanged, so this
 * update cannot fail with a missing entry. */
assert_se(hashmap_update(h, json_variant_string(d->name), d) >= 0);
} else {
_cleanup_(json_variant_unrefp) JsonVariant *n = NULL;
/* First occurrence: allocate the name variant and a json_data with
 * room for a single value. */
r = json_variant_new_string(&n, name);
if (r < 0)
return log_error_errno(r, "Failed to allocate JSON name variant: %m");
d = malloc0(offsetof(struct json_data, values) + sizeof(JsonVariant*));
if (!d)
return log_oom();
/* Key the hashmap by the string owned by the name variant, so key
 * lifetime is tied to d->name. */
r = hashmap_put(h, json_variant_string(n), d);
if (r < 0) {
free(d);
return log_error_errno(r, "Failed to insert JSON name into hashmap: %m");
}
d->name = TAKE_PTR(n);
}
/* Hand ownership of the value variant to the accumulator. */
d->values[d->n_values++] = TAKE_PTR(v);
return 0;
}
/* Splits a raw journal datum of the form "NAME=VALUE" and feeds it to
 * update_json_data(). Data without '=', with an empty name, the _BOOT_ID=
 * field (already emitted from the entry header), and fields filtered out by
 * 'output_fields' are silently skipped (returns 0). */
static int update_json_data_split(
Hashmap *h,
OutputFlags flags,
Set *output_fields,
const void *data,
size_t size) {
const char *eq;
char *name;
assert(h);
assert(data || size == 0);
/* _BOOT_ID is printed from the header fields; suppress the duplicate. */
if (memory_startswith(data, size, "_BOOT_ID="))
return 0;
/* Only search for '=' within the threshold — a name that long would be
 * encoded as null anyway, and this bounds the scan. */
eq = memchr(data, '=', MIN(size, JSON_THRESHOLD));
if (!eq)
return 0;
if (eq == data)
return 0;
/* Stack-allocated NUL-terminated copy of the field name (alloca-based;
 * freed automatically when this function returns). */
name = strndupa(data, eq - (const char*) data);
if (output_fields && !set_get(output_fields, name))
return 0;
return update_json_data(h, flags, name, eq + 1, size - (eq - (const char*) data) - 1);
}
static int output_json( static int output_json(
FILE *f, FILE *f,
sd_journal *j, sd_journal *j,
@ -756,19 +847,21 @@ static int output_json(
Set *output_fields, Set *output_fields,
const size_t highlight[2]) { const size_t highlight[2]) {
uint64_t realtime, monotonic; char sid[SD_ID128_STRING_MAX], usecbuf[DECIMAL_STR_MAX(usec_t)];
_cleanup_(json_variant_unrefp) JsonVariant *object = NULL;
_cleanup_free_ char *cursor = NULL; _cleanup_free_ char *cursor = NULL;
const void *data; uint64_t realtime, monotonic;
size_t length; JsonVariant **array = NULL;
struct json_data *d;
sd_id128_t boot_id; sd_id128_t boot_id;
char sid[33], *k;
int r;
Hashmap *h = NULL; Hashmap *h = NULL;
bool done, separator; size_t n = 0;
Iterator i;
int r;
assert(j); assert(j);
sd_journal_set_data_threshold(j, flags & OUTPUT_SHOW_ALL ? 0 : JSON_THRESHOLD); (void) sd_journal_set_data_threshold(j, flags & OUTPUT_SHOW_ALL ? 0 : JSON_THRESHOLD);
r = sd_journal_get_realtime_usec(j, &realtime); r = sd_journal_get_realtime_usec(j, &realtime);
if (r < 0) if (r < 0)
@ -782,182 +875,109 @@ static int output_json(
if (r < 0) if (r < 0)
return log_error_errno(r, "Failed to get cursor: %m"); return log_error_errno(r, "Failed to get cursor: %m");
if (mode == OUTPUT_JSON_PRETTY)
fprintf(f,
"{\n"
"\t\"__CURSOR\" : \"%s\",\n"
"\t\"__REALTIME_TIMESTAMP\" : \""USEC_FMT"\",\n"
"\t\"__MONOTONIC_TIMESTAMP\" : \""USEC_FMT"\",\n"
"\t\"_BOOT_ID\" : \"%s\"",
cursor,
realtime,
monotonic,
sd_id128_to_string(boot_id, sid));
else {
if (mode == OUTPUT_JSON_SSE)
fputs("data: ", f);
fprintf(f,
"{ \"__CURSOR\" : \"%s\", "
"\"__REALTIME_TIMESTAMP\" : \""USEC_FMT"\", "
"\"__MONOTONIC_TIMESTAMP\" : \""USEC_FMT"\", "
"\"_BOOT_ID\" : \"%s\"",
cursor,
realtime,
monotonic,
sd_id128_to_string(boot_id, sid));
}
h = hashmap_new(&string_hash_ops); h = hashmap_new(&string_hash_ops);
if (!h) if (!h)
return log_oom(); return log_oom();
/* First round, iterate through the entry and count how often each field appears */ r = update_json_data(h, flags, "__CURSOR", cursor, strlen(cursor));
JOURNAL_FOREACH_DATA_RETVAL(j, data, length, r) { if (r < 0)
const char *eq; goto finish;
char *n;
unsigned u;
if (memory_startswith(data, length, "_BOOT_ID=")) xsprintf(usecbuf, USEC_FMT, realtime);
continue; r = update_json_data(h, flags, "__REALTIME_TIMESTAMP", usecbuf, strlen(usecbuf));
if (r < 0)
goto finish;
eq = memchr(data, '=', length); xsprintf(usecbuf, USEC_FMT, monotonic);
if (!eq) r = update_json_data(h, flags, "__MONOTONIC_TIMESTAMP", usecbuf, strlen(usecbuf));
continue; if (r < 0)
goto finish;
n = memdup_suffix0(data, eq - (const char*) data); sd_id128_to_string(boot_id, sid);
if (!n) { r = update_json_data(h, flags, "_BOOT_ID", sid, strlen(sid));
r = log_oom(); if (r < 0)
goto finish;
for (;;) {
const void *data;
size_t size;
r = sd_journal_enumerate_data(j, &data, &size);
if (r == -EBADMSG) {
log_debug_errno(r, "Skipping message we can't read: %m");
r = 0;
goto finish; goto finish;
} }
if (r < 0) {
log_error_errno(r, "Failed to read journal: %m");
goto finish;
}
if (r == 0)
break;
u = PTR_TO_UINT(hashmap_get(h, n)); r = update_json_data_split(h, flags, output_fields, data, size);
if (u == 0) { if (r < 0)
r = hashmap_put(h, n, UINT_TO_PTR(1)); goto finish;
}
array = new(JsonVariant*, hashmap_size(h)*2);
if (!array) {
r = log_oom();
goto finish;
}
HASHMAP_FOREACH(d, h, i) {
assert(d->n_values > 0);
array[n++] = json_variant_ref(d->name);
if (d->n_values == 1)
array[n++] = json_variant_ref(d->values[0]);
else {
_cleanup_(json_variant_unrefp) JsonVariant *q = NULL;
r = json_variant_new_array(&q, d->values, d->n_values);
if (r < 0) { if (r < 0) {
free(n); log_error_errno(r, "Failed to create JSON array: %m");
log_oom();
goto finish;
}
} else {
r = hashmap_update(h, n, UINT_TO_PTR(u + 1));
free(n);
if (r < 0) {
log_oom();
goto finish; goto finish;
} }
array[n++] = TAKE_PTR(q);
} }
} }
if (r == -EBADMSG) {
log_debug_errno(r, "Skipping message we can't read: %m"); r = json_variant_new_object(&object, array, n);
return 0; if (r < 0) {
log_error_errno(r, "Failed to allocate JSON object: %m");
goto finish;
} }
if (r < 0)
return r;
separator = true; json_variant_dump(object,
do { (mode == OUTPUT_JSON_SSE ? JSON_FORMAT_SSE :
done = true; mode == OUTPUT_JSON_SEQ ? JSON_FORMAT_SEQ :
mode == OUTPUT_JSON_PRETTY ? JSON_FORMAT_PRETTY :
SD_JOURNAL_FOREACH_DATA(j, data, length) { JSON_FORMAT_NEWLINE) |
const char *eq; (FLAGS_SET(flags, OUTPUT_COLOR) ? JSON_FORMAT_COLOR : 0),
char *kk; f, NULL);
_cleanup_free_ char *n = NULL;
size_t m;
unsigned u;
/* We already printed the boot id from the data in
* the header, hence let's suppress it here */
if (memory_startswith(data, length, "_BOOT_ID="))
continue;
eq = memchr(data, '=', length);
if (!eq)
continue;
m = eq - (const char*) data;
n = memdup_suffix0(data, m);
if (!n) {
r = log_oom();
goto finish;
}
if (output_fields && !set_get(output_fields, n))
continue;
if (separator)
fputs(mode == OUTPUT_JSON_PRETTY ? ",\n\t" : ", ", f);
u = PTR_TO_UINT(hashmap_get2(h, n, (void**) &kk));
if (u == 0)
/* We already printed this, let's jump to the next */
separator = false;
else if (u == 1) {
/* Field only appears once, output it directly */
json_escape(f, data, m, flags);
fputs(" : ", f);
json_escape(f, eq + 1, length - m - 1, flags);
hashmap_remove(h, n);
free(kk);
separator = true;
} else {
/* Field appears multiple times, output it as array */
json_escape(f, data, m, flags);
fputs(" : [ ", f);
json_escape(f, eq + 1, length - m - 1, flags);
/* Iterate through the end of the list */
while (sd_journal_enumerate_data(j, &data, &length) > 0) {
if (length < m + 1)
continue;
if (memcmp(data, n, m) != 0)
continue;
if (((const char*) data)[m] != '=')
continue;
fputs(", ", f);
json_escape(f, (const char*) data + m + 1, length - m - 1, flags);
}
fputs(" ]", f);
hashmap_remove(h, n);
free(kk);
/* Iterate data fields form the beginning */
done = false;
separator = true;
break;
}
}
} while (!done);
if (mode == OUTPUT_JSON_PRETTY)
fputs("\n}\n", f);
else if (mode == OUTPUT_JSON_SSE)
fputs("}\n\n", f);
else
fputs(" }\n", f);
r = 0; r = 0;
finish: finish:
while ((k = hashmap_steal_first_key(h))) while ((d = hashmap_steal_first(h))) {
free(k); size_t k;
json_variant_unref(d->name);
for (k = 0; k < d->n_values; k++)
json_variant_unref(d->values[k]);
free(d);
}
hashmap_free(h); hashmap_free(h);
json_variant_unref_many(array, n);
free(array);
return r; return r;
} }
@ -1037,6 +1057,7 @@ static int (*output_funcs[_OUTPUT_MODE_MAX])(
[OUTPUT_JSON] = output_json, [OUTPUT_JSON] = output_json,
[OUTPUT_JSON_PRETTY] = output_json, [OUTPUT_JSON_PRETTY] = output_json,
[OUTPUT_JSON_SSE] = output_json, [OUTPUT_JSON_SSE] = output_json,
[OUTPUT_JSON_SEQ] = output_json,
[OUTPUT_CAT] = output_cat, [OUTPUT_CAT] = output_cat,
[OUTPUT_WITH_UNIT] = output_short, [OUTPUT_WITH_UNIT] = output_short,
}; };

View File

@ -16,6 +16,7 @@ static const char *const output_mode_table[_OUTPUT_MODE_MAX] = {
[OUTPUT_JSON] = "json", [OUTPUT_JSON] = "json",
[OUTPUT_JSON_PRETTY] = "json-pretty", [OUTPUT_JSON_PRETTY] = "json-pretty",
[OUTPUT_JSON_SSE] = "json-sse", [OUTPUT_JSON_SSE] = "json-sse",
[OUTPUT_JSON_SEQ] = "json-seq",
[OUTPUT_CAT] = "cat", [OUTPUT_CAT] = "cat",
[OUTPUT_WITH_UNIT] = "with-unit", [OUTPUT_WITH_UNIT] = "with-unit",
}; };

View File

@ -16,6 +16,7 @@ typedef enum OutputMode {
OUTPUT_JSON, OUTPUT_JSON,
OUTPUT_JSON_PRETTY, OUTPUT_JSON_PRETTY,
OUTPUT_JSON_SSE, OUTPUT_JSON_SSE,
OUTPUT_JSON_SEQ,
OUTPUT_CAT, OUTPUT_CAT,
OUTPUT_WITH_UNIT, OUTPUT_WITH_UNIT,
_OUTPUT_MODE_MAX, _OUTPUT_MODE_MAX,