2017-11-18 17:09:20 +01:00
|
|
|
/* SPDX-License-Identifier: LGPL-2.1+ */
|
2010-02-03 13:03:47 +01:00
|
|
|
|
2010-01-21 00:51:37 +01:00
|
|
|
#include <errno.h>
|
2009-11-18 00:42:52 +01:00
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
#include "sd-id128.h"
|
|
|
|
#include "sd-messages.h"
|
2015-10-23 18:52:53 +02:00
|
|
|
|
2015-10-27 03:01:06 +01:00
|
|
|
#include "alloc-util.h"
|
2013-09-16 22:50:38 +02:00
|
|
|
#include "async.h"
|
2015-10-23 18:52:53 +02:00
|
|
|
#include "dbus-job.h"
|
2014-03-03 01:33:45 +01:00
|
|
|
#include "dbus.h"
|
2015-10-23 18:52:53 +02:00
|
|
|
#include "escape.h"
|
2018-10-17 18:36:24 +02:00
|
|
|
#include "fileio.h"
|
2015-10-26 16:18:16 +01:00
|
|
|
#include "job.h"
|
2015-10-23 18:52:53 +02:00
|
|
|
#include "log.h"
|
|
|
|
#include "macro.h"
|
2015-10-26 16:18:16 +01:00
|
|
|
#include "parse-util.h"
|
2018-10-17 20:40:09 +02:00
|
|
|
#include "serialize.h"
|
2015-10-23 18:52:53 +02:00
|
|
|
#include "set.h"
|
2019-03-13 12:14:47 +01:00
|
|
|
#include "sort-util.h"
|
2015-10-23 18:52:53 +02:00
|
|
|
#include "special.h"
|
2016-01-12 15:34:20 +01:00
|
|
|
#include "stdio-util.h"
|
2015-10-26 22:31:05 +01:00
|
|
|
#include "string-table.h"
|
2015-10-24 22:58:24 +02:00
|
|
|
#include "string-util.h"
|
2015-10-23 18:52:53 +02:00
|
|
|
#include "strv.h"
|
2015-04-10 23:15:59 +02:00
|
|
|
#include "terminal-util.h"
|
2015-10-23 18:52:53 +02:00
|
|
|
#include "unit.h"
|
|
|
|
#include "virt.h"
|
2012-04-20 12:28:31 +02:00
|
|
|
|
2012-04-23 01:24:04 +02:00
|
|
|
/* Allocate a bare Job object attached to 'unit', without assigning a job
 * ID or linking it anywhere. Used by the deserialization code, which
 * fills in the remaining fields itself. Returns NULL on OOM. */
Job* job_new_raw(Unit *unit) {
        Job *ret;

        assert(unit);

        ret = new(Job, 1);
        if (!ret)
                return NULL;

        *ret = (Job) {
                .manager = unit->manager,
                .unit = unit,
                .type = _JOB_TYPE_INVALID,
        };

        return ret;
}
|
|
|
|
|
|
|
|
/* Allocate a new job of the given type for 'unit' and assign it a fresh
 * ID from the manager's counter. The job is not linked into any
 * transaction here — that is what job_dependency() is for. Returns NULL
 * on OOM. */
Job* job_new(Unit *unit, JobType type) {
        Job *job;

        assert(type < _JOB_TYPE_MAX);

        job = job_new_raw(unit);
        if (!job)
                return NULL;

        job->id = job->manager->current_job_id++;
        job->type = type;

        return job;
}
|
|
|
|
|
2018-04-24 15:19:38 +02:00
|
|
|
/* Detach the job from all of the manager's work queues and drop its
 * timeout timer, without freeing the job itself. The job must already be
 * detached from any transaction and dependency lists, and must not be
 * installed on a unit. */
void job_unlink(Job *j) {
        assert(j);
        assert(!j->installed);
        assert(!j->transaction_prev);
        assert(!j->transaction_next);
        assert(!j->subject_list);
        assert(!j->object_list);

        /* Queue membership is tracked by a flag per queue; only remove
         * from the corresponding manager list when the flag says we are
         * actually on it. */
        if (j->in_run_queue) {
                LIST_REMOVE(run_queue, j->manager->run_queue, j);
                j->in_run_queue = false;
        }

        if (j->in_dbus_queue) {
                LIST_REMOVE(dbus_queue, j->manager->dbus_job_queue, j);
                j->in_dbus_queue = false;
        }

        if (j->in_gc_queue) {
                LIST_REMOVE(gc_queue, j->manager->gc_job_queue, j);
                j->in_gc_queue = false;
        }

        /* Drop the job timeout event source, if any; unref returns NULL,
         * leaving the field cleanly reset. */
        j->timer_event_source = sd_event_source_unref(j->timer_event_source);
}
|
|
|
|
|
2018-12-10 18:52:11 +01:00
|
|
|
/* Release all resources held by a job and free it. Implies job_unlink().
 * The job must not be installed on its unit anymore. Always returns NULL
 * so callers can write 'j = job_free(j);'. */
Job* job_free(Job *j) {
        assert(j);
        assert(!j->installed);
        assert(!j->transaction_prev);
        assert(!j->transaction_next);
        assert(!j->subject_list);
        assert(!j->object_list);

        /* Take the job off all manager queues first. */
        job_unlink(j);

        sd_bus_track_unref(j->bus_track);
        strv_free(j->deserialized_clients);

        return mfree(j);
}
|
2010-01-19 00:22:34 +01:00
|
|
|
|
2015-01-05 17:22:10 +01:00
|
|
|
/* Switch the job to the given state, keeping the manager's count of
 * running jobs in sync. For jobs that are not installed yet only the
 * state field is updated; the counter covers installed jobs only. */
static void job_set_state(Job *j, JobState state) {
        assert(j);
        assert(state >= 0);
        assert(state < _JOB_STATE_MAX);

        if (j->state == state)
                return;

        j->state = state;

        /* Uninstalled jobs do not participate in the running-jobs
         * bookkeeping. */
        if (!j->installed)
                return;

        if (j->state != JOB_RUNNING) {
                /* The only other valid state is WAITING, i.e. this job
                 * just stopped running. */
                assert(j->state == JOB_WAITING);
                assert(j->unit->manager->n_running_jobs > 0);

                j->unit->manager->n_running_jobs--;

                /* Once nothing runs anymore, the "jobs in progress"
                 * notification event source can go away as well. */
                if (j->unit->manager->n_running_jobs <= 0)
                        j->unit->manager->jobs_in_progress_event_source = sd_event_source_unref(j->unit->manager->jobs_in_progress_event_source);
        } else
                j->unit->manager->n_running_jobs++;
}
|
|
|
|
|
2012-04-20 02:04:01 +02:00
|
|
|
/* Undo job_install(): detach the job from its per-unit slot, notify bus
 * observers, and drop it from the manager's job table. The job object
 * itself stays alive; freeing it is the caller's responsibility. */
void job_uninstall(Job *j) {
        Job **slot;

        assert(j->installed);

        /* Moving back to WAITING keeps the running-jobs counter
         * consistent. */
        job_set_state(j, JOB_WAITING);

        /* NOP jobs live in their own dedicated slot; every other type
         * occupies the regular per-unit job slot. */
        if (j->type == JOB_NOP)
                slot = &j->unit->nop_job;
        else
                slot = &j->unit->job;
        assert(*slot == j);

        /* Detach from next 'bigger' objects */

        /* A daemon reload tears down and recreates jobs internally; that
         * churn should stay invisible to job observers on the bus. */
        if (!MANAGER_IS_RELOADING(j->manager))
                bus_job_send_removed_signal(j);

        *slot = NULL;

        unit_add_to_gc_queue(j->unit);

        unit_add_to_dbus_queue(j->unit); /* The Job property of the unit has changed now */

        hashmap_remove_value(j->manager->jobs, UINT32_TO_PTR(j->id), j);

        j->installed = false;
}
|
|
|
|
|
2012-04-20 09:38:43 +02:00
|
|
|
static bool job_type_allows_late_merge(JobType t) {
        /* Tells whether it is OK to merge a job of type 't' into a job of
         * the same unit that is already running.
         *
         * Reloads must not be merged late. Consider this sequence:
         *   1. A reload is in progress; the daemon has already re-read its
         *      configuration but has not completed the reload yet.
         *   2. The config file is edited again.
         *   3. A second reload is requested so the daemon picks up the new
         *      config.
         * If that second reload were merged into the already-running one,
         * the daemon would never learn about the new configuration.
         *
         * JOB_RESTART, on the other hand, is safe to merge into: a running
         * restart job gets patched into JOB_START once the unit has
         * stopped. So as long as we still see JOB_RESTART running, the unit
         * has not stopped yet and merging remains fine. */
        if (t == JOB_RELOAD)
                return false;

        return true;
}
|
|
|
|
|
|
|
|
/* Fold the not-yet-installed job 'other' into the installed job 'j' on
 * the same unit, combining their job types and sticky flags. 'other' is
 * left for the caller to discard. */
static void job_merge_into_installed(Job *j, Job *other) {
        assert(j->installed);
        assert(j->unit == other->unit);

        if (j->type == JOB_NOP)
                /* NOP jobs only ever merge with other NOP jobs. */
                assert(other->type == JOB_NOP);
        else
                assert_se(job_type_merge_and_collapse(&j->type, other->type, j->unit) == 0);

        /* Sticky flags survive the merge: set in either source, set in
         * the result. */
        j->irreversible = j->irreversible || other->irreversible;
        j->ignore_order = j->ignore_order || other->ignore_order;
}
|
|
|
|
|
|
|
|
Job* job_install(Job *j) {
|
core: add NOP jobs, job type collapsing
Two of our current job types are special:
JOB_TRY_RESTART, JOB_RELOAD_OR_START.
They differ from other job types by being sensitive to the unit active state.
They perform some action when the unit is active and some other action
otherwise. This raises a question: when exactly should the unit state be
checked to make the decision?
Currently the unit state is checked when the job becomes runnable. It's more
sensible to check the state immediately when the job is added by the user.
When the user types "systemctl try-restart foo.service", he really intends
to restart the service if it's running right now. If it isn't running right
now, the restart is pointless.
Consider the example (from Bugzilla[1]):
sleep.service takes some time to start.
hello.service has After=sleep.service.
Both services get started. Two jobs will appear:
hello.service/start waiting
sleep.service/start running
Then someone runs "systemctl try-restart hello.service".
Currently the try-restart operation will block and wait for
sleep.service/start to complete.
The correct result is to complete the try-restart operation immediately
with success, because hello.service is not running. The two original
jobs must not be disturbed by this.
To fix this we introduce two new concepts:
- a new job type: JOB_NOP
A JOB_NOP job does not do anything to the unit. It does not pull in any
dependencies. It is always immediately runnable. When installed to a unit,
it sits in a special slot (u->nop_job) where it never conflicts with
the installed job (u->job) of a different type. It never merges with jobs
of other types, but it can merge into an already installed JOB_NOP job.
- "collapsing" of job types
When a job of one of the two special types is added, the state of the unit
is checked immediately and the job type changes:
JOB_TRY_RESTART -> JOB_RESTART or JOB_NOP
JOB_RELOAD_OR_START -> JOB_RELOAD or JOB_START
Should a job type JOB_RELOAD_OR_START appear later during job merging, it
collapses immediately afterwards.
Collapsing actually makes some things simpler, because there are now fewer
job types that are allowed in the transaction.
[1] Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=753586
2012-04-25 11:58:27 +02:00
|
|
|
Job **pj;
|
|
|
|
Job *uj;
|
2012-04-20 02:04:01 +02:00
|
|
|
|
2012-04-20 09:38:43 +02:00
|
|
|
assert(!j->installed);
|
core: add NOP jobs, job type collapsing
Two of our current job types are special:
JOB_TRY_RESTART, JOB_RELOAD_OR_START.
They differ from other job types by being sensitive to the unit active state.
They perform some action when the unit is active and some other action
otherwise. This raises a question: when exactly should the unit state be
checked to make the decision?
Currently the unit state is checked when the job becomes runnable. It's more
sensible to check the state immediately when the job is added by the user.
When the user types "systemctl try-restart foo.service", he really intends
to restart the service if it's running right now. If it isn't running right
now, the restart is pointless.
Consider the example (from Bugzilla[1]):
sleep.service takes some time to start.
hello.service has After=sleep.service.
Both services get started. Two jobs will appear:
hello.service/start waiting
sleep.service/start running
Then someone runs "systemctl try-restart hello.service".
Currently the try-restart operation will block and wait for
sleep.service/start to complete.
The correct result is to complete the try-restart operation immediately
with success, because hello.service is not running. The two original
jobs must not be disturbed by this.
To fix this we introduce two new concepts:
- a new job type: JOB_NOP
A JOB_NOP job does not do anything to the unit. It does not pull in any
dependencies. It is always immediately runnable. When installed to a unit,
it sits in a special slot (u->nop_job) where it never conflicts with
the installed job (u->job) of a different type. It never merges with jobs
of other types, but it can merge into an already installed JOB_NOP job.
- "collapsing" of job types
When a job of one of the two special types is added, the state of the unit
is checked immediately and the job type changes:
JOB_TRY_RESTART -> JOB_RESTART or JOB_NOP
JOB_RELOAD_OR_START -> JOB_RELOAD or JOB_START
Should a job type JOB_RELOAD_OR_START appear later during job merging, it
collapses immediately afterwards.
Collapsing actually makes some things simpler, because there are now fewer
job types that are allowed in the transaction.
[1] Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=753586
2012-04-25 11:58:27 +02:00
|
|
|
assert(j->type < _JOB_TYPE_MAX_IN_TRANSACTION);
|
2015-01-05 17:22:10 +01:00
|
|
|
assert(j->state == JOB_WAITING);
|
core: add NOP jobs, job type collapsing
Two of our current job types are special:
JOB_TRY_RESTART, JOB_RELOAD_OR_START.
They differ from other job types by being sensitive to the unit active state.
They perform some action when the unit is active and some other action
otherwise. This raises a question: when exactly should the unit state be
checked to make the decision?
Currently the unit state is checked when the job becomes runnable. It's more
sensible to check the state immediately when the job is added by the user.
When the user types "systemctl try-restart foo.service", he really intends
to restart the service if it's running right now. If it isn't running right
now, the restart is pointless.
Consider the example (from Bugzilla[1]):
sleep.service takes some time to start.
hello.service has After=sleep.service.
Both services get started. Two jobs will appear:
hello.service/start waiting
sleep.service/start running
Then someone runs "systemctl try-restart hello.service".
Currently the try-restart operation will block and wait for
sleep.service/start to complete.
The correct result is to complete the try-restart operation immediately
with success, because hello.service is not running. The two original
jobs must not be disturbed by this.
To fix this we introduce two new concepts:
- a new job type: JOB_NOP
A JOB_NOP job does not do anything to the unit. It does not pull in any
dependencies. It is always immediately runnable. When installed to a unit,
it sits in a special slot (u->nop_job) where it never conflicts with
the installed job (u->job) of a different type. It never merges with jobs
of other types, but it can merge into an already installed JOB_NOP job.
- "collapsing" of job types
When a job of one of the two special types is added, the state of the unit
is checked immediately and the job type changes:
JOB_TRY_RESTART -> JOB_RESTART or JOB_NOP
JOB_RELOAD_OR_START -> JOB_RELOAD or JOB_START
Should a job type JOB_RELOAD_OR_START appear later during job merging, it
collapses immediately afterwards.
Collapsing actually makes some things simpler, because there are now fewer
job types that are allowed in the transaction.
[1] Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=753586
2012-04-25 11:58:27 +02:00
|
|
|
|
|
|
|
pj = (j->type == JOB_NOP) ? &j->unit->nop_job : &j->unit->job;
|
|
|
|
uj = *pj;
|
2012-04-20 09:38:43 +02:00
|
|
|
|
2012-04-20 02:04:01 +02:00
|
|
|
if (uj) {
|
2014-11-26 16:33:45 +01:00
|
|
|
if (job_type_is_conflicting(uj->type, j->type))
|
2016-05-16 17:24:51 +02:00
|
|
|
job_finish_and_invalidate(uj, JOB_CANCELED, false, false);
|
2012-04-20 09:38:43 +02:00
|
|
|
else {
|
|
|
|
/* not conflicting, i.e. mergeable */
|
|
|
|
|
2014-11-26 16:33:45 +01:00
|
|
|
if (uj->state == JOB_WAITING ||
|
2012-04-20 09:38:43 +02:00
|
|
|
(job_type_allows_late_merge(j->type) && job_type_is_superset(uj->type, j->type))) {
|
|
|
|
job_merge_into_installed(uj, j);
|
core,network: major per-object logging rework
This changes log_unit_info() (and friends) to take a real Unit* object
insted of just a unit name as parameter. The call will now prefix all
logged messages with the unit name, thus allowing the unit name to be
dropped from the various passed romat strings, simplifying invocations
drastically, and unifying log output across messages. Also, UNIT= vs.
USER_UNIT= is now derived from the Manager object attached to the Unit
object, instead of getpid(). This has the benefit of correcting the
field for --test runs.
Also contains a couple of other logging improvements:
- Drops a couple of strerror() invocations in favour of using %m.
- Not only .mount units now warn if a symlink exists for the mount
point already, .automount units do that too, now.
- A few invocations of log_struct() that didn't actually pass any
additional structured data have been replaced by simpler invocations
of log_unit_info() and friends.
- For structured data a new LOG_UNIT_MESSAGE() macro has been added,
that works like LOG_MESSAGE() but prefixes the message with the unit
name. Similar, there's now LOG_LINK_MESSAGE() and
LOG_NETDEV_MESSAGE().
- For structured data new LOG_UNIT_ID(), LOG_LINK_INTERFACE(),
LOG_NETDEV_INTERFACE() macros have been added that generate the
necessary per object fields. The old log_unit_struct() call has been
removed in favour of these new macros used in raw log_struct()
invocations. In addition to removing one more function call this
allows generated structured log messages that contain two object
fields, as necessary for example for network interfaces that are
joined into another network interface, and whose messages shall be
indexed by both.
- The LOG_ERRNO() macro has been removed, in favour of
log_struct_errno(). The latter has the benefit of ensuring that %m in
format strings is properly resolved to the specified error number.
- A number of logging messages have been converted to use
log_unit_info() instead of log_info()
- The client code in sysv-generator no longer #includes core code from
src/core/.
- log_unit_full_errno() has been removed, log_unit_full() instead takes
an errno now, too.
- log_unit_info(), log_link_info(), log_netdev_info() and friends, now
avoid double evaluation of their parameters
2015-05-11 20:38:21 +02:00
|
|
|
log_unit_debug(uj->unit,
|
2019-01-16 01:03:22 +01:00
|
|
|
"Merged %s/%s into installed job %s/%s as %"PRIu32,
|
2019-01-17 19:47:19 +01:00
|
|
|
j->unit->id, job_type_to_string(j->type), uj->unit->id,
|
2019-01-16 01:03:22 +01:00
|
|
|
job_type_to_string(uj->type), uj->id);
|
2012-04-20 09:38:43 +02:00
|
|
|
return uj;
|
|
|
|
} else {
|
|
|
|
/* already running and not safe to merge into */
|
|
|
|
/* Patch uj to become a merged job and re-run it. */
|
|
|
|
/* XXX It should be safer to queue j to run after uj finishes, but it is
|
|
|
|
* not currently possible to have more than one installed job per unit. */
|
|
|
|
job_merge_into_installed(uj, j);
|
core,network: major per-object logging rework
This changes log_unit_info() (and friends) to take a real Unit* object
insted of just a unit name as parameter. The call will now prefix all
logged messages with the unit name, thus allowing the unit name to be
dropped from the various passed romat strings, simplifying invocations
drastically, and unifying log output across messages. Also, UNIT= vs.
USER_UNIT= is now derived from the Manager object attached to the Unit
object, instead of getpid(). This has the benefit of correcting the
field for --test runs.
Also contains a couple of other logging improvements:
- Drops a couple of strerror() invocations in favour of using %m.
- Not only .mount units now warn if a symlinks exist for the mount
point already, .automount units do that too, now.
- A few invocations of log_struct() that didn't actually pass any
additional structured data have been replaced by simpler invocations
of log_unit_info() and friends.
- For structured data a new LOG_UNIT_MESSAGE() macro has been added,
that works like LOG_MESSAGE() but prefixes the message with the unit
name. Similar, there's now LOG_LINK_MESSAGE() and
LOG_NETDEV_MESSAGE().
- For structured data new LOG_UNIT_ID(), LOG_LINK_INTERFACE(),
LOG_NETDEV_INTERFACE() macros have been added that generate the
necessary per object fields. The old log_unit_struct() call has been
removed in favour of these new macros used in raw log_struct()
invocations. In addition to removing one more function call this
allows generated structured log messages that contain two object
fields, as necessary for example for network interfaces that are
joined into another network interface, and whose messages shall be
indexed by both.
- The LOG_ERRNO() macro has been removed, in favour of
log_struct_errno(). The latter has the benefit of ensuring that %m in
format strings is properly resolved to the specified error number.
- A number of logging messages have been converted to use
log_unit_info() instead of log_info()
- The client code in sysv-generator no longer #includes core code from
src/core/.
- log_unit_full_errno() has been removed, log_unit_full() instead takes
an errno now, too.
- log_unit_info(), log_link_info(), log_netdev_info() and friends, now
avoid double evaluation of their parameters
2015-05-11 20:38:21 +02:00
|
|
|
log_unit_debug(uj->unit,
|
2019-01-16 01:03:22 +01:00
|
|
|
"Merged into running job, re-running: %s/%s as %"PRIu32,
|
|
|
|
uj->unit->id, job_type_to_string(uj->type), uj->id);
|
2015-01-05 17:22:10 +01:00
|
|
|
|
|
|
|
job_set_state(uj, JOB_WAITING);
|
2012-04-20 09:38:43 +02:00
|
|
|
return uj;
|
|
|
|
}
|
|
|
|
}
|
2012-04-20 02:04:01 +02:00
|
|
|
}
|
|
|
|
|
2012-04-20 09:38:43 +02:00
|
|
|
/* Install the job */
|
core: add NOP jobs, job type collapsing
Two of our current job types are special:
JOB_TRY_RESTART, JOB_RELOAD_OR_START.
They differ from other job types by being sensitive to the unit active state.
They perform some action when the unit is active and some other action
otherwise. This raises a question: when exactly should the unit state be
checked to make the decision?
Currently the unit state is checked when the job becomes runnable. It's more
sensible to check the state immediately when the job is added by the user.
When the user types "systemctl try-restart foo.service", he really intends
to restart the service if it's running right now. If it isn't running right
now, the restart is pointless.
Consider the example (from Bugzilla[1]):
sleep.service takes some time to start.
hello.service has After=sleep.service.
Both services get started. Two jobs will appear:
hello.service/start waiting
sleep.service/start running
Then someone runs "systemctl try-restart hello.service".
Currently the try-restart operation will block and wait for
sleep.service/start to complete.
The correct result is to complete the try-restart operation immediately
with success, because hello.service is not running. The two original
jobs must not be disturbed by this.
To fix this we introduce two new concepts:
- a new job type: JOB_NOP
A JOB_NOP job does not do anything to the unit. It does not pull in any
dependencies. It is always immediately runnable. When installed to a unit,
it sits in a special slot (u->nop_job) where it never conflicts with
the installed job (u->job) of a different type. It never merges with jobs
of other types, but it can merge into an already installed JOB_NOP job.
- "collapsing" of job types
When a job of one of the two special types is added, the state of the unit
is checked immediately and the job type changes:
JOB_TRY_RESTART -> JOB_RESTART or JOB_NOP
JOB_RELOAD_OR_START -> JOB_RELOAD or JOB_START
Should a job type JOB_RELOAD_OR_START appear later during job merging, it
collapses immediately afterwards.
Collapsing actually makes some things simpler, because there are now fewer
job types that are allowed in the transaction.
[1] Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=753586
2012-04-25 11:58:27 +02:00
|
|
|
*pj = j;
|
2012-04-20 02:04:01 +02:00
|
|
|
j->installed = true;
|
2015-01-05 17:22:10 +01:00
|
|
|
|
2016-02-23 05:32:04 +01:00
|
|
|
j->manager->n_installed_jobs++;
|
core,network: major per-object logging rework
This changes log_unit_info() (and friends) to take a real Unit* object
insted of just a unit name as parameter. The call will now prefix all
logged messages with the unit name, thus allowing the unit name to be
dropped from the various passed romat strings, simplifying invocations
drastically, and unifying log output across messages. Also, UNIT= vs.
USER_UNIT= is now derived from the Manager object attached to the Unit
object, instead of getpid(). This has the benefit of correcting the
field for --test runs.
Also contains a couple of other logging improvements:
- Drops a couple of strerror() invocations in favour of using %m.
- Not only .mount units now warn if a symlinks exist for the mount
point already, .automount units do that too, now.
- A few invocations of log_struct() that didn't actually pass any
additional structured data have been replaced by simpler invocations
of log_unit_info() and friends.
- For structured data a new LOG_UNIT_MESSAGE() macro has been added,
that works like LOG_MESSAGE() but prefixes the message with the unit
name. Similar, there's now LOG_LINK_MESSAGE() and
LOG_NETDEV_MESSAGE().
- For structured data new LOG_UNIT_ID(), LOG_LINK_INTERFACE(),
LOG_NETDEV_INTERFACE() macros have been added that generate the
necessary per object fields. The old log_unit_struct() call has been
removed in favour of these new macros used in raw log_struct()
invocations. In addition to removing one more function call this
allows generated structured log messages that contain two object
fields, as necessary for example for network interfaces that are
joined into another network interface, and whose messages shall be
indexed by both.
- The LOG_ERRNO() macro has been removed, in favour of
log_struct_errno(). The latter has the benefit of ensuring that %m in
format strings is properly resolved to the specified error number.
- A number of logging messages have been converted to use
log_unit_info() instead of log_info()
- The client code in sysv-generator no longer #includes core code from
src/core/.
- log_unit_full_errno() has been removed, log_unit_full() instead takes
an errno now, too.
- log_unit_info(), log_link_info(), log_netdev_info() and friends, now
avoid double evaluation of their parameters
2015-05-11 20:38:21 +02:00
|
|
|
log_unit_debug(j->unit,
|
2013-01-05 18:00:35 +01:00
|
|
|
"Installed new job %s/%s as %u",
|
|
|
|
j->unit->id, job_type_to_string(j->type), (unsigned) j->id);
|
2016-11-15 19:32:50 +01:00
|
|
|
|
|
|
|
job_add_to_gc_queue(j);
|
|
|
|
|
2018-11-29 18:48:32 +01:00
|
|
|
job_add_to_dbus_queue(j); /* announce this job to clients */
|
|
|
|
unit_add_to_dbus_queue(j->unit); /* The Job property of the unit has changed now */
|
|
|
|
|
2012-04-20 09:38:43 +02:00
|
|
|
return j;
|
2012-04-20 02:04:01 +02:00
|
|
|
}
|
|
|
|
|
core: add NOP jobs, job type collapsing
Two of our current job types are special:
JOB_TRY_RESTART, JOB_RELOAD_OR_START.
They differ from other job types by being sensitive to the unit active state.
They perform some action when the unit is active and some other action
otherwise. This raises a question: when exactly should the unit state be
checked to make the decision?
Currently the unit state is checked when the job becomes runnable. It's more
sensible to check the state immediately when the job is added by the user.
When the user types "systemctl try-restart foo.service", he really intends
to restart the service if it's running right now. If it isn't running right
now, the restart is pointless.
Consider the example (from Bugzilla[1]):
sleep.service takes some time to start.
hello.service has After=sleep.service.
Both services get started. Two jobs will appear:
hello.service/start waiting
sleep.service/start running
Then someone runs "systemctl try-restart hello.service".
Currently the try-restart operation will block and wait for
sleep.service/start to complete.
The correct result is to complete the try-restart operation immediately
with success, because hello.service is not running. The two original
jobs must not be disturbed by this.
To fix this we introduce two new concepts:
- a new job type: JOB_NOP
A JOB_NOP job does not do anything to the unit. It does not pull in any
dependencies. It is always immediately runnable. When installed to a unit,
it sits in a special slot (u->nop_job) where it never conflicts with
the installed job (u->job) of a different type. It never merges with jobs
of other types, but it can merge into an already installed JOB_NOP job.
- "collapsing" of job types
When a job of one of the two special types is added, the state of the unit
is checked immediately and the job type changes:
JOB_TRY_RESTART -> JOB_RESTART or JOB_NOP
JOB_RELOAD_OR_START -> JOB_RELOAD or JOB_START
Should a job type JOB_RELOAD_OR_START appear later during job merging, it
collapses immediately afterwards.
Collapsing actually makes some things simpler, because there are now fewer
job types that are allowed in the transaction.
[1] Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=753586
2012-04-25 11:58:27 +02:00
|
|
|
/* Installs a deserialized job into its unit's job slot — u->nop_job for
 * JOB_NOP jobs, u->job for everything else. Used during manager
 * reexecution/reload to reattach jobs that were serialized earlier.
 *
 * Returns 0 on success, a negative errno-style error on failure; on failure
 * the caller retains ownership of j. */
int job_install_deserialized(Job *j) {
        Job **pj;
        int r;

        assert(!j->installed);

        /* Serialized state is effectively untrusted input across a reexec:
         * reject job types that may not appear in a transaction (i.e. the
         * special collapsing types and anything out of range). */
        if (j->type < 0 || j->type >= _JOB_TYPE_MAX_IN_TRANSACTION)
                return log_unit_debug_errno(j->unit, SYNTHETIC_ERRNO(EINVAL),
                                            "Invalid job type %s in deserialization.",
                                            strna(job_type_to_string(j->type)));

        /* JOB_NOP jobs live in their own slot, so they never conflict with a
         * regularly installed job of a different type. */
        pj = (j->type == JOB_NOP) ? &j->unit->nop_job : &j->unit->job;

        if (*pj)
                return log_unit_debug_errno(j->unit, SYNTHETIC_ERRNO(EEXIST),
                                            "Unit already has a job installed. Not installing deserialized job.");

        r = hashmap_put(j->manager->jobs, UINT32_TO_PTR(j->id), j);
        if (r == -EEXIST)
                return log_unit_debug_errno(j->unit, r, "Job ID %" PRIu32 " already used, cannot deserialize job.", j->id);
        if (r < 0)
                return log_unit_debug_errno(j->unit, r, "Failed to insert job into jobs hash table: %m");

        *pj = j;
        j->installed = true;

        /* Keep the manager's running-jobs counter in sync with the state the
         * job was serialized in. (j->manager == j->unit->manager, see
         * job_new_raw(); use the short form consistently.) */
        if (j->state == JOB_RUNNING)
                j->manager->n_running_jobs++;

        /* j->id is uint32_t, hence PRIu32, matching the other log messages in
         * this function. */
        log_unit_debug(j->unit,
                       "Reinstalled deserialized job %s/%s as %" PRIu32,
                       j->unit->id, job_type_to_string(j->type), j->id);
        return 0;
}
|
|
|
|
|
2012-04-18 15:21:24 +02:00
|
|
|
/* Adds a new job link, which encodes that the 'subject' job needs the
 * 'object' job in some way. If 'subject' is NULL this means the 'anchor' job
 * (i.e. the one the user explicitly asked for) is the requester.
 *
 * 'matters' records whether failure of the object shall fail the subject;
 * 'conflicts' whether success of the object shall fail it. Returns the new
 * link (owned by the two jobs' lists), or NULL on OOM. */
JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool conflicts) {
        JobDependency *l;

        assert(object);

        /* Allocate with the file's usual new() + compound-literal pattern
         * (see job_new_raw()); unnamed fields are implicitly zeroed, so this
         * is equivalent to the former new0() + assignments. */
        l = new(JobDependency, 1);
        if (!l)
                return NULL;

        *l = (JobDependency) {
                .subject = subject,
                .object = object,
                .matters = matters,
                .conflicts = conflicts,
        };

        /* Anchor dependencies (subject == NULL) are only tracked on the
         * object side. */
        if (subject)
                LIST_PREPEND(subject, subject->subject_list, l);

        LIST_PREPEND(object, object->object_list, l);

        return l;
}
|
|
|
|
|
2012-04-18 15:21:24 +02:00
|
|
|
/* Releases a job link created by job_dependency_new(), unhooking it from the
 * membership lists of both ends first. */
void job_dependency_free(JobDependency *l) {
        assert(l);

        /* A link without a subject is an anchor dependency and sits on the
         * object's list only; otherwise detach it from both sides. */
        if (l->subject)
                LIST_REMOVE(subject, l->subject->subject_list, l);

        LIST_REMOVE(object, l->object->object_list, l);

        free(l);
}
|
|
|
|
|
2018-12-04 09:29:54 +01:00
|
|
|
/* Dumps a human-readable, multi-line description of the job to f. Every
 * output line is prepended with 'prefix'; a NULL prefix is treated as "". */
void job_dump(Job *j, FILE *f, const char *prefix) {
        assert(j);
        assert(f);

        prefix = strempty(prefix);

        /* j->id is uint32_t (it is printed with PRIu32 elsewhere in this
         * file), so use the exact-width specifier instead of %u, whose
         * correctness would depend on uint32_t being unsigned int. */
        fprintf(f,
                "%s-> Job %" PRIu32 ":\n"
                "%s\tAction: %s -> %s\n"
                "%s\tState: %s\n"
                "%s\tIrreversible: %s\n"
                "%s\tMay GC: %s\n",
                prefix, j->id,
                prefix, j->unit->id, job_type_to_string(j->type),
                prefix, job_state_to_string(j->state),
                prefix, yes_no(j->irreversible),
                prefix, yes_no(job_may_gc(j)));
}
|
2010-01-20 02:12:51 +01:00
|
|
|
|
2012-04-05 08:34:05 +02:00
|
|
|
/*
|
|
|
|
* Merging is commutative, so imagine the matrix as symmetric. We store only
|
|
|
|
* its lower triangle to avoid duplication. We don't store the main diagonal,
|
|
|
|
* because A merged with A is simply A.
|
|
|
|
*
|
core: add NOP jobs, job type collapsing
Two of our current job types are special:
JOB_TRY_RESTART, JOB_RELOAD_OR_START.
They differ from other job types by being sensitive to the unit active state.
They perform some action when the unit is active and some other action
otherwise. This raises a question: when exactly should the unit state be
checked to make the decision?
Currently the unit state is checked when the job becomes runnable. It's more
sensible to check the state immediately when the job is added by the user.
When the user types "systemctl try-restart foo.service", he really intends
to restart the service if it's running right now. If it isn't running right
now, the restart is pointless.
Consider the example (from Bugzilla[1]):
sleep.service takes some time to start.
hello.service has After=sleep.service.
Both services get started. Two jobs will appear:
hello.service/start waiting
sleep.service/start running
Then someone runs "systemctl try-restart hello.service".
Currently the try-restart operation will block and wait for
sleep.service/start to complete.
The correct result is to complete the try-restart operation immediately
with success, because hello.service is not running. The two original
jobs must not be disturbed by this.
To fix this we introduce two new concepts:
- a new job type: JOB_NOP
A JOB_NOP job does not do anything to the unit. It does not pull in any
dependencies. It is always immediately runnable. When installed to a unit,
it sits in a special slot (u->nop_job) where it never conflicts with
the installed job (u->job) of a different type. It never merges with jobs
of other types, but it can merge into an already installed JOB_NOP job.
- "collapsing" of job types
When a job of one of the two special types is added, the state of the unit
is checked immediately and the job type changes:
JOB_TRY_RESTART -> JOB_RESTART or JOB_NOP
JOB_RELOAD_OR_START -> JOB_RELOAD or JOB_START
Should a job type JOB_RELOAD_OR_START appear later during job merging, it
collapses immediately afterwards.
Collapsing actually makes some things simpler, because there are now fewer
job types that are allowed in the transaction.
[1] Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=753586
2012-04-25 11:58:27 +02:00
|
|
|
* If the resulting type is collapsed immediately afterwards (to get rid of
|
|
|
|
* the JOB_RELOAD_OR_START, which lies outside the lookup function's domain),
|
|
|
|
* the following properties hold:
|
|
|
|
*
|
2015-03-11 14:53:16 +01:00
|
|
|
* Merging is associative! A merged with B, and then merged with C is the same
|
2015-03-12 12:23:59 +01:00
|
|
|
* as A merged with the result of B merged with C.
|
2012-04-05 08:34:05 +02:00
|
|
|
*
|
|
|
|
* Mergeability is transitive! If A can be merged with B and B with C then
|
|
|
|
* A also with C.
|
|
|
|
*
|
|
|
|
* Also, if A merged with B cannot be merged with C, then either A or B cannot
|
|
|
|
* be merged with C either.
|
|
|
|
*/
|
|
|
|
/* Lower triangle of the job-type merging matrix, stored as a flat ordered
 * list. Row 'a' (the larger type) merged with column 'b' (the smaller type)
 * yields the entry at index (a - 1) * a / 2 + b, as computed by
 * job_type_lookup_merge(); -1 means the two types cannot be merged.
 * NOTE(review): JOB_RELOAD_OR_START appears as a *result* here even though it
 * lies outside the lookup function's input domain — callers are expected to
 * collapse it immediately afterwards (see job_type_collapse()). */
static const JobType job_merging_table[] = {
/* What \ With       *  JOB_START            JOB_VERIFY_ACTIVE JOB_STOP JOB_RELOAD */
/***********************************************************************************/
/*JOB_START          */
/*JOB_VERIFY_ACTIVE  */ JOB_START,
/*JOB_STOP           */ -1,                  -1,
/*JOB_RELOAD         */ JOB_RELOAD_OR_START, JOB_RELOAD,       -1,
/*JOB_RESTART        */ JOB_RESTART,         JOB_RESTART,      -1,      JOB_RESTART,
};
|
|
|
|
|
|
|
|
JobType job_type_lookup_merge(JobType a, JobType b) {
        /* Looks up the result of merging job type 'a' with job type 'b'.
         * Returns the merged type, or -1 if the two types cannot be merged.
         * Both inputs must come from the "merging" subset of job types;
         * the result may additionally be JOB_RELOAD_OR_START, which the
         * caller is expected to collapse right away. Merging is symmetric,
         * so only the lower triangle of the matrix is stored. */

        assert_cc(ELEMENTSOF(job_merging_table) == _JOB_TYPE_MAX_MERGING * (_JOB_TYPE_MAX_MERGING - 1) / 2);
        assert(a >= 0 && a < _JOB_TYPE_MAX_MERGING);
        assert(b >= 0 && b < _JOB_TYPE_MAX_MERGING);

        /* Merging a type with itself is the identity. */
        if (a == b)
                return a;

        /* Order the pair so we index the stored (lower) triangle:
         * 'hi' picks the row, 'lo' the column. */
        JobType hi = a > b ? a : b;
        JobType lo = a > b ? b : a;

        return job_merging_table[(hi - 1) * hi / 2 + lo];
}
|
2010-01-21 03:26:34 +01:00
|
|
|
|
2010-04-06 02:39:16 +02:00
|
|
|
bool job_type_is_redundant(JobType a, UnitActiveState b) {
        /* Returns true if a job of type 'a' is redundant for a unit whose
         * current active state is 'b', i.e. the unit is already in (or
         * heading towards) the state the job would establish. */

        switch (a) {

        case JOB_START:
        case JOB_VERIFY_ACTIVE:
                /* Starting or verifying a unit that is active (or busy
                 * reloading, which implies active) achieves nothing. */
                return IN_SET(b, UNIT_ACTIVE, UNIT_RELOADING);

        case JOB_STOP:
                return IN_SET(b, UNIT_INACTIVE, UNIT_FAILED);

        case JOB_RELOAD:
                return b == UNIT_RELOADING;

        case JOB_RESTART:
                /* A unit that is activating is effectively mid-restart
                 * already. */
                return b == UNIT_ACTIVATING;

        case JOB_NOP:
                /* NOP jobs never do anything, hence are always redundant. */
                return true;

        default:
                assert_not_reached("Invalid job type");
        }
}
|
|
|
|
|
2015-05-19 18:13:22 +02:00
|
|
|
JobType job_type_collapse(JobType t, Unit *u) {
        /* Translates the state-dependent job types (try-restart, try-reload,
         * reload-or-start) into a concrete job type, based on the current
         * active state of unit 'u'. All other types are returned unchanged.
         * The unit state is deliberately sampled only for the types that
         * need it. */

        switch (t) {

        case JOB_TRY_RESTART: {
                /* Restart only if the unit is up; otherwise do nothing. */
                UnitActiveState s = unit_active_state(u);
                return UNIT_IS_INACTIVE_OR_DEACTIVATING(s) ? JOB_NOP : JOB_RESTART;
        }

        case JOB_TRY_RELOAD: {
                /* Reload only if the unit is up; otherwise do nothing. */
                UnitActiveState s = unit_active_state(u);
                return UNIT_IS_INACTIVE_OR_DEACTIVATING(s) ? JOB_NOP : JOB_RELOAD;
        }

        case JOB_RELOAD_OR_START: {
                /* Start the unit if it is down, reload it if it is up. */
                UnitActiveState s = unit_active_state(u);
                return UNIT_IS_INACTIVE_OR_DEACTIVATING(s) ? JOB_START : JOB_RELOAD;
        }

        default:
                return t;
        }
}
|
|
|
|
|
core: add NOP jobs, job type collapsing
Two of our current job types are special:
JOB_TRY_RESTART, JOB_RELOAD_OR_START.
They differ from other job types by being sensitive to the unit active state.
They perform some action when the unit is active and some other action
otherwise. This raises a question: when exactly should the unit state be
checked to make the decision?
Currently the unit state is checked when the job becomes runnable. It's more
sensible to check the state immediately when the job is added by the user.
When the user types "systemctl try-restart foo.service", he really intends
to restart the service if it's running right now. If it isn't running right
now, the restart is pointless.
Consider the example (from Bugzilla[1]):
sleep.service takes some time to start.
hello.service has After=sleep.service.
Both services get started. Two jobs will appear:
hello.service/start waiting
sleep.service/start running
Then someone runs "systemctl try-restart hello.service".
Currently the try-restart operation will block and wait for
sleep.service/start to complete.
The correct result is to complete the try-restart operation immediately
with success, because hello.service is not running. The two original
jobs must not be disturbed by this.
To fix this we introduce two new concepts:
- a new job type: JOB_NOP
A JOB_NOP job does not do anything to the unit. It does not pull in any
dependencies. It is always immediately runnable. When installed to a unit,
it sits in a special slot (u->nop_job) where it never conflicts with
the installed job (u->job) of a different type. It never merges with jobs
of other types, but it can merge into an already installed JOB_NOP job.
- "collapsing" of job types
When a job of one of the two special types is added, the state of the unit
is checked immediately and the job type changes:
JOB_TRY_RESTART -> JOB_RESTART or JOB_NOP
JOB_RELOAD_OR_START -> JOB_RELOAD or JOB_START
Should a job type JOB_RELOAD_OR_START appear later during job merging, it
collapses immediately afterwards.
Collapsing actually makes some things simpler, because there are now fewer
job types that are allowed in the transaction.
[1] Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=753586
2012-04-25 11:58:27 +02:00
|
|
|
int job_type_merge_and_collapse(JobType *a, JobType b, Unit *u) {
        /* Merges job type 'b' into '*a' and collapses the result against the
         * current state of unit 'u'. On success '*a' is updated in place and
         * 0 is returned; if the two types cannot be merged, '*a' is left
         * untouched and -EEXIST is returned. */

        JobType merged = job_type_lookup_merge(*a, b);
        if (merged < 0)
                return -EEXIST;

        *a = job_type_collapse(merged, u);
        return 0;
}
|
|
|
|
|
2013-11-08 18:11:09 +01:00
|
|
|
static bool job_is_runnable(Job *j) {
|
2010-01-26 04:18:44 +01:00
|
|
|
Iterator i;
|
2010-01-26 21:39:06 +01:00
|
|
|
Unit *other;
|
core: track why unit dependencies came to be
This replaces the dependencies Set* objects by Hashmap* objects, where
the key is the depending Unit, and the value is a bitmask encoding why
the specific dependency was created.
The bitmask contains a number of different, defined bits, that indicate
why dependencies exist, for example whether they are created due to
explicitly configured deps in files, by udev rules or implicitly.
Note that memory usage is not increased by this change, even though we
store more information, as we manage to encode the bit mask inside the
value pointer each Hashmap entry contains.
Why this all? When we know how a dependency came to be, we can update
dependencies correctly when a configuration source changes but others
are left unaltered. Specifically:
1. We can fix UDEV_WANTS dependency generation: so far we kept adding
dependencies configured that way, but if a device lost such a
dependency we couldn't add it again, as there was no scheme for removing
dependencies in place.
2. We can implement "pin-pointed" reload of unit files. If we know what
dependencies were created as result of configuration in a unit file,
then we know what to flush out when we want to reload it.
3. It's useful for debugging: "systemd-analyze dump" now shows
this information, helping substantially with understanding how
systemd's dependency tree came to be the way it came to be.
2017-10-25 20:46:01 +02:00
|
|
|
void *v;
|
2010-01-23 01:52:57 +01:00
|
|
|
|
|
|
|
assert(j);
|
2010-01-26 19:25:02 +01:00
|
|
|
assert(j->installed);
|
2010-01-23 01:52:57 +01:00
|
|
|
|
2010-01-26 21:39:06 +01:00
|
|
|
/* Checks whether there is any job running for the units this
|
2010-01-23 01:52:57 +01:00
|
|
|
* job needs to be running after (in the case of a 'positive'
|
2011-02-16 21:59:31 +01:00
|
|
|
* job type) or before (in the case of a 'negative' job
|
|
|
|
* type. */
|
|
|
|
|
2013-11-22 19:14:11 +01:00
|
|
|
/* Note that unit types have a say in what is runnable,
|
|
|
|
* too. For example, if they return -EAGAIN from
|
|
|
|
* unit_start() they can indicate they are not
|
|
|
|
* runnable yet. */
|
|
|
|
|
2011-02-16 21:59:31 +01:00
|
|
|
/* First check if there is an override */
|
2011-04-06 19:09:33 +02:00
|
|
|
if (j->ignore_order)
|
2011-02-16 21:59:31 +01:00
|
|
|
return true;
|
2010-01-23 01:52:57 +01:00
|
|
|
|
core: add NOP jobs, job type collapsing
Two of our current job types are special:
JOB_TRY_RESTART, JOB_RELOAD_OR_START.
They differ from other job types by being sensitive to the unit active state.
They perform some action when the unit is active and some other action
otherwise. This raises a question: when exactly should the unit state be
checked to make the decision?
Currently the unit state is checked when the job becomes runnable. It's more
sensible to check the state immediately when the job is added by the user.
When the user types "systemctl try-restart foo.service", he really intends
to restart the service if it's running right now. If it isn't running right
now, the restart is pointless.
Consider the example (from Bugzilla[1]):
sleep.service takes some time to start.
hello.service has After=sleep.service.
Both services get started. Two jobs will appear:
hello.service/start waiting
sleep.service/start running
Then someone runs "systemctl try-restart hello.service".
Currently the try-restart operation will block and wait for
sleep.service/start to complete.
The correct result is to complete the try-restart operation immediately
with success, because hello.service is not running. The two original
jobs must not be disturbed by this.
To fix this we introduce two new concepts:
- a new job type: JOB_NOP
A JOB_NOP job does not do anything to the unit. It does not pull in any
dependencies. It is always immediately runnable. When installed to a unit,
it sits in a special slot (u->nop_job) where it never conflicts with
the installed job (u->job) of a different type. It never merges with jobs
of other types, but it can merge into an already installed JOB_NOP job.
- "collapsing" of job types
When a job of one of the two special types is added, the state of the unit
is checked immediately and the job type changes:
JOB_TRY_RESTART -> JOB_RESTART or JOB_NOP
JOB_RELOAD_OR_START -> JOB_RELOAD or JOB_START
Should a job type JOB_RELOAD_OR_START appear later during job merging, it
collapses immediately afterwards.
Collapsing actually makes some things simpler, because there are now fewer
job types that are allowed in the transaction.
[1] Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=753586
2012-04-25 11:58:27 +02:00
|
|
|
if (j->type == JOB_NOP)
|
|
|
|
return true;
|
|
|
|
|
2016-11-15 19:19:57 +01:00
|
|
|
if (IN_SET(j->type, JOB_START, JOB_VERIFY_ACTIVE, JOB_RELOAD)) {
|
2010-01-23 01:52:57 +01:00
|
|
|
/* Immediate result is that the job is or might be
|
2015-07-07 02:09:54 +02:00
|
|
|
* started. In this case let's wait for the
|
2010-01-23 01:52:57 +01:00
|
|
|
* dependencies, regardless whether they are
|
|
|
|
* starting or stopping something. */
|
|
|
|
|
core: track why unit dependencies came to be
This replaces the dependencies Set* objects by Hashmap* objects, where
the key is the depending Unit, and the value is a bitmask encoding why
the specific dependency was created.
The bitmask contains a number of different, defined bits, that indicate
why dependencies exist, for example whether they are created due to
explicitly configured deps in files, by udev rules or implicitly.
Note that memory usage is not increased by this change, even though we
store more information, as we manage to encode the bit mask inside the
value pointer each Hashmap entry contains.
Why this all? When we know how a dependency came to be, we can update
dependencies correctly when a configuration source changes but others
are left unaltered. Specifically:
1. We can fix UDEV_WANTS dependency generation: so far we kept adding
dependencies configured that way, but if a device lost such a
dependency we couldn't them again as there was no scheme for removing
of dependencies in place.
2. We can implement "pin-pointed" reload of unit files. If we know what
dependencies were created as result of configuration in a unit file,
then we know what to flush out when we want to reload it.
3. It's useful for debugging: "systemd-analyze dump" now shows
this information, helping substantially with understanding how
systemd's dependency tree came to be the way it came to be.
2017-10-25 20:46:01 +02:00
|
|
|
HASHMAP_FOREACH_KEY(v, other, j->unit->dependencies[UNIT_AFTER], i)
|
2012-01-15 12:04:08 +01:00
|
|
|
if (other->job)
|
2010-01-23 01:52:57 +01:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Also, if something else is being stopped and we should
|
2015-07-07 02:09:54 +02:00
|
|
|
* change state after it, then let's wait. */
|
2010-01-23 01:52:57 +01:00
|
|
|
|
core: track why unit dependencies came to be
This replaces the dependencies Set* objects by Hashmap* objects, where
the key is the depending Unit, and the value is a bitmask encoding why
the specific dependency was created.
The bitmask contains a number of different, defined bits, that indicate
why dependencies exist, for example whether they are created due to
explicitly configured deps in files, by udev rules or implicitly.
Note that memory usage is not increased by this change, even though we
store more information, as we manage to encode the bit mask inside the
value pointer each Hashmap entry contains.
Why this all? When we know how a dependency came to be, we can update
dependencies correctly when a configuration source changes but others
are left unaltered. Specifically:
1. We can fix UDEV_WANTS dependency generation: so far we kept adding
dependencies configured that way, but if a device lost such a
dependency we couldn't them again as there was no scheme for removing
of dependencies in place.
2. We can implement "pin-pointed" reload of unit files. If we know what
dependencies were created as result of configuration in a unit file,
then we know what to flush out when we want to reload it.
3. It's useful for debugging: "systemd-analyze dump" now shows
this information, helping substantially with understanding how
systemd's dependency tree came to be the way it came to be.
2017-10-25 20:46:01 +02:00
|
|
|
HASHMAP_FOREACH_KEY(v, other, j->unit->dependencies[UNIT_BEFORE], i)
|
2012-01-15 12:04:08 +01:00
|
|
|
if (other->job &&
|
2016-11-15 19:19:57 +01:00
|
|
|
IN_SET(other->job->type, JOB_STOP, JOB_RESTART))
|
2010-01-23 01:52:57 +01:00
|
|
|
return false;
|
|
|
|
|
|
|
|
/* This means that for a service a and a service b where b
|
|
|
|
* shall be started after a:
|
|
|
|
*
|
|
|
|
* start a + start b → 1st step start a, 2nd step start b
|
|
|
|
* start a + stop b → 1st step stop b, 2nd step start a
|
|
|
|
* stop a + start b → 1st step stop a, 2nd step start b
|
|
|
|
* stop a + stop b → 1st step stop b, 2nd step stop a
|
|
|
|
*
|
|
|
|
* This has the side effect that restarts are properly
|
|
|
|
* synchronized too. */
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2012-03-28 01:26:04 +02:00
|
|
|
/* Switch an existing job over to a new job type, logging the conversion
 * at debug level on the job's unit. */
static void job_change_type(Job *j, JobType newtype) {
        Unit *u;

        assert(j);

        u = j->unit;
        log_unit_debug(u,
                       "Converting job %s/%s -> %s/%s",
                       u->id, job_type_to_string(j->type),
                       u->id, job_type_to_string(newtype));

        j->type = newtype;
}
|
|
|
|
|
2018-11-13 19:57:43 +01:00
|
|
|
/* Selects the format string announcing that a job of type t has begun on
 * unit u: the unit type's own format when it declares one, otherwise a
 * generic fallback. Only JOB_START, JOB_STOP and JOB_RELOAD are valid here. */
_pure_ static const char* job_get_begin_status_message_format(Unit *u, JobType t) {
        const char *custom;

        assert(u);

        if (t == JOB_RELOAD)
                return "Reloading %s.";

        assert(IN_SET(t, JOB_START, JOB_STOP));

        /* A unit-type specific format takes precedence over the generic one. */
        custom = UNIT_VTABLE(u)->status_message_formats.starting_stopping[t == JOB_STOP];
        if (custom)
                return custom;

        /* Fall back to the generic strings */
        return t == JOB_START ? "Starting %s." : "Stopping %s.";
}
|
|
|
|
|
|
|
|
/* Writes the "begin" status line for a starting/stopping job to the console.
 * Reload status messages have traditionally not been printed to console,
 * hence anything other than start/stop is silently skipped. */
static void job_print_begin_status_message(Unit *u, JobType t) {
        assert(u);

        if (t != JOB_START && t != JOB_STOP)
                return;

        DISABLE_WARNING_FORMAT_NONLITERAL;
        unit_status_printf(u, "", job_get_begin_status_message_format(u, t));
        REENABLE_WARNING;
}
|
|
|
|
|
2018-11-13 20:16:45 +01:00
|
|
|
/* Sends the "begin" status message for a start/stop/reload job to the journal,
 * tagged with the job id, job type, unit fields and a per-operation MESSAGE_ID. */
static void job_log_begin_status_message(Unit *u, uint32_t job_id, JobType t) {
        const char *format, *mid;
        char buf[LINE_MAX];

        assert(u);
        assert(t >= 0);
        assert(t < _JOB_TYPE_MAX);

        /* Only these three operations have begin messages. */
        if (!IN_SET(t, JOB_START, JOB_STOP, JOB_RELOAD))
                return;

        if (log_on_console()) /* Skip this if it would only go on the console anyway */
                return;

        /* We log status messages for all units and all operations. */

        format = job_get_begin_status_message_format(u, t);

        DISABLE_WARNING_FORMAT_NONLITERAL;
        (void) snprintf(buf, sizeof buf, format, unit_description(u));
        REENABLE_WARNING;

        mid = t == JOB_START ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTING_STR :
              t == JOB_STOP  ? "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPING_STR :
                               "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADING_STR;

        /* Note that we deliberately use LOG_MESSAGE() instead of
         * LOG_UNIT_MESSAGE() here, since this is supposed to mimic
         * closely what is written to screen using the status output,
         * which is supposed to be the highest-level, friendliest
         * output possible, which means we should avoid the low-level
         * unit name. */
        log_struct(LOG_INFO,
                   LOG_MESSAGE("%s", buf),
                   "JOB_ID=%" PRIu32, job_id,
                   "JOB_TYPE=%s", job_type_to_string(t),
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   mid);
}
|
|
|
|
|
2018-11-13 20:16:45 +01:00
|
|
|
/* Emits the "job began" status message through both channels: the journal
 * first, then the console. */
static void job_emit_begin_status_message(Unit *u, uint32_t job_id, JobType t) {
        assert(u);
        assert(t >= 0 && t < _JOB_TYPE_MAX);

        job_log_begin_status_message(u, job_id, t);
        job_print_begin_status_message(u, t);
}
|
|
|
|
|
2015-07-16 20:08:30 +02:00
|
|
|
/* Invokes the unit operation backing job *j (start/stop/reload) and, if the
 * operation actually kicked something off, emits the "begin" status message.
 * On return *j is refreshed: it may be NULL if the job vanished meanwhile. */
static int job_perform_on_unit(Job **j) {
        uint32_t id;
        Manager *m;
        JobType t;
        Unit *u;
        int r;

        /* While we execute this operation the job might go away (for
         * example: because it finishes immediately or is replaced by
         * a new, conflicting job.) To make sure we don't access a
         * freed job later on we store the id here, so that we can
         * verify the job is still valid. */

        assert(j);
        assert(*j);

        m  = (*j)->manager;
        u  = (*j)->unit;
        t  = (*j)->type;
        id = (*j)->id;

        if (t == JOB_START)
                r = unit_start(u);
        else if (IN_SET(t, JOB_RESTART, JOB_STOP)) {
                /* A restart begins with a stop; report it as a stop below. */
                t = JOB_STOP;
                r = unit_stop(u);
        } else if (t == JOB_RELOAD)
                r = unit_reload(u);
        else
                assert_not_reached("Invalid job type");

        /* Log if the job still exists and the start/stop/reload function actually did something. Note that this means
         * for units for which there's no 'activating' phase (i.e. because we transition directly from 'inactive' to
         * 'active') we'll possibly skip the "Starting..." message. */
        *j = manager_get_job(m, id);
        if (*j && r > 0)
                job_emit_begin_status_message(u, id, t);

        return r;
}
|
|
|
|
|
2010-01-23 01:52:57 +01:00
|
|
|
/* Takes job j off the run queue and, if it is still waiting and runnable,
 * executes it. Negative errno codes coming back from the unit operation are
 * translated into job results; note that j may be invalidated (freed) by
 * job_finish_and_invalidate() before this returns. */
int job_run_and_invalidate(Job *j) {
        int r;

        assert(j);
        assert(j->installed);
        assert(j->type < _JOB_TYPE_MAX_IN_TRANSACTION);
        assert(j->in_run_queue);

        LIST_REMOVE(run_queue, j->manager->run_queue, j);
        j->in_run_queue = false;

        if (j->state != JOB_WAITING)
                return 0;

        /* Ordering dependencies not satisfied yet; stay in JOB_WAITING. */
        if (!job_is_runnable(j))
                return -EAGAIN;

        job_start_timer(j, true);
        job_set_state(j, JOB_RUNNING);
        job_add_to_dbus_queue(j);

        switch (j->type) {

        case JOB_VERIFY_ACTIVE: {
                UnitActiveState t;

                t = unit_active_state(j->unit);
                if (UNIT_IS_ACTIVE_OR_RELOADING(t))
                        r = -EALREADY;
                else if (t == UNIT_ACTIVATING)
                        r = -EAGAIN;
                else
                        r = -EBADR;
                break;
        }

        case JOB_START:
        case JOB_STOP:
        case JOB_RESTART:
                r = job_perform_on_unit(&j);

                /* If the unit type does not support starting/stopping, then simply wait. */
                if (r == -EBADR)
                        r = 0;
                break;

        case JOB_RELOAD:
                r = job_perform_on_unit(&j);
                break;

        case JOB_NOP:
                /* NOP jobs complete immediately without touching the unit. */
                r = -EALREADY;
                break;

        default:
                assert_not_reached("Unknown job type");
        }

        /* job_perform_on_unit() may have set j to NULL if the job went away;
         * only then map the operation's errno onto a job result. The order of
         * these checks is deliberate — each code has a distinct meaning. */
        if (j) {
                if (r == -EAGAIN)
                        job_set_state(j, JOB_WAITING); /* Hmm, not ready after all, let's return to JOB_WAITING state */
                else if (r == -EALREADY) /* already being executed */
                        r = job_finish_and_invalidate(j, JOB_DONE, true, true);
                else if (r == -ECOMM) /* condition failed, but all is good */
                        r = job_finish_and_invalidate(j, JOB_DONE, true, false);
                else if (r == -EBADR)
                        r = job_finish_and_invalidate(j, JOB_SKIPPED, true, false);
                else if (r == -ENOEXEC)
                        r = job_finish_and_invalidate(j, JOB_INVALID, true, false);
                else if (r == -EPROTO)
                        r = job_finish_and_invalidate(j, JOB_ASSERT, true, false);
                else if (r == -EOPNOTSUPP)
                        r = job_finish_and_invalidate(j, JOB_UNSUPPORTED, true, false);
                else if (r == -ENOLINK)
                        r = job_finish_and_invalidate(j, JOB_DEPENDENCY, true, false);
                else if (r == -ESTALE)
                        r = job_finish_and_invalidate(j, JOB_ONCE, true, false);
                else if (r < 0)
                        r = job_finish_and_invalidate(j, JOB_FAILED, true, false);
        }

        return r;
}
|
|
|
|
|
2018-11-13 19:57:43 +01:00
|
|
|
/* Selects the format string announcing that a job of type t on unit u has
 * finished with the given result: a unit-type specific format if declared,
 * otherwise a generic per-type/per-result string; NULL when there is nothing
 * to say for this combination. */
_pure_ static const char *job_get_done_status_message_format(Unit *u, JobType t, JobResult result) {

        static const char *const generic_finished_start_job[_JOB_RESULT_MAX] = {
                [JOB_DONE]        = "Started %s.",
                [JOB_TIMEOUT]     = "Timed out starting %s.",
                [JOB_FAILED]      = "Failed to start %s.",
                [JOB_DEPENDENCY]  = "Dependency failed for %s.",
                [JOB_ASSERT]      = "Assertion failed for %s.",
                [JOB_UNSUPPORTED] = "Starting of %s not supported.",
                [JOB_COLLECTED]   = "Unnecessary job for %s was removed.",
                [JOB_ONCE]        = "Unit %s has been started before and cannot be started again."
        };
        static const char *const generic_finished_stop_job[_JOB_RESULT_MAX] = {
                [JOB_DONE]        = "Stopped %s.",
                [JOB_FAILED]      = "Stopped (with error) %s.",
                [JOB_TIMEOUT]     = "Timed out stopping %s.",
        };
        static const char *const generic_finished_reload_job[_JOB_RESULT_MAX] = {
                [JOB_DONE]        = "Reloaded %s.",
                [JOB_FAILED]      = "Reload failed for %s.",
                [JOB_TIMEOUT]     = "Timed out reloading %s.",
        };
        /* When verify-active detects the unit is inactive, report it.
         * Most likely a DEPEND warning from a requisiting unit will
         * occur next and it's nice to see what was requisited. */
        static const char *const generic_finished_verify_active_job[_JOB_RESULT_MAX] = {
                [JOB_SKIPPED]     = "%s is not active.",
        };

        const char *format;

        assert(u);
        assert(t >= 0);
        assert(t < _JOB_TYPE_MAX);

        /* Unit-type specific formats take precedence; restart reuses the stop table. */
        if (IN_SET(t, JOB_START, JOB_STOP, JOB_RESTART)) {
                format = t == JOB_START ?
                        UNIT_VTABLE(u)->status_message_formats.finished_start_job[result] :
                        UNIT_VTABLE(u)->status_message_formats.finished_stop_job[result];
                if (format)
                        return format;
        }

        /* Return generic strings */
        if (t == JOB_START)
                return generic_finished_start_job[result];
        else if (IN_SET(t, JOB_STOP, JOB_RESTART))
                return generic_finished_stop_job[result];
        else if (t == JOB_RELOAD)
                return generic_finished_reload_job[result];
        else if (t == JOB_VERIFY_ACTIVE)
                return generic_finished_verify_active_job[result];

        return NULL;
}
|
|
|
|
|
2017-07-02 01:05:36 +02:00
|
|
|
/* Per-result console status tag: ANSI color plus the bracketed word shown in
 * the left-hand column of the boot status output. Results without an entry
 * (e.g. JOB_COLLECTED) produce no console message at all. */
static const struct {
        const char *color, *word;
} job_print_done_status_messages[_JOB_RESULT_MAX] = {
        [JOB_DONE]        = { ANSI_OK_COLOR, " OK " },
        [JOB_TIMEOUT]     = { ANSI_HIGHLIGHT_RED, " TIME " },
        [JOB_FAILED]      = { ANSI_HIGHLIGHT_RED, "FAILED" },
        [JOB_DEPENDENCY]  = { ANSI_HIGHLIGHT_YELLOW, "DEPEND" },
        [JOB_SKIPPED]     = { ANSI_HIGHLIGHT, " INFO " },
        [JOB_ASSERT]      = { ANSI_HIGHLIGHT_YELLOW, "ASSERT" },
        [JOB_UNSUPPORTED] = { ANSI_HIGHLIGHT_YELLOW, "UNSUPP" },
        /* JOB_COLLECTED */
        [JOB_ONCE]        = { ANSI_HIGHLIGHT_RED, " ONCE " },
};
|
2011-04-16 03:07:38 +02:00
|
|
|
|
2018-11-13 19:57:43 +01:00
|
|
|
/* Prints the "job finished" status line ([ OK ], [FAILED], ...) to the
 * console, and on a failed start additionally hints at 'systemctl status'.
 * Reload results, condition-skipped starts and results without a status word
 * are deliberately not printed. */
static void job_print_done_status_message(Unit *u, JobType t, JobResult result) {
        const char *format;
        const char *status;

        assert(u);
        assert(t >= 0);
        assert(t < _JOB_TYPE_MAX);

        /* Reload status messages have traditionally not been printed to console. */
        if (t == JOB_RELOAD)
                return;

        /* No message if the job did not actually do anything due to failed condition. */
        if (t == JOB_START && result == JOB_DONE && !u->condition_result)
                return;

        if (!job_print_done_status_messages[result].word)
                return;

        format = job_get_done_status_message_format(u, t, result);
        if (!format)
                return;

        if (log_get_show_color())
                status = strjoina(job_print_done_status_messages[result].color,
                                  job_print_done_status_messages[result].word,
                                  ANSI_NORMAL);
        else
                status = job_print_done_status_messages[result].word;

        /* Anything but success should be visible even in quiet mode. */
        if (result != JOB_DONE)
                manager_flip_auto_status(u->manager, true);

        DISABLE_WARNING_FORMAT_NONLITERAL;
        unit_status_printf(u, status, format);
        REENABLE_WARNING;

        if (t == JOB_START && result == JOB_FAILED) {
                /* Fix: initialize at declaration — an uninitialized _cleanup_free_
                 * pointer would hand garbage to free() should an early exit ever be
                 * added between declaration and assignment. */
                _cleanup_free_ char *quoted = shell_maybe_quote(u->id, ESCAPE_BACKSLASH);

                /* strna() copes with a NULL from shell_maybe_quote() on OOM. */
                manager_status_printf(u->manager, STATUS_TYPE_NORMAL, NULL, "See 'systemctl status %s' for details.", strna(quoted));
        }
}
|
|
|
|
|
2018-11-13 19:57:43 +01:00
|
|
|
/* Write a structured journal record describing the completion of a job.
 *
 * Journal-side counterpart of job_print_done_status_message(): the two are
 * coordinated so that when logging already goes to the console, a message
 * that the console printer will emit is not duplicated here. */
static void job_log_done_status_message(Unit *u, uint32_t job_id, JobType t, JobResult result) {
        const char *format, *mid;
        char buf[LINE_MAX];

        /* Journal log level per job result. NOTE(review): unlisted results
         * would index as 0 (i.e. LOG_EMERG), so every JobResult must have an
         * entry here. */
        static const int job_result_log_level[_JOB_RESULT_MAX] = {
                [JOB_DONE]        = LOG_INFO,
                [JOB_CANCELED]    = LOG_INFO,
                [JOB_TIMEOUT]     = LOG_ERR,
                [JOB_FAILED]      = LOG_ERR,
                [JOB_DEPENDENCY]  = LOG_WARNING,
                [JOB_SKIPPED]     = LOG_NOTICE,
                [JOB_INVALID]     = LOG_INFO,
                [JOB_ASSERT]      = LOG_WARNING,
                [JOB_UNSUPPORTED] = LOG_WARNING,
                [JOB_COLLECTED]   = LOG_INFO,
                [JOB_ONCE]        = LOG_ERR,
        };

        assert(u);
        assert(t >= 0);
        assert(t < _JOB_TYPE_MAX);

        /* Skip logging if output goes to the console anyway, and
         * job_print_done_status_message() will actually print something
         * there (i.e. a console word is defined for this result). */
        if (log_on_console() && job_print_done_status_messages[result].word)
                return;

        /* Show condition check message if the job did not actually do anything due to failed condition. */
        if (t == JOB_START && result == JOB_DONE && !u->condition_result) {
                log_struct(LOG_INFO,
                           "MESSAGE=Condition check resulted in %s being skipped.", unit_description(u),
                           "JOB_ID=%" PRIu32, job_id,
                           "JOB_TYPE=%s", job_type_to_string(t),
                           "JOB_RESULT=%s", job_result_to_string(result),
                           LOG_UNIT_ID(u),
                           LOG_UNIT_INVOCATION_ID(u),
                           "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTED_STR);

                return;
        }

        /* No format string for this (unit type, job type, result) combination
         * means nothing is to be logged for it. */
        format = job_get_done_status_message_format(u, t, result);
        if (!format)
                return;

        /* The description might be longer than the buffer, but that's OK,
         * we'll just truncate it here. Note that we use snprintf() rather than
         * xsprintf() on purpose here: we are fine with truncation and don't
         * consider that an error. */
        DISABLE_WARNING_FORMAT_NONLITERAL;
        (void) snprintf(buf, sizeof(buf), format, unit_description(u));
        REENABLE_WARNING;

        /* Pick the catalog MESSAGE_ID= field matching the job type; job types
         * without a dedicated message ID are logged without one below. */
        switch (t) {

        case JOB_START:
                if (result == JOB_DONE)
                        mid = "MESSAGE_ID=" SD_MESSAGE_UNIT_STARTED_STR;
                else
                        mid = "MESSAGE_ID=" SD_MESSAGE_UNIT_FAILED_STR;
                break;

        case JOB_RELOAD:
                mid = "MESSAGE_ID=" SD_MESSAGE_UNIT_RELOADED_STR;
                break;

        case JOB_STOP:
        case JOB_RESTART:
                mid = "MESSAGE_ID=" SD_MESSAGE_UNIT_STOPPED_STR;
                break;

        default:
                /* No message ID for this job type: log the record without one. */
                log_struct(job_result_log_level[result],
                           LOG_MESSAGE("%s", buf),
                           "JOB_ID=%" PRIu32, job_id,
                           "JOB_TYPE=%s", job_type_to_string(t),
                           "JOB_RESULT=%s", job_result_to_string(result),
                           LOG_UNIT_ID(u),
                           LOG_UNIT_INVOCATION_ID(u));
                return;
        }

        log_struct(job_result_log_level[result],
                   LOG_MESSAGE("%s", buf),
                   "JOB_ID=%" PRIu32, job_id,
                   "JOB_TYPE=%s", job_type_to_string(t),
                   "JOB_RESULT=%s", job_result_to_string(result),
                   LOG_UNIT_ID(u),
                   LOG_UNIT_INVOCATION_ID(u),
                   mid);
}
|
|
|
|
|
2018-11-13 19:57:43 +01:00
|
|
|
/* Emit both forms of the job-completion status message: the structured
 * journal record and (possibly) the console status line. The two callees
 * coordinate internally: the journal variant suppresses itself when the log
 * already goes to the console and the console printer will emit a word for
 * this result. */
static void job_emit_done_status_message(Unit *u, uint32_t job_id, JobType t, JobResult result) {
        assert(u);

        job_log_done_status_message(u, job_id, t, result);
        job_print_done_status_message(u, t, result);
}
|
|
|
|
|
2015-05-19 01:24:28 +02:00
|
|
|
/* Propagate a dependency failure: invalidate, with result JOB_DEPENDENCY,
 * every pending start-like job installed on units related to 'u' through the
 * dependency type 'd'. */
static void job_fail_dependencies(Unit *u, UnitDependency d) {
        Unit *other;
        Iterator i;
        void *v;

        assert(u);

        HASHMAP_FOREACH_KEY(v, other, u->dependencies[d], i) {
                Job *job = other->job;

                /* Only start and verify-active jobs are affected by a failed
                 * dependency; anything else (or no job at all) is left alone. */
                if (!job || !IN_SET(job->type, JOB_START, JOB_VERIFY_ACTIVE))
                        continue;

                job_finish_and_invalidate(job, JOB_DEPENDENCY, true, false);
        }
}
|
|
|
|
|
2016-05-16 17:24:51 +02:00
|
|
|
/* Complete job 'j' with the given result, emit status messages, and propagate
 * the outcome through the dependency graph.
 *
 * If 'recursive' is set, depending jobs are failed/invalidated as
 * appropriate. If 'already' is set, the job did not actually change the unit,
 * so no status message is emitted. Note that 'j' is either recycled into a
 * start job (successful restart) or uninstalled and freed here — callers must
 * not touch it afterwards. Always returns 0. */
int job_finish_and_invalidate(Job *j, JobResult result, bool recursive, bool already) {
        Unit *u;
        Unit *other;
        JobType t;
        Iterator i;
        void *v;

        assert(j);
        assert(j->installed);
        assert(j->type < _JOB_TYPE_MAX_IN_TRANSACTION);

        /* Save unit and type: the job object may be freed below. */
        u = j->unit;
        t = j->type;

        j->result = result;

        log_unit_debug(u, "Job %" PRIu32 " %s/%s finished, result=%s", j->id, u->id, job_type_to_string(t), job_result_to_string(result));

        /* If this job did nothing to respective unit we don't log the status message */
        if (!already)
                job_emit_done_status_message(u, j->id, t, result);

        /* Patch restart jobs so that they become normal start jobs */
        if (result == JOB_DONE && t == JOB_RESTART) {

                job_change_type(j, JOB_START);
                job_set_state(j, JOB_WAITING);

                job_add_to_dbus_queue(j);
                job_add_to_run_queue(j);
                job_add_to_gc_queue(j);

                /* The job object lives on as the start half of the restart;
                 * skip the uninstall/free and failure propagation below. */
                goto finish;
        }

        if (IN_SET(result, JOB_FAILED, JOB_INVALID))
                j->manager->n_failed_jobs++;

        /* The job is done for good: remove and free it. 'j' is dangling from
         * here on; only 'u' and 't' may be used below. */
        job_uninstall(j);
        job_free(j);

        /* Fail depending jobs on failure */
        if (result != JOB_DONE && recursive) {
                if (IN_SET(t, JOB_START, JOB_VERIFY_ACTIVE)) {
                        job_fail_dependencies(u, UNIT_REQUIRED_BY);
                        job_fail_dependencies(u, UNIT_REQUISITE_OF);
                        job_fail_dependencies(u, UNIT_BOUND_BY);
                } else if (t == JOB_STOP)
                        job_fail_dependencies(u, UNIT_CONFLICTED_BY);
        }

        /* A special check to make sure we take down anything RequisiteOf if we
         * aren't active. This is when the verify-active job merges with a
         * satisfying job type, and then loses its invalidation effect, as the
         * result there is JOB_DONE for the start job we merged into, while we
         * should be failing the depending job if the said unit isn't in fact
         * active. Oneshots are an example of this, where going directly from
         * activating to inactive is success.
         *
         * This happens when you use ConditionXYZ= in a unit too, since in that
         * case the job completes with the JOB_DONE result, but the unit never
         * really becomes active. Note that such a case still involves merging:
         *
         * A start job waits for something else, and a verify-active comes in
         * and merges in the installed job. Then, later, when it becomes
         * runnable, it finishes with JOB_DONE result as execution on conditions
         * not being met is skipped, breaking our dependency semantics.
         *
         * Also, depending on if start job waits or not, the merging may or may
         * not happen (the verify-active job may trigger after it finishes), so
         * you get undeterministic results without this check.
         */
        if (result == JOB_DONE && recursive && !UNIT_IS_ACTIVE_OR_RELOADING(unit_active_state(u))) {
                if (IN_SET(t, JOB_START, JOB_RELOAD))
                        job_fail_dependencies(u, UNIT_REQUISITE_OF);
        }

        /* Trigger OnFailure dependencies that are not generated by
         * the unit itself. We don't treat JOB_CANCELED as failure in
         * this context. And JOB_FAILED is already handled by the
         * unit itself. */
        if (IN_SET(result, JOB_TIMEOUT, JOB_DEPENDENCY)) {
                log_struct(LOG_NOTICE,
                           "JOB_TYPE=%s", job_type_to_string(t),
                           "JOB_RESULT=%s", job_result_to_string(result),
                           LOG_UNIT_ID(u),
                           LOG_UNIT_MESSAGE(u, "Job %s/%s failed with result '%s'.",
                                            u->id,
                                            job_type_to_string(t),
                                            job_result_to_string(result)));

                unit_start_on_failure(u);
        }

        unit_trigger_notify(u);

finish:
        /* Try to start the next jobs that can be started */
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_AFTER], i)
                if (other->job) {
                        job_add_to_run_queue(other->job);
                        job_add_to_gc_queue(other->job);
                }
        HASHMAP_FOREACH_KEY(v, other, u->dependencies[UNIT_BEFORE], i)
                if (other->job) {
                        job_add_to_run_queue(other->job);
                        job_add_to_gc_queue(other->job);
                }

        manager_check_finished(u->manager);

        return 0;
}
|
2010-01-26 04:18:44 +01:00
|
|
|
|
2013-11-19 21:12:59 +01:00
|
|
|
/* sd-event timer callback: fired when a job exceeds its configured timeout.
 * Finishes the job with JOB_TIMEOUT and then applies the unit's configured
 * JobTimeoutAction=. */
static int job_dispatch_timer(sd_event_source *s, uint64_t monotonic, void *userdata) {
        Job *j = userdata;
        Unit *u;

        assert(j);
        assert(s == j->timer_event_source);

        /* Save the unit pointer up front: job_finish_and_invalidate() may free j. */
        u = j->unit;

        log_unit_warning(u, "Job %s/%s timed out.", u->id, job_type_to_string(j->type));

        job_finish_and_invalidate(j, JOB_TIMEOUT, true, false);

        emergency_action(u->manager, u->job_timeout_action,
                         EMERGENCY_ACTION_IS_WATCHDOG|EMERGENCY_ACTION_WARN,
                         u->job_timeout_reboot_arg, -1, "job timed out");

        return 0;
}
|
2010-07-17 04:09:28 +02:00
|
|
|
|
2017-02-17 17:47:20 +01:00
|
|
|
/* Arms (or re-arms) the timeout timer for a job.
 *
 * With job_running == false this starts the JobTimeoutSec= clock when the job
 * is first enqueued; with job_running == true it starts the
 * JobRunningTimeoutSec= clock once the job actually begins running, possibly
 * tightening an already armed timer.
 *
 * Returns 0 on success (including when no finite timeout applies), negative
 * errno-style error on failure. */
int job_start_timer(Job *j, bool job_running) {
        usec_t deadline;
        int r;

        if (job_running) {
                j->begin_running_usec = now(CLOCK_MONOTONIC);

                if (j->unit->job_running_timeout == USEC_INFINITY)
                        return 0;

                deadline = usec_add(j->begin_running_usec, j->unit->job_running_timeout);

                if (j->timer_event_source) {
                        usec_t current_deadline;

                        /* A timer is already armed; update only if
                         * JobRunningTimeoutSec= results in an earlier timeout. */
                        r = sd_event_source_get_time(j->timer_event_source, &current_deadline);
                        if (r < 0)
                                return r;

                        if (current_deadline <= deadline)
                                return 0;

                        return sd_event_source_set_time(j->timer_event_source, deadline);
                }
        } else {
                if (j->timer_event_source)
                        return 0;

                j->begin_usec = now(CLOCK_MONOTONIC);

                if (j->unit->job_timeout == USEC_INFINITY)
                        return 0;

                deadline = usec_add(j->begin_usec, j->unit->job_timeout);
        }

        r = sd_event_add_time(
                        j->manager->event,
                        &j->timer_event_source,
                        CLOCK_MONOTONIC,
                        deadline, 0,
                        job_dispatch_timer, j);
        if (r < 0)
                return r;

        (void) sd_event_source_set_description(j->timer_event_source, "job-start");

        return 0;
}
|
|
|
|
|
2010-02-05 00:38:41 +01:00
|
|
|
/* Queues an installed job for execution, enabling the manager's run queue
 * event source if the queue was empty. Idempotent. */
void job_add_to_run_queue(Job *j) {
        assert(j);
        assert(j->installed);

        if (j->in_run_queue)
                return;

        /* The run queue event source stays disabled while the queue is empty;
         * re-enable it for one dispatch now that we add the first entry. */
        if (!j->manager->run_queue) {
                int r;

                r = sd_event_source_set_enabled(j->manager->run_queue_event_source, SD_EVENT_ONESHOT);
                if (r < 0)
                        log_warning_errno(r, "Failed to enable job run queue event source, ignoring: %m");
        }

        LIST_PREPEND(run_queue, j->manager->run_queue, j);
        j->in_run_queue = true;
}
|
2010-01-30 01:55:42 +01:00
|
|
|
|
2010-02-05 00:38:41 +01:00
|
|
|
/* Queues an installed job for a D-Bus change notification. Idempotent. */
void job_add_to_dbus_queue(Job *j) {
        assert(j);
        assert(j->installed);

        if (j->in_dbus_queue)
                return;

        /* We don't check if anybody is subscribed here, since this job might
         * just have been created and not yet assigned to a connection/client. */
        LIST_PREPEND(dbus_queue, j->manager->dbus_job_queue, j);
        j->in_dbus_queue = true;
}
|
|
|
|
|
2010-02-01 03:33:24 +01:00
|
|
|
/* Returns the D-Bus object path for the job, newly allocated on the heap.
 * Caller owns (and frees) the result; returns NULL on OOM. */
char *job_dbus_path(Job *j) {
        char *p;

        assert(j);

        if (asprintf(&p, "/org/freedesktop/systemd1/job/%"PRIu32, j->id) < 0)
                return NULL;

        return p;
}
|
|
|
|
|
2016-08-15 18:12:01 +02:00
|
|
|
/* Writes the job's persistent state as key=value lines, terminated by an empty
 * line acting as end marker for job_deserialize(). Per-field write errors are
 * deliberately ignored (best effort); always returns 0. */
int job_serialize(Job *j, FILE *f) {
        assert(j);
        assert(f);

        (void) serialize_item_format(f, "job-id", "%u", j->id);
        (void) serialize_item(f, "job-type", job_type_to_string(j->type));
        (void) serialize_item(f, "job-state", job_state_to_string(j->state));
        (void) serialize_bool(f, "job-irreversible", j->irreversible);
        (void) serialize_bool(f, "job-sent-dbus-new-signal", j->sent_dbus_new_signal);
        (void) serialize_bool(f, "job-ignore-order", j->ignore_order);

        /* Timestamps are serialized only when actually set, so the coldplug
         * logic can tell whether a timeout clock was running. */
        if (j->begin_usec > 0)
                (void) serialize_usec(f, "job-begin", j->begin_usec);
        if (j->begin_running_usec > 0)
                (void) serialize_usec(f, "job-begin-running", j->begin_running_usec);

        bus_track_serialize(j->bus_track, f, "subscribed");

        /* End marker */
        fputc('\n', f);
        return 0;
}
|
|
|
|
|
2016-08-15 18:12:01 +02:00
|
|
|
/* Reads key=value lines written by job_serialize() until the empty end marker
 * (or EOF), restoring the job's fields. Unparsable values are logged at debug
 * level and skipped rather than treated as fatal; only read errors and OOM
 * abort deserialization. Returns 0 on success, negative errno on error. */
int job_deserialize(Job *j, FILE *f) {
        int r;

        assert(j);
        assert(f);

        for (;;) {
                _cleanup_free_ char *buf = NULL;
                char *name, *value;
                size_t sep;

                r = read_line(f, LONG_LINE_MAX, &buf);
                if (r < 0)
                        return log_error_errno(r, "Failed to read serialization line: %m");
                if (r == 0)
                        return 0;

                name = strstrip(buf);

                /* End marker */
                if (isempty(name))
                        return 0;

                /* Split "name=value"; a line without '=' yields an empty value. */
                sep = strcspn(name, "=");
                if (name[sep] == '=') {
                        name[sep] = 0;
                        value = name + sep + 1;
                } else
                        value = name + sep;

                if (streq(name, "job-id")) {

                        if (safe_atou32(value, &j->id) < 0)
                                log_debug("Failed to parse job id value: %s", value);

                } else if (streq(name, "job-type")) {
                        JobType t = job_type_from_string(value);

                        if (t < 0)
                                log_debug("Failed to parse job type: %s", value);
                        else if (t >= _JOB_TYPE_MAX_IN_TRANSACTION)
                                log_debug("Cannot deserialize job of type: %s", value);
                        else
                                j->type = t;

                } else if (streq(name, "job-state")) {
                        JobState s = job_state_from_string(value);

                        if (s < 0)
                                log_debug("Failed to parse job state: %s", value);
                        else
                                job_set_state(j, s);

                } else if (streq(name, "job-irreversible")) {
                        int b = parse_boolean(value);

                        if (b < 0)
                                log_debug("Failed to parse job irreversible flag: %s", value);
                        else
                                j->irreversible = j->irreversible || b;

                } else if (streq(name, "job-sent-dbus-new-signal")) {
                        int b = parse_boolean(value);

                        if (b < 0)
                                log_debug("Failed to parse job sent_dbus_new_signal flag: %s", value);
                        else
                                j->sent_dbus_new_signal = j->sent_dbus_new_signal || b;

                } else if (streq(name, "job-ignore-order")) {
                        int b = parse_boolean(value);

                        if (b < 0)
                                log_debug("Failed to parse job ignore_order flag: %s", value);
                        else
                                j->ignore_order = j->ignore_order || b;

                } else if (streq(name, "job-begin"))
                        (void) deserialize_usec(value, &j->begin_usec);

                else if (streq(name, "job-begin-running"))
                        (void) deserialize_usec(value, &j->begin_running_usec);

                else if (streq(name, "subscribed")) {

                        if (strv_extend(&j->deserialized_clients, value) < 0)
                                return log_oom();

                } else
                        log_debug("Unknown job serialization key: %s", name);
        }
}
|
|
|
|
|
|
|
|
/* Re-establishes runtime state for a deserialized job: bus subscription
 * tracking, run/GC queue membership and — if a finite timeout clock was
 * running before the reload — the timeout timer, restarted from the
 * originally recorded begin timestamps (not from "now", so a daemon reload
 * does not artificially extend timeouts).
 *
 * Returns 0 when no timer is needed or it was created successfully, otherwise
 * the error from re-creating the timer. */
int job_coldplug(Job *j) {
        usec_t timeout_time = USEC_INFINITY;
        int r;

        assert(j);

        /* After deserialization is complete and the bus connection
         * set up again, let's start watching our subscribers again */
        (void) bus_job_coldplug_bus_track(j);

        if (j->state == JOB_WAITING)
                job_add_to_run_queue(j);

        /* Maybe due to new dependencies we don't actually need this job anymore? */
        job_add_to_gc_queue(j);

        /* Create timer only when job began or began running and the respective timeout is finite.
         * Follow logic of job_start_timer() if both timeouts are finite */
        if (j->begin_usec == 0)
                return 0;

        if (j->unit->job_timeout != USEC_INFINITY)
                timeout_time = usec_add(j->begin_usec, j->unit->job_timeout);

        if (j->begin_running_usec > 0 && j->unit->job_running_timeout != USEC_INFINITY)
                timeout_time = MIN(timeout_time, usec_add(j->begin_running_usec, j->unit->job_running_timeout));

        if (timeout_time == USEC_INFINITY)
                return 0;

        j->timer_event_source = sd_event_source_unref(j->timer_event_source);

        r = sd_event_add_time(
                        j->manager->event,
                        &j->timer_event_source,
                        CLOCK_MONOTONIC,
                        timeout_time, 0,
                        job_dispatch_timer, j);
        if (r < 0) {
                /* Return here: on failure j->timer_event_source is NULL, so we must
                 * not pass it to sd_event_source_set_description() below. */
                log_debug_errno(r, "Failed to restart timeout for job: %m");
                return r;
        }

        (void) sd_event_source_set_description(j->timer_event_source, "job-timeout");

        return 0;
}
|
|
|
|
|
2013-01-25 22:33:33 +01:00
|
|
|
/* Special-cases the start job for shutdown.target on the system manager:
 * re-enables console output and kicks off an asynchronous disk sync early. */
void job_shutdown_magic(Job *j) {
        assert(j);

        /* The shutdown target gets some special treatment here: we
         * tell the kernel to begin with flushing its disk caches, to
         * optimize shutdown time a bit. Ideally we wouldn't hardcode
         * this magic into PID 1. However all other processes aren't
         * options either since they'd exit much sooner than PID 1 and
         * asynchronous sync() would cause their exit to be
         * delayed. */

        if (j->type != JOB_START)
                return;
        if (!MANAGER_IS_SYSTEM(j->unit->manager))
                return;
        if (!unit_has_name(j->unit, SPECIAL_SHUTDOWN_TARGET))
                return;

        /* In case messages on console has been disabled on boot */
        j->unit->manager->no_console_output = false;

        /* Inside a container the host owns the disks; skip the sync there. */
        if (detect_container() > 0)
                return;

        (void) asynchronous_sync(NULL);
}
|
|
|
|
|
2016-02-04 00:35:43 +01:00
|
|
|
/* Determines the earliest pending timeout for a job, considering both the
 * job's own timer and any unit-type-specific timeout (via the vtable's
 * get_timeout hook).
 *
 * Returns 0 if no timeout is pending (*timeout untouched), 1 with *timeout
 * set to the earliest deadline, or a negative errno-style error. */
int job_get_timeout(Job *j, usec_t *timeout) {
        usec_t x = USEC_INFINITY, y = USEC_INFINITY;
        Unit *u;
        int r;

        /* Fix: validate the arguments before dereferencing them; previously j was
         * dereferenced in an initializer without any assertion, and the output
         * pointer was never checked. */
        assert(j);
        assert(timeout);

        u = j->unit;
        assert(u);

        if (j->timer_event_source) {
                r = sd_event_source_get_time(j->timer_event_source, &x);
                if (r < 0)
                        return r;
        }

        if (UNIT_VTABLE(u)->get_timeout) {
                r = UNIT_VTABLE(u)->get_timeout(u, &y);
                if (r < 0)
                        return r;
        }

        if (x == USEC_INFINITY && y == USEC_INFINITY)
                return 0;

        *timeout = MIN(x, y);
        return 1;
}
|
|
|
|
|
2018-02-14 00:39:06 +01:00
|
|
|
/* Checks whether this job should be GC'ed away. We only do this for jobs of
 * units that have no effect on their own and just track external state (the
 * unit type opts in via the vtable's gc_jobs flag). Returns true if the job
 * can be collected. */
bool job_may_gc(Job *j) {
        Unit *other;
        Iterator i;
        void *v;

        assert(j);

        if (!UNIT_VTABLE(j->unit)->gc_jobs)
                return false;

        /* Still referenced by a tracked bus client? Then keep it. */
        if (sd_bus_track_count(j->bus_track) > 0)
                return false;

        /* FIXME: So this is a bit ugly: for now we don't properly track references made via private
         * bus connections (because it's nasty, as sd_bus_track doesn't apply to it). We simply
         * remember that the job was once referenced by one, and reset this whenever we notice that
         * no private bus connections are around. This means the GC is a bit too conservative when it
         * comes to jobs created by private bus connections. */
        if (j->ref_by_private_bus) {
                if (set_isempty(j->unit->manager->private_buses))
                        j->ref_by_private_bus = false;
                else
                        return false;
        }

        if (j->type == JOB_NOP)
                return false;

        /* If a job is ordered after ours, and is to be started, then it needs to wait for us,
         * regardless if we stop or start, hence let's not GC in that case. */
        HASHMAP_FOREACH_KEY(v, other, j->unit->dependencies[UNIT_BEFORE], i) {
                if (!other->job || other->job->ignore_order)
                        continue;

                if (IN_SET(other->job->type, JOB_START, JOB_VERIFY_ACTIVE, JOB_RELOAD))
                        return false;
        }

        /* If we are going down, but something else is ordered After= us, then it needs to wait for us */
        if (IN_SET(j->type, JOB_STOP, JOB_RESTART))
                HASHMAP_FOREACH_KEY(v, other, j->unit->dependencies[UNIT_AFTER], i) {
                        if (!other->job || other->job->ignore_order)
                                continue;

                        return false;
                }

        /* The logic above is kinda the inverse of the job_is_runnable() logic. Specifically, if the
         * job "we" is ordered before the job "other":
         *
         *  we start + other start → stay
         *  we start + other stop  → gc
         *  we stop  + other start → stay
         *  we stop  + other stop  → gc
         *
         * "we" are ordered after "other":
         *
         *  we start + other start → gc
         *  we start + other stop  → gc
         *  we stop  + other start → stay
         *  we stop  + other stop  → stay
         */

        return true;
}
|
|
|
|
|
|
|
|
/* Queues a job for garbage collection if it is collectible per job_may_gc().
 * Idempotent. */
void job_add_to_gc_queue(Job *j) {
        assert(j);

        if (j->in_gc_queue)
                return;
        if (!job_may_gc(j))
                return;

        LIST_PREPEND(gc_queue, j->unit->manager->gc_job_queue, j);
        j->in_gc_queue = true;
}
|
|
|
|
|
2018-09-18 01:39:24 +02:00
|
|
|
/* typesafe_qsort() comparator: orders jobs by numeric ID, ascending. */
static int job_compare(Job * const *a, Job * const *b) {
        return CMP((*a)->id, (*b)->id);
}
|
|
|
|
|
|
|
|
/* Sorts a job array by ID and compacts out adjacent duplicate pointers
 * in place. Returns the number of unique entries kept. */
static size_t sort_job_list(Job **list, size_t n) {
        Job *previous = NULL;
        size_t kept = 0;

        /* Order by numeric IDs */
        typesafe_qsort(list, n, job_compare);

        /* Filter out duplicates (equal pointers are adjacent after sorting) */
        for (size_t idx = 0; idx < n; idx++) {
                if (previous == list[idx])
                        continue;

                previous = list[kept++] = list[idx];
        }

        return kept;
}
|
|
|
|
|
|
|
|
int job_get_before(Job *j, Job*** ret) {
        _cleanup_free_ Job** jobs = NULL;
        size_t n_jobs = 0, allocated = 0;
        Unit *dep = NULL;
        Iterator it;
        void *mask;

        /* Returns a list of all pending jobs that need to finish before this job may be started.
         * On success the (possibly NULL, if empty) list is returned in *ret and the number of
         * entries as the return value. Jobs with ordering disabled have no blockers by definition. */

        assert(j);
        assert(ret);

        if (j->ignore_order) {
                *ret = NULL;
                return 0;
        }

        /* Starting-type jobs wait for pending jobs on units we are ordered after. */
        if (IN_SET(j->type, JOB_START, JOB_VERIFY_ACTIVE, JOB_RELOAD)) {
                HASHMAP_FOREACH_KEY(mask, dep, j->unit->dependencies[UNIT_AFTER], it) {
                        if (!dep->job)
                                continue;

                        if (!GREEDY_REALLOC(jobs, allocated, n_jobs + 1))
                                return -ENOMEM;

                        jobs[n_jobs++] = dep->job;
                }
        }

        /* Regardless of our own job type, pending stop/restart jobs on units we are ordered
         * before must run first. */
        HASHMAP_FOREACH_KEY(mask, dep, j->unit->dependencies[UNIT_BEFORE], it) {
                if (!dep->job || !IN_SET(dep->job->type, JOB_STOP, JOB_RESTART))
                        continue;

                if (!GREEDY_REALLOC(jobs, allocated, n_jobs + 1))
                        return -ENOMEM;

                jobs[n_jobs++] = dep->job;
        }

        /* Sort by job ID and drop duplicates before handing the list out. */
        n_jobs = sort_job_list(jobs, n_jobs);

        *ret = TAKE_PTR(jobs);

        return (int) n_jobs;
}
|
|
|
|
|
|
|
|
int job_get_after(Job *j, Job*** ret) {
        _cleanup_free_ Job** jobs = NULL;
        size_t n_jobs = 0, allocated = 0;
        Unit *dep = NULL;
        void *mask;
        Iterator it;

        assert(j);
        assert(ret);

        /* Returns a list of all pending jobs that are waiting for this job to finish. On success
         * the (possibly NULL, if empty) list is returned in *ret and the number of entries as the
         * return value. Jobs that ignore ordering are never reported as waiters. */

        /* Pending starting-type jobs on units ordered after us wait for us. */
        HASHMAP_FOREACH_KEY(mask, dep, j->unit->dependencies[UNIT_BEFORE], it) {
                if (!dep->job)
                        continue;

                if (dep->job->ignore_order ||
                    !IN_SET(dep->job->type, JOB_START, JOB_VERIFY_ACTIVE, JOB_RELOAD))
                        continue;

                if (!GREEDY_REALLOC(jobs, allocated, n_jobs + 1))
                        return -ENOMEM;

                jobs[n_jobs++] = dep->job;
        }

        /* If we are stopping, any pending job on units ordered before us waits for us, too. */
        if (IN_SET(j->type, JOB_STOP, JOB_RESTART)) {
                HASHMAP_FOREACH_KEY(mask, dep, j->unit->dependencies[UNIT_AFTER], it) {
                        if (!dep->job || dep->job->ignore_order)
                                continue;

                        if (!GREEDY_REALLOC(jobs, allocated, n_jobs + 1))
                                return -ENOMEM;

                        jobs[n_jobs++] = dep->job;
                }
        }

        /* Sort by job ID and drop duplicates before handing the list out. */
        n_jobs = sort_job_list(jobs, n_jobs);

        *ret = TAKE_PTR(jobs);

        return (int) n_jobs;
}
|
|
|
|
|
2010-01-30 01:55:42 +01:00
|
|
|
/* Maps JobState values to the strings used in serialization and D-Bus properties. */
static const char* const job_state_table[_JOB_STATE_MAX] = {
        [JOB_WAITING] = "waiting",
        [JOB_RUNNING] = "running",
};

DEFINE_STRING_TABLE_LOOKUP(job_state, JobState);
|
|
|
|
|
|
|
|
/* Maps JobType values to the strings used in serialization, D-Bus and logging. */
static const char* const job_type_table[_JOB_TYPE_MAX] = {
        [JOB_START] = "start",
        [JOB_VERIFY_ACTIVE] = "verify-active",
        [JOB_STOP] = "stop",
        [JOB_RELOAD] = "reload",
        /* "reload-or-start" and "try-restart" are collapsed into a concrete type
         * (based on the unit's active state) when the job is installed. */
        [JOB_RELOAD_OR_START] = "reload-or-start",
        [JOB_RESTART] = "restart",
        [JOB_TRY_RESTART] = "try-restart",
        [JOB_TRY_RELOAD] = "try-reload",
        [JOB_NOP] = "nop",
};

DEFINE_STRING_TABLE_LOOKUP(job_type, JobType);
|
2010-02-03 12:37:42 +01:00
|
|
|
|
|
|
|
/* Maps JobMode values (how a new job interacts with already-queued jobs) to their
 * string names, as accepted e.g. on the D-Bus API and by "systemctl --job-mode=". */
static const char* const job_mode_table[_JOB_MODE_MAX] = {
        [JOB_FAIL] = "fail",
        [JOB_REPLACE] = "replace",
        [JOB_REPLACE_IRREVERSIBLY] = "replace-irreversibly",
        [JOB_ISOLATE] = "isolate",
        [JOB_FLUSH] = "flush",
        [JOB_IGNORE_DEPENDENCIES] = "ignore-dependencies",
        [JOB_IGNORE_REQUIREMENTS] = "ignore-requirements",
};

DEFINE_STRING_TABLE_LOOKUP(job_mode, JobMode);
|
2011-02-24 02:36:34 +01:00
|
|
|
|
|
|
|
/* Maps JobResult values (how a finished job ended) to their string names, used in
 * serialization, D-Bus signals and status/log output. */
static const char* const job_result_table[_JOB_RESULT_MAX] = {
        [JOB_DONE] = "done",
        [JOB_CANCELED] = "canceled",
        [JOB_TIMEOUT] = "timeout",
        [JOB_FAILED] = "failed",
        [JOB_DEPENDENCY] = "dependency",
        [JOB_SKIPPED] = "skipped",
        [JOB_INVALID] = "invalid",
        [JOB_ASSERT] = "assert",
        [JOB_UNSUPPORTED] = "unsupported",
        [JOB_COLLECTED] = "collected",
        [JOB_ONCE] = "once",
};

DEFINE_STRING_TABLE_LOOKUP(job_result, JobResult);
|
2016-01-31 14:26:09 +01:00
|
|
|
|
|
|
|
/* Maps a job type to the polkit-style access verb ("start", "stop" or "reload")
 * used when checking whether a client may enqueue a job of this type. */
const char* job_type_to_access_method(JobType t) {
        assert(t >= 0);
        assert(t < _JOB_TYPE_MAX);

        switch (t) {

        case JOB_START:
        case JOB_RESTART:
        case JOB_TRY_RESTART:
                return "start";

        case JOB_STOP:
                return "stop";

        default:
                /* Everything else (reload, verify-active, …) is gated as "reload". */
                return "reload";
        }
}
|