diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 312c211dd..893f4a56f 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -21,7 +21,7 @@ jobs: fetch-depth: 0 - name: Create backport PRs # should be kept in sync with `version` - uses: zeebe-io/backport-action@v2.0.0 + uses: zeebe-io/backport-action@v2.1.0 with: # Config README: https://github.com/zeebe-io/backport-action#backport-action github_token: ${{ secrets.GITHUB_TOKEN }} diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md index 3cfb53998..1e6ad6922 100644 --- a/doc/manual/src/release-notes/rl-next.md +++ b/doc/manual/src/release-notes/rl-next.md @@ -1,5 +1,8 @@ # Release X.Y (202?-??-??) +- The experimental `nix` command can now act as a [shebang interpreter](@docroot@/command-ref/new-cli/nix.md#shebang-interpreter) + by appending the contents of any `#! nix` lines and the script's location to a single call. + - [URL flake references](@docroot@/command-ref/new-cli/nix3-flake.md#flake-references) now support [percent-encoded](https://datatracker.ietf.org/doc/html/rfc3986#section-2.1) characters. - [Path-like flake references](@docroot@/command-ref/new-cli/nix3-flake.md#path-like-syntax) now accept arbitrary unicode characters (except `#` and `?`). @@ -14,8 +17,8 @@ - `nix-shell` shebang lines now support single-quoted arguments. -- `builtins.fetchTree` is now marked as stable. - +- `builtins.fetchTree` is now its own experimental feature, [`fetch-tree`](@docroot@/contributing/experimental-features.md#xp-fetch-tree). + As described in the document for that feature, this is because we anticipate polishing it and then stabilizing it before the rest of Flakes. - The interface for creating and updating lock files has been overhauled: @@ -29,3 +32,44 @@ - The flake-specific flags `--recreate-lock-file` and `--update-input` have been removed from all commands operating on installables. They are superseded by `nix flake update`. + +- Commit signature verification for [`builtins.fetchGit`](@docroot@/language/builtins.md#builtins-fetchGit) has been added as the new [`verified-fetches` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-verified-fetches). + +- [`nix path-info --json`](@docroot@/command-ref/new-cli/nix3-path-info.md) + (experimental) now returns a JSON map rather than a JSON list. + The `path` field of each object has instead become the key in the outer map, since it is unique. + The `valid` field also goes away because we just use `null` instead. + + - Old way: + + ```json5 + [ + { + "path": "/nix/store/8fv91097mbh5049i9rglc73dx6kjg3qk-bash-5.2-p15", + "valid": true, + // ... + }, + { + "path": "/nix/store/wffw7l0alvs3iw94cbgi1gmmbmw99sqb-home-manager-path", + "valid": false + } + ] + ``` + + - New way: + + ```json5 + { + "/nix/store/8fv91097mbh5049i9rglc73dx6kjg3qk-bash-5.2-p15": { + // ... + }, + "/nix/store/wffw7l0alvs3iw94cbgi1gmmbmw99sqb-home-manager-path": null, + } + ``` + + This makes it match `nix derivation show`, which also maps store paths to information. + +- When Nix is installed using the [binary installer](@docroot@/installation/installing-binary.md), in supported shells (Bash, Zsh, Fish) + [`XDG_DATA_DIRS`](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html#variables) is now populated with the path to the `/share` subdirectory of the current profile.
+ This means that command completion scripts, `.desktop` files, and similar artifacts installed via [`nix-env`](@docroot@/command-ref/nix-env.md) or [`nix profile`](@docroot@/command-ref/new-cli/nix3-profile.md) + (experimental) can be found by any program that follows the [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html). diff --git a/flake.nix b/flake.nix index db2901ff8..e71aa5374 100644 --- a/flake.nix +++ b/flake.nix @@ -185,6 +185,7 @@ buildPackages.git buildPackages.mercurial # FIXME: remove? only needed for tests buildPackages.jq # Also for custom mdBook preprocessor. + buildPackages.openssh # only needed for tests (ssh-keygen) ] ++ lib.optionals stdenv.hostPlatform.isLinux [(buildPackages.util-linuxMinimal or buildPackages.utillinuxMinimal)]; diff --git a/maintainers/README.md b/maintainers/README.md index 5be4f9d04..ee97c1195 100644 --- a/maintainers/README.md +++ b/maintainers/README.md @@ -2,7 +2,7 @@ ## Motivation -The team's main responsibility is to set a direction for the development of Nix and ensure that the code is in good shape. +The team's main responsibility is to guide and direct the development of Nix and ensure that the code is in good shape. We aim to achieve this by improving the contributor experience and attracting more maintainers – that is, by helping other people contributing to Nix and eventually taking responsibility – in order to scale the development process to match users' needs. diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs index 08f812b31..f89ac4077 100644 --- a/perl/lib/Nix/Store.xs +++ b/perl/lib/Nix/Store.xs @@ -11,7 +11,6 @@ #include "derivations.hh" #include "globals.hh" #include "store-api.hh" -#include "util.hh" #include "crypto.hh" #include diff --git a/scripts/nix-profile-daemon.fish.in b/scripts/nix-profile-daemon.fish.in index 400696812..c23aa64f0 100644 --- a/scripts/nix-profile-daemon.fish.in +++ b/scripts/nix-profile-daemon.fish.in @@ -19,6 +19,14 @@ set __ETC_PROFILE_NIX_SOURCED 1 set --export NIX_PROFILES "@localstatedir@/nix/profiles/default $HOME/.nix-profile" +# Populate bash completions, .desktop files, etc +if test -z "$XDG_DATA_DIRS" + # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default + set --export XDG_DATA_DIRS "/usr/local/share:/usr/share:/nix/var/nix/profiles/default/share" +else + set --export XDG_DATA_DIRS "$XDG_DATA_DIRS:/nix/var/nix/profiles/default/share" +end + # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. if test -n "$NIX_SSH_CERT_FILE" : # Allow users to override the NIX_SSL_CERT_FILE diff --git a/scripts/nix-profile-daemon.sh.in b/scripts/nix-profile-daemon.sh.in index 8cfd3149e..c63db4648 100644 --- a/scripts/nix-profile-daemon.sh.in +++ b/scripts/nix-profile-daemon.sh.in @@ -30,6 +30,14 @@ fi export NIX_PROFILES="@localstatedir@/nix/profiles/default $NIX_LINK" +# Populate bash completions, .desktop files, etc +if [ -z "$XDG_DATA_DIRS" ]; then + # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default + export XDG_DATA_DIRS="/usr/local/share:/usr/share:$NIX_LINK/share:/nix/var/nix/profiles/default/share" +else + export XDG_DATA_DIRS="$XDG_DATA_DIRS:$NIX_LINK/share:/nix/var/nix/profiles/default/share" +fi + # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. 
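
A minimal sketch of how a consumer might read the new map-shaped `nix path-info --json` output described in the release notes above, using nlohmann::json (the JSON library already used throughout this patch). The store paths and fields in the sample are placeholders, not real output.

```cpp
#include <iostream>
#include <nlohmann/json.hpp>

int main()
{
    // Sample output in the new format: store path -> info object, or null
    // for paths that are not valid (replacing the old `"valid": false`).
    // Both paths below are made up for illustration.
    auto infos = nlohmann::json::parse(R"({
        "/nix/store/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-example": { "narSize": 1234 },
        "/nix/store/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb-missing": null
    })");

    for (auto & [storePath, info] : infos.items()) {
        if (info.is_null())
            std::cout << storePath << ": not valid\n";
        else
            std::cout << storePath << ": narSize=" << info.value("narSize", 0) << "\n";
    }
}
```

Nothing here touches Nix internals; it only shows the shape change (a list of objects with `path`/`valid` versus a map keyed by store path, with `null` marking invalid entries) from the consuming side.
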
if [ -n "${NIX_SSL_CERT_FILE:-}" ]; then : # Allow users to override the NIX_SSL_CERT_FILE diff --git a/scripts/nix-profile.fish.in b/scripts/nix-profile.fish.in index 731498c76..619df52b8 100644 --- a/scripts/nix-profile.fish.in +++ b/scripts/nix-profile.fish.in @@ -20,6 +20,14 @@ if test -n "$HOME" && test -n "$USER" # This part should be kept in sync with nixpkgs:nixos/modules/programs/environment.nix set --export NIX_PROFILES "@localstatedir@/nix/profiles/default $HOME/.nix-profile" + # Populate bash completions, .desktop files, etc + if test -z "$XDG_DATA_DIRS" + # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default + set --export XDG_DATA_DIRS "/usr/local/share:/usr/share:$NIX_LINK/share:/nix/var/nix/profiles/default/share" + else + set --export XDG_DATA_DIRS "$XDG_DATA_DIRS:$NIX_LINK/share:/nix/var/nix/profiles/default/share" + end + # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. if test -n "$NIX_SSH_CERT_FILE" : # Allow users to override the NIX_SSL_CERT_FILE diff --git a/scripts/nix-profile.sh.in b/scripts/nix-profile.sh.in index c4d60cf37..56e070ae1 100644 --- a/scripts/nix-profile.sh.in +++ b/scripts/nix-profile.sh.in @@ -32,6 +32,14 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then # This part should be kept in sync with nixpkgs:nixos/modules/programs/environment.nix export NIX_PROFILES="@localstatedir@/nix/profiles/default $NIX_LINK" + # Populate bash completions, .desktop files, etc + if [ -z "$XDG_DATA_DIRS" ]; then + # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default + export XDG_DATA_DIRS="/usr/local/share:/usr/share:$NIX_LINK/share:/nix/var/nix/profiles/default/share" + else + export XDG_DATA_DIRS="$XDG_DATA_DIRS:$NIX_LINK/share:/nix/var/nix/profiles/default/share" + fi + # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. 
if [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt diff --git a/src/libcmd/built-path.hh b/src/libcmd/built-path.hh index e677bc810..7154cc504 100644 --- a/src/libcmd/built-path.hh +++ b/src/libcmd/built-path.hh @@ -1,3 +1,6 @@ +#pragma once +///@file + #include "derived-path.hh" #include "realisation.hh" diff --git a/src/libcmd/command.cc b/src/libcmd/command.cc index a88ba8134..de9f546fc 100644 --- a/src/libcmd/command.cc +++ b/src/libcmd/command.cc @@ -175,7 +175,7 @@ void BuiltPathsCommand::run(ref store, Installables && installables) throw UsageError("'--all' does not expect arguments"); // XXX: Only uses opaque paths, ignores all the realisations for (auto & p : store->queryAllValidPaths()) - paths.push_back(BuiltPath::Opaque{p}); + paths.emplace_back(BuiltPath::Opaque{p}); } else { paths = Installable::toBuiltPaths(getEvalStore(), store, realiseMode, operateOn, installables); if (recursive) { @@ -188,7 +188,7 @@ void BuiltPathsCommand::run(ref store, Installables && installables) } store->computeFSClosure(pathsRoots, pathsClosure); for (auto & path : pathsClosure) - paths.push_back(BuiltPath::Opaque{path}); + paths.emplace_back(BuiltPath::Opaque{path}); } } diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc index e53bc4c01..401acc38e 100644 --- a/src/libcmd/common-eval-args.cc +++ b/src/libcmd/common-eval-args.cc @@ -2,7 +2,6 @@ #include "common-eval-args.hh" #include "shared.hh" #include "filetransfer.hh" -#include "util.hh" #include "eval.hh" #include "fetchers.hh" #include "registry.hh" @@ -165,7 +164,7 @@ Bindings * MixEvalArgs::getAutoArgs(EvalState & state) return res.finish(); } -SourcePath lookupFileArg(EvalState & state, std::string_view s) +SourcePath lookupFileArg(EvalState & state, std::string_view s, CanonPath baseDir) { if (EvalSettings::isPseudoUrl(s)) { auto storePath = fetchers::downloadTarball( @@ -186,7 +185,7 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s) } else - return state.rootPath(CanonPath::fromCwd(s)); + return state.rootPath(CanonPath(s, baseDir)); } } diff --git a/src/libcmd/common-eval-args.hh b/src/libcmd/common-eval-args.hh index 6359b2579..4b403d936 100644 --- a/src/libcmd/common-eval-args.hh +++ b/src/libcmd/common-eval-args.hh @@ -2,6 +2,7 @@ ///@file #include "args.hh" +#include "canon-path.hh" #include "common-args.hh" #include "search-path.hh" @@ -28,6 +29,6 @@ private: std::map autoArgs; }; -SourcePath lookupFileArg(EvalState & state, std::string_view s); +SourcePath lookupFileArg(EvalState & state, std::string_view s, CanonPath baseDir = CanonPath::fromCwd()); } diff --git a/src/libcmd/editor-for.cc b/src/libcmd/editor-for.cc index a17c6f12a..619d3673f 100644 --- a/src/libcmd/editor-for.cc +++ b/src/libcmd/editor-for.cc @@ -1,5 +1,5 @@ -#include "util.hh" #include "editor-for.hh" +#include "environment-variables.hh" namespace nix { diff --git a/src/libcmd/installable-attr-path.hh b/src/libcmd/installable-attr-path.hh index e9f0c33da..86c2f8219 100644 --- a/src/libcmd/installable-attr-path.hh +++ b/src/libcmd/installable-attr-path.hh @@ -4,7 +4,6 @@ #include "globals.hh" #include "installable-value.hh" #include "outputs-spec.hh" -#include "util.hh" #include "command.hh" #include "attr-path.hh" #include "common-eval-args.hh" diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 3aff601e0..1c6103020 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -4,6 +4,7 
@@ #include "installable-attr-path.hh" #include "installable-flake.hh" #include "outputs-spec.hh" +#include "users.hh" #include "util.hh" #include "command.hh" #include "attr-path.hh" @@ -87,7 +88,7 @@ MixFlakeOptions::MixFlakeOptions() lockFlags.writeLockFile = false; lockFlags.inputOverrides.insert_or_assign( flake::parseInputPath(inputPath), - parseFlakeRef(flakeRef, absPath("."), true)); + parseFlakeRef(flakeRef, absPath(getCommandBaseDir()), true)); }}, .completer = {[&](AddCompletions & completions, size_t n, std::string_view prefix) { if (n == 0) { @@ -129,7 +130,7 @@ MixFlakeOptions::MixFlakeOptions() auto evalState = getEvalState(); auto flake = flake::lockFlake( *evalState, - parseFlakeRef(flakeRef, absPath(".")), + parseFlakeRef(flakeRef, absPath(getCommandBaseDir())), { .writeLockFile = false }); for (auto & [inputName, input] : flake.lockFile.root->inputs) { auto input2 = flake.lockFile.findInput({inputName}); // resolve 'follows' nodes @@ -293,6 +294,8 @@ void completeFlakeRefWithFragment( prefixRoot = "."; } auto flakeRefS = std::string(prefix.substr(0, hash)); + + // TODO: ideally this would use the command base directory instead of assuming ".". auto flakeRef = parseFlakeRef(expandTilde(flakeRefS), absPath(".")); auto evalCache = openEvalCache(*evalState, @@ -441,10 +444,12 @@ Installables SourceExprCommand::parseInstallables( auto e = state->parseStdin(); state->eval(e, *vFile); } - else if (file) - state->evalFile(lookupFileArg(*state, *file), *vFile); + else if (file) { + state->evalFile(lookupFileArg(*state, *file, CanonPath::fromCwd(getCommandBaseDir())), *vFile); + } else { - auto e = state->parseExprFromString(*expr, state->rootPath(CanonPath::fromCwd())); + CanonPath dir(CanonPath::fromCwd(getCommandBaseDir())); + auto e = state->parseExprFromString(*expr, state->rootPath(dir)); state->eval(e, *vFile); } @@ -479,7 +484,7 @@ Installables SourceExprCommand::parseInstallables( } try { - auto [flakeRef, fragment] = parseFlakeRefWithFragment(std::string { prefix }, absPath(".")); + auto [flakeRef, fragment] = parseFlakeRefWithFragment(std::string { prefix }, absPath(getCommandBaseDir())); result.push_back(make_ref( this, getEvalState(), @@ -663,7 +668,7 @@ BuiltPaths Installable::toBuiltPaths( BuiltPaths res; for (auto & drvPath : Installable::toDerivations(store, installables, true)) - res.push_back(BuiltPath::Opaque{drvPath}); + res.emplace_back(BuiltPath::Opaque{drvPath}); return res; } } @@ -753,7 +758,7 @@ std::vector RawInstallablesCommand::getFlakeRefsForCompletion() for (auto i : rawInstallables) res.push_back(parseFlakeRefWithFragment( expandTilde(i), - absPath(".")).first); + absPath(getCommandBaseDir())).first); return res; } @@ -775,7 +780,7 @@ std::vector InstallableCommand::getFlakeRefsForCompletion() return { parseFlakeRefWithFragment( expandTilde(_installable), - absPath(".")).first + absPath(getCommandBaseDir())).first }; } diff --git a/src/libcmd/installables.hh b/src/libcmd/installables.hh index b0dc0dc02..e087f935c 100644 --- a/src/libcmd/installables.hh +++ b/src/libcmd/installables.hh @@ -1,7 +1,6 @@ #pragma once ///@file -#include "util.hh" #include "path.hh" #include "outputs-spec.hh" #include "derived-path.hh" diff --git a/src/libcmd/markdown.cc b/src/libcmd/markdown.cc index 668a07763..8b3bbc1b5 100644 --- a/src/libcmd/markdown.cc +++ b/src/libcmd/markdown.cc @@ -1,6 +1,7 @@ #include "markdown.hh" #include "util.hh" #include "finally.hh" +#include "terminal.hh" #include #include diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc index 
2e17a29a7..bf5643a5c 100644 --- a/src/libcmd/repl.cc +++ b/src/libcmd/repl.cc @@ -22,6 +22,7 @@ extern "C" { #include "repl.hh" #include "ansicolor.hh" +#include "signals.hh" #include "shared.hh" #include "eval.hh" #include "eval-cache.hh" @@ -36,6 +37,8 @@ extern "C" { #include "globals.hh" #include "flake/flake.hh" #include "flake/lockfile.hh" +#include "users.hh" +#include "terminal.hh" #include "editor-for.hh" #include "finally.hh" #include "markdown.hh" diff --git a/src/libexpr/attr-path.cc b/src/libexpr/attr-path.cc index d12345710..7481a2232 100644 --- a/src/libexpr/attr-path.cc +++ b/src/libexpr/attr-path.cc @@ -1,6 +1,5 @@ #include "attr-path.hh" #include "eval-inline.hh" -#include "util.hh" namespace nix { diff --git a/src/libexpr/eval-cache.cc b/src/libexpr/eval-cache.cc index 10fc799a9..6c0e33709 100644 --- a/src/libexpr/eval-cache.cc +++ b/src/libexpr/eval-cache.cc @@ -1,3 +1,4 @@ +#include "users.hh" #include "eval-cache.hh" #include "sqlite.hh" #include "eval.hh" diff --git a/src/libexpr/eval-settings.cc b/src/libexpr/eval-settings.cc index 93b4a5289..444a7d7d6 100644 --- a/src/libexpr/eval-settings.cc +++ b/src/libexpr/eval-settings.cc @@ -1,3 +1,4 @@ +#include "users.hh" #include "globals.hh" #include "profiles.hh" #include "eval.hh" diff --git a/src/libexpr/eval-settings.hh b/src/libexpr/eval-settings.hh index 5473d688e..db2971acb 100644 --- a/src/libexpr/eval-settings.hh +++ b/src/libexpr/eval-settings.hh @@ -1,4 +1,6 @@ #pragma once +///@file + #include "config.hh" namespace nix { diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index d26cde423..dfe81cbf7 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -14,6 +14,7 @@ #include "print.hh" #include "fs-input-accessor.hh" #include "memory-input-accessor.hh" +#include "signals.hh" #include #include diff --git a/src/libexpr/flake/config.cc b/src/libexpr/flake/config.cc index e89014862..3c7ed5d8a 100644 --- a/src/libexpr/flake/config.cc +++ b/src/libexpr/flake/config.cc @@ -1,6 +1,7 @@ -#include "flake.hh" +#include "users.hh" #include "globals.hh" #include "fetch-settings.hh" +#include "flake.hh" #include diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index 70ae7b584..54de53e0b 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -1,3 +1,4 @@ +#include "terminal.hh" #include "flake.hh" #include "eval.hh" #include "eval-settings.hh" @@ -8,6 +9,7 @@ #include "fetchers.hh" #include "finally.hh" #include "fetch-settings.hh" +#include "value-to-json.hh" namespace nix { @@ -140,8 +142,13 @@ static FlakeInput parseFlakeInput(EvalState & state, attrs.emplace(state.symbols[attr.name], (long unsigned int)attr.value->integer); break; default: - throw TypeError("flake input attribute '%s' is %s while a string, Boolean, or integer is expected", - state.symbols[attr.name], showType(*attr.value)); + if (attr.name == state.symbols.create("publicKeys")) { + experimentalFeatureSettings.require(Xp::VerifiedFetches); + NixStringContext emptyContext = {}; + attrs.emplace(state.symbols[attr.name], printValueAsJSON(state, true, *attr.value, pos, emptyContext).dump()); + } else + throw TypeError("flake input attribute '%s' is %s while a string, Boolean, or integer is expected", + state.symbols[attr.name], showType(*attr.value)); } #pragma GCC diagnostic pop } diff --git a/src/libexpr/get-drvs.cc b/src/libexpr/get-drvs.cc index fe3e6f7ee..d4e946d81 100644 --- a/src/libexpr/get-drvs.cc +++ b/src/libexpr/get-drvs.cc @@ -1,5 +1,4 @@ #include "get-drvs.hh" -#include "util.hh" #include 
"eval-inline.hh" #include "derivations.hh" #include "store-api.hh" diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index 607795937..b86cef217 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -19,6 +19,7 @@ #include #include "util.hh" +#include "users.hh" #include "nixexpr.hh" #include "eval.hh" diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index e3c775d90..8d3a18526 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -10,6 +10,7 @@ #include "path-references.hh" #include "store-api.hh" #include "util.hh" +#include "processes.hh" #include "value-to-json.hh" #include "value-to-xml.hh" #include "primops.hh" @@ -824,7 +825,7 @@ static void prim_addErrorContext(EvalState & state, const PosIdx pos, Value * * auto message = state.coerceToString(pos, *args[0], context, "while evaluating the error message passed to builtins.addErrorContext", false, false).toOwned(); - e.addTrace(nullptr, message, true); + e.addTrace(nullptr, hintfmt(message), true); throw; } } diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index 767f559be..8031bf809 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -7,6 +7,7 @@ #include "registry.hh" #include "tarball.hh" #include "url.hh" +#include "value-to-json.hh" #include #include @@ -125,6 +126,10 @@ static void fetchTree( attrs.emplace(state.symbols[attr.name], Explicit{attr.value->boolean}); else if (attr.value->type() == nInt) attrs.emplace(state.symbols[attr.name], uint64_t(attr.value->integer)); + else if (state.symbols[attr.name] == "publicKeys") { + experimentalFeatureSettings.require(Xp::VerifiedFetches); + attrs.emplace(state.symbols[attr.name], printValueAsJSON(state, true, *attr.value, pos, context).dump()); + } else state.debugThrowLastTrace(TypeError("fetchTree argument '%s' is %s while a string, Boolean or integer is expected", state.symbols[attr.name], showType(*attr.value))); @@ -223,6 +228,7 @@ static RegisterPrimOp primop_fetchTree({ ``` )", .fun = prim_fetchTree, + .experimentalFeature = Xp::FetchTree, }); static void fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v, @@ -427,6 +433,42 @@ static RegisterPrimOp primop_fetchGit({ With this argument being true, it's possible to load a `rev` from *any* `ref` (by default only `rev`s from the specified `ref` are supported). + - `verifyCommit` (default: `true` if `publicKey` or `publicKeys` are provided, otherwise `false`) + + Whether to check `rev` for a signature matching `publicKey` or `publicKeys`. + If `verifyCommit` is enabled, then `fetchGit` cannot use a local repository with uncommitted changes. + Requires the [`verified-fetches` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-verified-fetches). + + - `publicKey` + + The public key against which `rev` is verified if `verifyCommit` is enabled. + Requires the [`verified-fetches` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-verified-fetches). + + - `keytype` (default: `"ssh-ed25519"`) + + The key type of `publicKey`. + Possible values: + - `"ssh-dsa"` + - `"ssh-ecdsa"` + - `"ssh-ecdsa-sk"` + - `"ssh-ed25519"` + - `"ssh-ed25519-sk"` + - `"ssh-rsa"` + Requires the [`verified-fetches` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-verified-fetches). + + - `publicKeys` + + The public keys against which `rev` is verified if `verifyCommit` is enabled. 
+ Must be given as a list of attribute sets with the following form: + ```nix + { + key = ""; + type = ""; # optional, default: "ssh-ed25519" + } + ``` + Requires the [`verified-fetches` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-verified-fetches). + + Here are some examples of how to use `fetchGit`. - To fetch a private repository over SSH: @@ -501,6 +543,21 @@ static RegisterPrimOp primop_fetchGit({ } ``` + - To verify the commit signature: + + ```nix + builtins.fetchGit { + url = "ssh://git@github.com/nixos/nix.git"; + verifyCommit = true; + publicKeys = [ + { + type = "ssh-ed25519"; + key = "AAAAC3NzaC1lZDI1NTE5AAAAIArPKULJOid8eS6XETwUjO48/HKBWl7FTCK0Z//fplDi"; + } + ]; + } + ``` + Nix will refetch the branch according to the [`tarball-ttl`](@docroot@/command-ref/conf-file.md#conf-tarball-ttl) setting. This behavior is disabled in [pure evaluation mode](@docroot@/command-ref/conf-file.md#conf-pure-eval). diff --git a/src/libexpr/search-path.cc b/src/libexpr/search-path.cc index 180d5f8b1..a25767496 100644 --- a/src/libexpr/search-path.cc +++ b/src/libexpr/search-path.cc @@ -1,5 +1,4 @@ #include "search-path.hh" -#include "util.hh" namespace nix { diff --git a/src/libexpr/value-to-json.cc b/src/libexpr/value-to-json.cc index cbc91f509..74b3ebf13 100644 --- a/src/libexpr/value-to-json.cc +++ b/src/libexpr/value-to-json.cc @@ -1,7 +1,7 @@ #include "value-to-json.hh" #include "eval-inline.hh" -#include "util.hh" #include "store-api.hh" +#include "signals.hh" #include #include diff --git a/src/libexpr/value-to-xml.cc b/src/libexpr/value-to-xml.cc index bd7a4ae30..5032115bb 100644 --- a/src/libexpr/value-to-xml.cc +++ b/src/libexpr/value-to-xml.cc @@ -1,7 +1,7 @@ #include "value-to-xml.hh" #include "xml-writer.hh" #include "eval-inline.hh" -#include "util.hh" +#include "signals.hh" #include diff --git a/src/libexpr/value/context.cc b/src/libexpr/value/context.cc index 22361d8fa..6d9633268 100644 --- a/src/libexpr/value/context.cc +++ b/src/libexpr/value/context.cc @@ -1,3 +1,4 @@ +#include "util.hh" #include "value/context.hh" #include diff --git a/src/libexpr/value/context.hh b/src/libexpr/value/context.hh index 9f1d59317..51fd30a44 100644 --- a/src/libexpr/value/context.hh +++ b/src/libexpr/value/context.hh @@ -1,7 +1,6 @@ #pragma once ///@file -#include "util.hh" #include "comparator.hh" #include "derived-path.hh" #include "variant-wrapper.hh" diff --git a/src/libfetchers/cache.cc b/src/libfetchers/cache.cc index 8a3e462d3..63b05bdab 100644 --- a/src/libfetchers/cache.cc +++ b/src/libfetchers/cache.cc @@ -1,4 +1,5 @@ #include "cache.hh" +#include "users.hh" #include "sqlite.hh" #include "sync.hh" #include "store-api.hh" diff --git a/src/libfetchers/fetch-settings.hh b/src/libfetchers/fetch-settings.hh index 6108a179c..f095963a8 100644 --- a/src/libfetchers/fetch-settings.hh +++ b/src/libfetchers/fetch-settings.hh @@ -3,7 +3,6 @@ #include "types.hh" #include "config.hh" -#include "util.hh" #include #include diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index fd0757ac3..19e089aa8 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -373,4 +373,9 @@ std::optional InputScheme::experimentalFeature() const return {}; } +std::string publicKeys_to_string(const std::vector& publicKeys) +{ + return ((nlohmann::json) publicKeys).dump(); +} + } diff --git a/src/libfetchers/fetchers.hh b/src/libfetchers/fetchers.hh index 74789d5fa..6db1615f2 100644 --- a/src/libfetchers/fetchers.hh +++ b/src/libfetchers/fetchers.hh 
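
The `publicKeys` attribute documented above travels through the fetcher layer as JSON; the `PublicKey` struct this patch adds to `fetchers.hh` below relies on `NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_WITH_DEFAULT`, so a missing `type` falls back to `ssh-ed25519`. Here is a standalone sketch of that round-trip, assuming only nlohmann::json; the struct and key strings below are local to the example, not Nix's own definitions.

```cpp
#include <iostream>
#include <string>
#include <vector>
#include <nlohmann/json.hpp>

struct PublicKey
{
    std::string type = "ssh-ed25519";
    std::string key;
};
// Fields missing from the JSON fall back to the member initialisers above.
NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_WITH_DEFAULT(PublicKey, type, key)

int main()
{
    // The first entry omits "type", so it defaults to ssh-ed25519; keys are dummies.
    auto keys = nlohmann::json::parse(R"([
        { "key": "AAAAdummyKeyOne" },
        { "type": "ssh-rsa", "key": "AAAAdummyKeyTwo" }
    ])").get<std::vector<PublicKey>>();

    for (auto & k : keys)
        std::cout << k.type << " " << k.key << "\n";

    // Serialising the vector back to a JSON string is essentially what the
    // patch's publicKeys_to_string helper does.
    std::cout << nlohmann::json(keys).dump() << "\n";
}
```
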
@@ -184,4 +184,13 @@ void registerInputScheme(std::shared_ptr && fetcher); nlohmann::json dumpRegisterInputSchemeInfo(); +struct PublicKey +{ + std::string type = "ssh-ed25519"; + std::string key; +}; +NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_WITH_DEFAULT(PublicKey, type, key) + +std::string publicKeys_to_string(const std::vector&); + } diff --git a/src/libfetchers/git-utils.cc b/src/libfetchers/git-utils.cc index 1cd300d75..3a0e2d02f 100644 --- a/src/libfetchers/git-utils.cc +++ b/src/libfetchers/git-utils.cc @@ -2,6 +2,7 @@ #include "input-accessor.hh" #include "cache.hh" #include "finally.hh" +#include "processes.hh" #include @@ -21,6 +22,7 @@ #include #include +#include namespace std { @@ -365,6 +367,64 @@ struct GitRepoImpl : GitRepo, std::enable_shared_from_this refspec }, {}, true); } + + void verifyCommit( + const Hash & rev, + const std::vector & publicKeys) override + { + // Create ad-hoc allowedSignersFile and populate it with publicKeys + auto allowedSignersFile = createTempFile().second; + std::string allowedSigners; + for (const fetchers::PublicKey & k : publicKeys) { + if (k.type != "ssh-dsa" + && k.type != "ssh-ecdsa" + && k.type != "ssh-ecdsa-sk" + && k.type != "ssh-ed25519" + && k.type != "ssh-ed25519-sk" + && k.type != "ssh-rsa") + throw Error("Unknown key type '%s'.\n" + "Please use one of\n" + "- ssh-dsa\n" + " ssh-ecdsa\n" + " ssh-ecdsa-sk\n" + " ssh-ed25519\n" + " ssh-ed25519-sk\n" + " ssh-rsa", k.type); + allowedSigners += "* " + k.type + " " + k.key + "\n"; + } + writeFile(allowedSignersFile, allowedSigners); + + // Run verification command + auto [status, output] = runProgram(RunOptions { + .program = "git", + .args = { + "-c", + "gpg.ssh.allowedSignersFile=" + allowedSignersFile, + "-C", path.abs(), + "verify-commit", + rev.gitRev() + }, + .mergeStderrToStdout = true, + }); + + /* Evaluate result through status code and checking if public + key fingerprints appear on stderr. This is neccessary + because the git command might also succeed due to the + commit being signed by gpg keys that are present in the + users key agent. */ + std::string re = R"(Good "git" signature for \* with .* key SHA256:[)"; + for (const fetchers::PublicKey & k : publicKeys){ + // Calculate sha256 fingerprint from public key and escape the regex symbol '+' to match the key literally + auto fingerprint = trim(hashString(htSHA256, base64Decode(k.key)).to_string(nix::HashFormat::Base64, false), "="); + auto escaped_fingerprint = std::regex_replace(fingerprint, std::regex("\\+"), "\\+" ); + re += "(" + escaped_fingerprint + ")"; + } + re += "]"; + if (status == 0 && std::regex_search(output, std::regex(re))) + printTalkative("Signature verification on commit %s succeeded.", rev.gitRev()); + else + throw Error("Commit signature verification on commit %s failed: %s", rev.gitRev(), output); + } }; ref GitRepo::openRepo(const CanonPath & path, bool create, bool bare) diff --git a/src/libfetchers/git-utils.hh b/src/libfetchers/git-utils.hh index a425e5814..7efbdedce 100644 --- a/src/libfetchers/git-utils.hh +++ b/src/libfetchers/git-utils.hh @@ -4,6 +4,8 @@ namespace nix { +namespace fetchers { struct PublicKey; } + struct GitRepo { virtual ~GitRepo() @@ -72,6 +74,14 @@ struct GitRepo virtual void fetch( const std::string & url, const std::string & refspec) = 0; + + /** + * Verify that commit `rev` is signed by one of the keys in + * `publicKeys`. Throw an error if it isn't. 
+ */ + virtual void verifyCommit( + const Hash & rev, + const std::vector & publicKeys) = 0; }; } diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index 9d4f4addc..9c2a7df16 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -1,11 +1,12 @@ #include "fetchers.hh" +#include "users.hh" #include "cache.hh" #include "globals.hh" #include "tarfile.hh" #include "store-api.hh" #include "url-parts.hh" #include "pathlocks.hh" -#include "util.hh" +#include "processes.hh" #include "git.hh" #include "fs-input-accessor.hh" #include "union-input-accessor.hh" @@ -135,6 +136,19 @@ std::optional readHeadCached(const std::string & actualUrl) return std::nullopt; } +std::vector getPublicKeys(const Attrs & attrs) +{ + std::vector publicKeys; + if (attrs.contains("publicKeys")) { + nlohmann::json publicKeysJson = nlohmann::json::parse(getStrAttr(attrs, "publicKeys")); + ensureType(publicKeysJson, nlohmann::json::value_t::array); + publicKeys = publicKeysJson.get>(); + } + if (attrs.contains("publicKey")) + publicKeys.push_back(PublicKey{maybeGetStrAttr(attrs, "keytype").value_or("ssh-ed25519"),getStrAttr(attrs, "publicKey")}); + return publicKeys; +} + } // end namespace struct GitInputScheme : InputScheme @@ -155,9 +169,9 @@ struct GitInputScheme : InputScheme attrs.emplace("type", "git"); for (auto & [name, value] : url.query) { - if (name == "rev" || name == "ref") + if (name == "rev" || name == "ref" || name == "keytype" || name == "publicKey" || name == "publicKeys") attrs.emplace(name, value); - else if (name == "shallow" || name == "submodules" || name == "allRefs") + else if (name == "shallow" || name == "submodules" || name == "allRefs" || name == "verifyCommit") attrs.emplace(name, Explicit { value == "1" }); else url2.query.emplace(name, value); @@ -189,14 +203,26 @@ struct GitInputScheme : InputScheme "name", "dirtyRev", "dirtyShortRev", + "verifyCommit", + "keytype", + "publicKey", + "publicKeys", }; } std::optional inputFromAttrs(const Attrs & attrs) const override { + for (auto & [name, _] : attrs) + if (name == "verifyCommit" + || name == "keytype" + || name == "publicKey" + || name == "publicKeys") + experimentalFeatureSettings.require(Xp::VerifiedFetches); + maybeGetBoolAttr(attrs, "shallow"); maybeGetBoolAttr(attrs, "submodules"); maybeGetBoolAttr(attrs, "allRefs"); + maybeGetBoolAttr(attrs, "verifyCommit"); if (auto ref = maybeGetStrAttr(attrs, "ref")) { if (std::regex_search(*ref, badGitRefRegex)) @@ -219,6 +245,15 @@ struct GitInputScheme : InputScheme if (auto ref = input.getRef()) url.query.insert_or_assign("ref", *ref); if (maybeGetBoolAttr(input.attrs, "shallow").value_or(false)) url.query.insert_or_assign("shallow", "1"); + if (maybeGetBoolAttr(input.attrs, "verifyCommit").value_or(false)) + url.query.insert_or_assign("verifyCommit", "1"); + auto publicKeys = getPublicKeys(input.attrs); + if (publicKeys.size() == 1) { + url.query.insert_or_assign("keytype", publicKeys.at(0).type); + url.query.insert_or_assign("publicKey", publicKeys.at(0).key); + } + else if (publicKeys.size() > 1) + url.query.insert_or_assign("publicKeys", publicKeys_to_string(publicKeys)); return url; } @@ -416,6 +451,19 @@ struct GitInputScheme : InputScheme }; } + void verifyCommit(const Input & input, std::shared_ptr repo) const + { + auto publicKeys = getPublicKeys(input.attrs); + auto verifyCommit = maybeGetBoolAttr(input.attrs, "verifyCommit").value_or(!publicKeys.empty()); + + if (verifyCommit) { + if (input.getRev() && repo) + repo->verifyCommit(*input.getRev(), publicKeys); + 
else + throw Error("commit verification is required for Git repository '%s', but it's dirty", input.to_string()); + } + } + std::pair, Input> getAccessorFromCommit( ref store, RepoInfo & repoInfo, @@ -513,7 +561,9 @@ struct GitInputScheme : InputScheme // cache dir lock is removed at scope end; we will only use read-only operations on specific revisions in the remainder } - auto isShallow = GitRepo::openRepo(CanonPath(repoDir))->isShallow(); + auto repo = GitRepo::openRepo(CanonPath(repoDir)); + + auto isShallow = repo->isShallow(); if (isShallow && !repoInfo.shallow) throw Error("'%s' is a shallow Git repository, but shallow repositories are only allowed when `shallow = true;` is specified", repoInfo.url); @@ -533,7 +583,7 @@ struct GitInputScheme : InputScheme printTalkative("using revision %s of repo '%s'", rev.gitRev(), repoInfo.url); - auto repo = GitRepo::openRepo(CanonPath(repoDir)); + verifyCommit(input, repo); auto accessor = repo->getAccessor(rev); @@ -565,8 +615,7 @@ struct GitInputScheme : InputScheme } } - assert(input.getRev()); - assert(!origRev || origRev == input.getRev()); + assert(!origRev || origRev == rev); if (!repoInfo.shallow) input.attrs.insert_or_assign("revCount", getIntAttr(infoAttrs, "revCount")); input.attrs.insert_or_assign("lastModified", getIntAttr(infoAttrs, "lastModified")); @@ -615,14 +664,17 @@ struct GitInputScheme : InputScheme } if (!repoInfo.workdirInfo.isDirty) { - if (auto ref = GitRepo::openRepo(CanonPath(repoInfo.url))->getWorkdirRef()) + auto repo = GitRepo::openRepo(CanonPath(repoInfo.url)); + + if (auto ref = repo->getWorkdirRef()) input.attrs.insert_or_assign("ref", *ref); auto rev = repoInfo.workdirInfo.headRev.value(); input.attrs.insert_or_assign("rev", rev.gitRev()); - input.attrs.insert_or_assign("revCount", getRevCount(repoInfo, repoInfo.url, rev)); + + verifyCommit(input, repo); } else { repoInfo.warnDirty(); @@ -632,6 +684,8 @@ struct GitInputScheme : InputScheme input.attrs.insert_or_assign("dirtyShortRev", repoInfo.workdirInfo.headRev->gitShortRev() + "-dirty"); } + + verifyCommit(input, nullptr); } input.attrs.insert_or_assign( @@ -651,10 +705,10 @@ struct GitInputScheme : InputScheme auto repoInfo = getRepoInfo(input); - if (input.getRef() || input.getRev() || !repoInfo.isLocal) - return getAccessorFromCommit(store, repoInfo, std::move(input)); - else - return getAccessorFromWorkdir(store, repoInfo, std::move(input)); + return + input.getRef() || input.getRev() || !repoInfo.isLocal + ? 
getAccessorFromCommit(store, repoInfo, std::move(input)) + : getAccessorFromWorkdir(store, repoInfo, std::move(input)); } }; diff --git a/src/libfetchers/input-accessor.hh b/src/libfetchers/input-accessor.hh index 5dc05a363..9c688a234 100644 --- a/src/libfetchers/input-accessor.hh +++ b/src/libfetchers/input-accessor.hh @@ -1,8 +1,10 @@ #pragma once +///@file #include "source-accessor.hh" #include "ref.hh" #include "types.hh" +#include "file-system.hh" #include "repair-flag.hh" #include "content-address.hh" @@ -14,7 +16,7 @@ struct SourcePath; class StorePath; class Store; -struct InputAccessor : SourceAccessor, std::enable_shared_from_this +struct InputAccessor : virtual SourceAccessor, std::enable_shared_from_this { /** * Return the maximum last-modified time of the files in this diff --git a/src/libfetchers/memory-input-accessor.cc b/src/libfetchers/memory-input-accessor.cc index 6468ece41..057f3e37f 100644 --- a/src/libfetchers/memory-input-accessor.cc +++ b/src/libfetchers/memory-input-accessor.cc @@ -1,48 +1,16 @@ #include "memory-input-accessor.hh" +#include "memory-source-accessor.hh" namespace nix { -struct MemoryInputAccessorImpl : MemoryInputAccessor +struct MemoryInputAccessorImpl : MemoryInputAccessor, MemorySourceAccessor { - std::map files; - - std::string readFile(const CanonPath & path) override - { - auto i = files.find(path); - if (i == files.end()) - throw Error("file '%s' does not exist", path); - return i->second; - } - - bool pathExists(const CanonPath & path) override - { - auto i = files.find(path); - return i != files.end(); - } - - std::optional maybeLstat(const CanonPath & path) override - { - auto i = files.find(path); - if (i != files.end()) - return Stat { .type = tRegular, .isExecutable = false }; - return std::nullopt; - } - - DirEntries readDirectory(const CanonPath & path) override - { - return {}; - } - - std::string readLink(const CanonPath & path) override - { - throw UnimplementedError("MemoryInputAccessor::readLink"); - } - SourcePath addFile(CanonPath path, std::string && contents) override { - files.emplace(path, std::move(contents)); - - return {ref(shared_from_this()), std::move(path)}; + return { + ref(shared_from_this()), + MemorySourceAccessor::addFile(path, std::move(contents)) + }; } }; diff --git a/src/libfetchers/mercurial.cc b/src/libfetchers/mercurial.cc index eda33dfe7..9244acf39 100644 --- a/src/libfetchers/mercurial.cc +++ b/src/libfetchers/mercurial.cc @@ -1,4 +1,6 @@ #include "fetchers.hh" +#include "processes.hh" +#include "users.hh" #include "cache.hh" #include "globals.hh" #include "tarfile.hh" diff --git a/src/libfetchers/registry.cc b/src/libfetchers/registry.cc index a0fff9ceb..9c7bc0cfe 100644 --- a/src/libfetchers/registry.cc +++ b/src/libfetchers/registry.cc @@ -1,6 +1,6 @@ #include "registry.hh" #include "tarball.hh" -#include "util.hh" +#include "users.hh" #include "globals.hh" #include "store-api.hh" #include "local-fs-store.hh" diff --git a/src/libmain/common-args.cc b/src/libmain/common-args.cc index 205b77808..5b49aaabc 100644 --- a/src/libmain/common-args.cc +++ b/src/libmain/common-args.cc @@ -1,7 +1,9 @@ #include "common-args.hh" #include "args/root.hh" #include "globals.hh" +#include "logging.hh" #include "loggers.hh" +#include "util.hh" namespace nix { diff --git a/src/libmain/loggers.cc b/src/libmain/loggers.cc index cda5cb939..9829859de 100644 --- a/src/libmain/loggers.cc +++ b/src/libmain/loggers.cc @@ -1,6 +1,6 @@ #include "loggers.hh" +#include "environment-variables.hh" #include "progress-bar.hh" -#include 
"util.hh" namespace nix { diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index 45b1fdfd1..a7aee47c3 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -1,5 +1,5 @@ #include "progress-bar.hh" -#include "util.hh" +#include "terminal.hh" #include "sync.hh" #include "store-api.hh" #include "names.hh" diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 9c2ad039a..862ef355b 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -1,10 +1,11 @@ #include "globals.hh" +#include "current-process.hh" #include "shared.hh" #include "store-api.hh" #include "gc-store.hh" -#include "util.hh" #include "loggers.hh" #include "progress-bar.hh" +#include "signals.hh" #include #include diff --git a/src/libmain/shared.hh b/src/libmain/shared.hh index 3159fe479..c68f6cd83 100644 --- a/src/libmain/shared.hh +++ b/src/libmain/shared.hh @@ -1,7 +1,7 @@ #pragma once ///@file -#include "util.hh" +#include "processes.hh" #include "args.hh" #include "args/root.hh" #include "common-args.hh" diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index 6a52c4c51..ae483c95e 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -11,6 +11,7 @@ #include "nar-accessor.hh" #include "thread-pool.hh" #include "callback.hh" +#include "signals.hh" #include #include diff --git a/src/libstore/build/child.cc b/src/libstore/build/child.cc new file mode 100644 index 000000000..aa31c3caf --- /dev/null +++ b/src/libstore/build/child.cc @@ -0,0 +1,37 @@ +#include "child.hh" +#include "current-process.hh" +#include "logging.hh" + +#include +#include + +namespace nix { + +void commonChildInit() +{ + logger = makeSimpleLogger(); + + const static std::string pathNullDevice = "/dev/null"; + restoreProcessContext(false); + + /* Put the child in a separate session (and thus a separate + process group) so that it has no controlling terminal (meaning + that e.g. ssh cannot open /dev/tty) and it doesn't receive + terminal signals. */ + if (setsid() == -1) + throw SysError("creating a new session"); + + /* Dup stderr to stdout. */ + if (dup2(STDERR_FILENO, STDOUT_FILENO) == -1) + throw SysError("cannot dup stderr into stdout"); + + /* Reroute stdin to /dev/null. */ + int fdDevNull = open(pathNullDevice.c_str(), O_RDWR); + if (fdDevNull == -1) + throw SysError("cannot open '%1%'", pathNullDevice); + if (dup2(fdDevNull, STDIN_FILENO) == -1) + throw SysError("cannot dup null device into stdin"); + close(fdDevNull); +} + +} diff --git a/src/libstore/build/child.hh b/src/libstore/build/child.hh new file mode 100644 index 000000000..3dfc552b9 --- /dev/null +++ b/src/libstore/build/child.hh @@ -0,0 +1,11 @@ +#pragma once +///@file + +namespace nix { + +/** + * Common initialisation performed in child processes. 
+ */ +void commonChildInit(); + +} diff --git a/src/libstore/build/hook-instance.cc b/src/libstore/build/hook-instance.cc index 337c60bd4..5d045ec3d 100644 --- a/src/libstore/build/hook-instance.cc +++ b/src/libstore/build/hook-instance.cc @@ -1,5 +1,7 @@ #include "globals.hh" #include "hook-instance.hh" +#include "file-system.hh" +#include "child.hh" namespace nix { diff --git a/src/libstore/build/hook-instance.hh b/src/libstore/build/hook-instance.hh index d84f62877..61cf534f4 100644 --- a/src/libstore/build/hook-instance.hh +++ b/src/libstore/build/hook-instance.hh @@ -3,6 +3,7 @@ #include "logging.hh" #include "serialise.hh" +#include "processes.hh" namespace nix { diff --git a/src/libstore/build/local-derivation-goal.cc b/src/libstore/build/local-derivation-goal.cc index dcb7dc6bc..adb011e30 100644 --- a/src/libstore/build/local-derivation-goal.cc +++ b/src/libstore/build/local-derivation-goal.cc @@ -15,7 +15,10 @@ #include "json-utils.hh" #include "cgroup.hh" #include "personality.hh" +#include "current-process.hh" #include "namespaces.hh" +#include "child.hh" +#include "unix-domain-socket.hh" #include #include @@ -1620,6 +1623,8 @@ void setupSeccomp() seccomp_release(ctx); }); + constexpr std::string_view nativeSystem = SYSTEM; + if (nativeSystem == "x86_64-linux" && seccomp_arch_add(ctx, SCMP_ARCH_X86) != 0) throw SysError("unable to add 32-bit seccomp architecture"); diff --git a/src/libstore/build/local-derivation-goal.hh b/src/libstore/build/local-derivation-goal.hh index 1cb68a869..88152a645 100644 --- a/src/libstore/build/local-derivation-goal.hh +++ b/src/libstore/build/local-derivation-goal.hh @@ -3,6 +3,7 @@ #include "derivation-goal.hh" #include "local-store.hh" +#include "processes.hh" namespace nix { diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index 37cb86b91..01914e2d6 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -4,6 +4,7 @@ #include "drv-output-substitution-goal.hh" #include "local-derivation-goal.hh" #include "hook-instance.hh" +#include "signals.hh" #include diff --git a/src/libstore/common-protocol.cc b/src/libstore/common-protocol.cc index f906814bc..68445258f 100644 --- a/src/libstore/common-protocol.cc +++ b/src/libstore/common-protocol.cc @@ -1,5 +1,4 @@ #include "serialise.hh" -#include "util.hh" #include "path-with-outputs.hh" #include "store-api.hh" #include "build-result.hh" diff --git a/src/libstore/crypto.cc b/src/libstore/crypto.cc index 1027469c9..1b705733c 100644 --- a/src/libstore/crypto.cc +++ b/src/libstore/crypto.cc @@ -1,4 +1,5 @@ #include "crypto.hh" +#include "file-system.hh" #include "util.hh" #include "globals.hh" diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index efdad18e1..1fecd1c97 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -352,7 +352,7 @@ Derivation parseDerivation( expect(str, "erive("); version = DerivationATermVersion::Traditional; break; - case 'r': + case 'r': { expect(str, "rvWithVersion("); auto versionS = parseString(str); if (versionS == "xp-dyn-drv") { @@ -365,6 +365,9 @@ Derivation parseDerivation( expect(str, ","); break; } + default: + throw Error("derivation does not start with 'Derive' or 'DrvWithVersion'"); + } /* Parse the list of outputs. 
*/ expect(str, "["); diff --git a/src/libstore/derived-path-map.cc b/src/libstore/derived-path-map.cc index 5982c04b3..4c1ea417a 100644 --- a/src/libstore/derived-path-map.cc +++ b/src/libstore/derived-path-map.cc @@ -1,4 +1,5 @@ #include "derived-path-map.hh" +#include "util.hh" namespace nix { diff --git a/src/libstore/derived-path-map.hh b/src/libstore/derived-path-map.hh index 4d72b301e..393cdedf7 100644 --- a/src/libstore/derived-path-map.hh +++ b/src/libstore/derived-path-map.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" #include "derived-path.hh" diff --git a/src/libstore/derived-path.hh b/src/libstore/derived-path.hh index 4d7033df2..6c5dfeed9 100644 --- a/src/libstore/derived-path.hh +++ b/src/libstore/derived-path.hh @@ -1,10 +1,10 @@ #pragma once ///@file -#include "util.hh" #include "path.hh" #include "outputs-spec.hh" #include "comparator.hh" +#include "config.hh" #include diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index a283af5a2..dcbec4acd 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -1,11 +1,12 @@ #include "filetransfer.hh" -#include "util.hh" +#include "namespaces.hh" #include "globals.hh" #include "store-api.hh" #include "s3.hh" #include "compression.hh" #include "finally.hh" #include "callback.hh" +#include "signals.hh" #if ENABLE_S3 #include diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index fb7895817..8d05ae4bd 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -2,6 +2,13 @@ #include "globals.hh" #include "local-store.hh" #include "finally.hh" +#include "unix-domain-socket.hh" +#include "signals.hh" + +#if !defined(__linux__) +// For shelling out to lsof +# include "processes.hh" +#endif #include #include diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 9c25d9868..cc416a4d6 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -1,7 +1,8 @@ #include "globals.hh" -#include "util.hh" +#include "current-process.hh" #include "archive.hh" #include "args.hh" +#include "users.hh" #include "abstract-setting-to-json.hh" #include "compute-levels.hh" @@ -17,9 +18,13 @@ #include #ifdef __GLIBC__ -#include -#include -#include +# include +# include +# include +#endif + +#if __APPLE__ +# include "processes.hh" #endif #include "config-impl.hh" diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index 12fb48d93..8e034f5a9 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -3,7 +3,7 @@ #include "types.hh" #include "config.hh" -#include "util.hh" +#include "environment-variables.hh" #include "experimental-features.hh" #include diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index a5e9426f8..2a3582ad8 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -10,6 +10,7 @@ #include "topo-sort.hh" #include "finally.hh" #include "compression.hh" +#include "signals.hh" #include #include diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index fe26a0f27..6d589bee5 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -7,7 +7,6 @@ #include "store-api.hh" #include "indirect-root-store.hh" #include "sync.hh" -#include "util.hh" #include #include diff --git a/src/libstore/lock.cc b/src/libstore/lock.cc index 165e4969f..87f55ce49 100644 --- a/src/libstore/lock.cc +++ b/src/libstore/lock.cc @@ -1,4 +1,5 @@ #include "lock.hh" +#include "file-system.hh" #include "globals.hh" #include "pathlocks.hh" diff --git a/src/libstore/machines.cc 
b/src/libstore/machines.cc index e87f46980..512115893 100644 --- a/src/libstore/machines.cc +++ b/src/libstore/machines.cc @@ -1,5 +1,4 @@ #include "machines.hh" -#include "util.hh" #include "globals.hh" #include "store-api.hh" diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc index 02993680f..15b05fe25 100644 --- a/src/libstore/nar-accessor.cc +++ b/src/libstore/nar-accessor.cc @@ -60,12 +60,22 @@ struct NarAccessor : public SourceAccessor void createDirectory(const Path & path) override { - createMember(path, {Type::tDirectory, false, 0, 0}); + createMember(path, NarMember{ .stat = { + .type = Type::tDirectory, + .fileSize = 0, + .isExecutable = false, + .narOffset = 0 + } }); } void createRegularFile(const Path & path) override { - createMember(path, {Type::tRegular, false, 0, 0}); + createMember(path, NarMember{ .stat = { + .type = Type::tRegular, + .fileSize = 0, + .isExecutable = false, + .narOffset = 0 + } }); } void closeRegularFile() override @@ -78,9 +88,8 @@ struct NarAccessor : public SourceAccessor void preallocateContents(uint64_t size) override { - assert(size <= std::numeric_limits::max()); auto & st = parents.top()->stat; - st.fileSize = (uint64_t) size; + st.fileSize = size; st.narOffset = pos; } @@ -128,9 +137,8 @@ struct NarAccessor : public SourceAccessor if (type == "directory") { member.stat = {.type = Type::tDirectory}; - for (auto i = v["entries"].begin(); i != v["entries"].end(); ++i) { - std::string name = i.key(); - recurse(member.children[name], i.value()); + for (const auto &[name, function] : v["entries"].items()) { + recurse(member.children[name], function); } } else if (type == "regular") { member.stat = { @@ -153,7 +161,7 @@ struct NarAccessor : public SourceAccessor { NarMember * current = &root; - for (auto & i : path) { + for (const auto & i : path) { if (current->stat.type != Type::tDirectory) return nullptr; auto child = current->children.find(std::string(i)); if (child == current->children.end()) return nullptr; @@ -186,7 +194,7 @@ struct NarAccessor : public SourceAccessor throw Error("path '%1%' inside NAR file is not a directory", path); DirEntries res; - for (auto & child : i.children) + for (const auto & child : i.children) res.insert_or_assign(child.first, std::nullopt); return res; @@ -251,7 +259,7 @@ json listNar(ref accessor, const CanonPath & path, bool recurse) { obj["entries"] = json::object(); json &res2 = obj["entries"]; - for (auto & [name, type] : accessor->readDirectory(path)) { + for (const auto & [name, type] : accessor->readDirectory(path)) { if (recurse) { res2[name] = listNar(accessor, path + name, true); } else diff --git a/src/libstore/nar-accessor.hh b/src/libstore/nar-accessor.hh index 433774524..0043897c6 100644 --- a/src/libstore/nar-accessor.hh +++ b/src/libstore/nar-accessor.hh @@ -25,7 +25,7 @@ ref makeNarAccessor(Source & source); * readFile() method of the accessor to get the contents of files * inside the NAR. 
*/ -typedef std::function GetNarBytes; +using GetNarBytes = std::function; ref makeLazyNarAccessor( const std::string & listing, diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc index cdbcf7e74..e50c15939 100644 --- a/src/libstore/nar-info-disk-cache.cc +++ b/src/libstore/nar-info-disk-cache.cc @@ -1,4 +1,5 @@ #include "nar-info-disk-cache.hh" +#include "users.hh" #include "sync.hh" #include "sqlite.hh" #include "globals.hh" diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc index ee2ddfd81..1060a6c8b 100644 --- a/src/libstore/nar-info.cc +++ b/src/libstore/nar-info.cc @@ -4,6 +4,15 @@ namespace nix { +GENERATE_CMP_EXT( + , + NarInfo, + me->url, + me->compression, + me->fileHash, + me->fileSize, + static_cast(*me)); + NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & whence) : ValidPathInfo(StorePath(StorePath::dummy), Hash(Hash::dummy)) // FIXME: hack { @@ -29,12 +38,12 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & while (pos < s.size()) { size_t colon = s.find(':', pos); - if (colon == std::string::npos) throw corrupt("expecting ':'"); + if (colon == s.npos) throw corrupt("expecting ':'"); std::string name(s, pos, colon - pos); size_t eol = s.find('\n', colon + 2); - if (eol == std::string::npos) throw corrupt("expecting '\\n'"); + if (eol == s.npos) throw corrupt("expecting '\\n'"); std::string value(s, colon + 2, eol - colon - 2); @@ -125,4 +134,59 @@ std::string NarInfo::to_string(const Store & store) const return res; } +nlohmann::json NarInfo::toJSON( + const Store & store, + bool includeImpureInfo, + HashFormat hashFormat) const +{ + using nlohmann::json; + + auto jsonObject = ValidPathInfo::toJSON(store, includeImpureInfo, hashFormat); + + if (includeImpureInfo) { + if (!url.empty()) + jsonObject["url"] = url; + if (!compression.empty()) + jsonObject["compression"] = compression; + if (fileHash) + jsonObject["downloadHash"] = fileHash->to_string(hashFormat, true); + if (fileSize) + jsonObject["downloadSize"] = fileSize; + } + + return jsonObject; +} + +NarInfo NarInfo::fromJSON( + const Store & store, + const StorePath & path, + const nlohmann::json & json) +{ + using nlohmann::detail::value_t; + + NarInfo res { + ValidPathInfo { + path, + UnkeyedValidPathInfo::fromJSON(store, json), + } + }; + + if (json.contains("url")) + res.url = ensureType(valueAt(json, "url"), value_t::string); + + if (json.contains("compression")) + res.compression = ensureType(valueAt(json, "compression"), value_t::string); + + if (json.contains("downloadHash")) + res.fileHash = Hash::parseAny( + static_cast( + ensureType(valueAt(json, "downloadHash"), value_t::string)), + std::nullopt); + + if (json.contains("downloadSize")) + res.fileSize = ensureType(valueAt(json, "downloadSize"), value_t::number_integer); + + return res; +} + } diff --git a/src/libstore/nar-info.hh b/src/libstore/nar-info.hh index 5dbdafac3..fd538a7cd 100644 --- a/src/libstore/nar-info.hh +++ b/src/libstore/nar-info.hh @@ -17,14 +17,25 @@ struct NarInfo : ValidPathInfo uint64_t fileSize = 0; NarInfo() = delete; - NarInfo(const Store & store, std::string && name, ContentAddressWithReferences && ca, Hash narHash) + NarInfo(const Store & store, std::string name, ContentAddressWithReferences ca, Hash narHash) : ValidPathInfo(store, std::move(name), std::move(ca), narHash) { } - NarInfo(StorePath && path, Hash narHash) : ValidPathInfo(std::move(path), narHash) { } + NarInfo(StorePath path, Hash narHash) : 
ValidPathInfo(std::move(path), narHash) { } NarInfo(const ValidPathInfo & info) : ValidPathInfo(info) { } NarInfo(const Store & store, const std::string & s, const std::string & whence); + DECLARE_CMP(NarInfo); + std::string to_string(const Store & store) const; + + nlohmann::json toJSON( + const Store & store, + bool includeImpureInfo, + HashFormat hashFormat) const override; + static NarInfo fromJSON( + const Store & store, + const StorePath & path, + const nlohmann::json & json); }; } diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc index 23c6a41e4..a4ac413b3 100644 --- a/src/libstore/optimise-store.cc +++ b/src/libstore/optimise-store.cc @@ -1,6 +1,6 @@ -#include "util.hh" #include "local-store.hh" #include "globals.hh" +#include "signals.hh" #include #include diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc index 1d900c272..73e55a96c 100644 --- a/src/libstore/parsed-derivations.cc +++ b/src/libstore/parsed-derivations.cc @@ -132,6 +132,41 @@ bool ParsedDerivation::useUidRange() const static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*"); +/** + * Write a JSON representation of store object metadata, such as the + * hash and the references. + */ +static nlohmann::json pathInfoToJSON( + Store & store, + const StorePathSet & storePaths) +{ + nlohmann::json::array_t jsonList = nlohmann::json::array(); + + for (auto & storePath : storePaths) { + auto info = store.queryPathInfo(storePath); + + auto & jsonPath = jsonList.emplace_back( + info->toJSON(store, false, HashFormat::Base32)); + + // Add the path to the object whose metadata we are including. + jsonPath["path"] = store.printStorePath(storePath); + + jsonPath["valid"] = true; + + jsonPath["closureSize"] = ({ + uint64_t totalNarSize = 0; + StorePathSet closure; + store.computeFSClosure(info->path, closure, false, false); + for (auto & p : closure) { + auto info = store.queryPathInfo(p); + totalNarSize += info->narSize; + } + totalNarSize; + }); + } + return jsonList; +} + std::optional ParsedDerivation::prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths) { auto structuredAttrs = getStructuredAttrs(); @@ -152,8 +187,8 @@ std::optional ParsedDerivation::prepareStructuredAttrs(Store & s StorePathSet storePaths; for (auto & p : *i) storePaths.insert(store.parseStorePath(p.get())); - json[i.key()] = store.pathInfoToJSON( - store.exportReferences(storePaths, inputPaths), false, true); + json[i.key()] = pathInfoToJSON(store, + store.exportReferences(storePaths, inputPaths)); } } diff --git a/src/libstore/path-info.cc b/src/libstore/path-info.cc index ab39e71f4..2d7dc972f 100644 --- a/src/libstore/path-info.cc +++ b/src/libstore/path-info.cc @@ -1,5 +1,8 @@ +#include + #include "path-info.hh" #include "store-api.hh" +#include "json-utils.hh" namespace nix { @@ -144,4 +147,94 @@ ValidPathInfo::ValidPathInfo( }, std::move(ca).raw); } + +nlohmann::json UnkeyedValidPathInfo::toJSON( + const Store & store, + bool includeImpureInfo, + HashFormat hashFormat) const +{ + using nlohmann::json; + + auto jsonObject = json::object(); + + jsonObject["narHash"] = narHash.to_string(hashFormat, true); + jsonObject["narSize"] = narSize; + + { + auto& jsonRefs = (jsonObject["references"] = json::array()); + for (auto & ref : references) + jsonRefs.emplace_back(store.printStorePath(ref)); + } + + if (ca) + jsonObject["ca"] = renderContentAddress(ca); + + if (includeImpureInfo) { + if (deriver) + jsonObject["deriver"] = store.printStorePath(*deriver); + + if (registrationTime) + 
jsonObject["registrationTime"] = registrationTime; + + if (ultimate) + jsonObject["ultimate"] = ultimate; + + if (!sigs.empty()) { + for (auto & sig : sigs) + jsonObject["signatures"].push_back(sig); + } + } + + return jsonObject; +} + +UnkeyedValidPathInfo UnkeyedValidPathInfo::fromJSON( + const Store & store, + const nlohmann::json & json) +{ + using nlohmann::detail::value_t; + + UnkeyedValidPathInfo res { + Hash(Hash::dummy), + }; + + ensureType(json, value_t::object); + res.narHash = Hash::parseAny( + static_cast( + ensureType(valueAt(json, "narHash"), value_t::string)), + std::nullopt); + res.narSize = ensureType(valueAt(json, "narSize"), value_t::number_integer); + + try { + auto & references = ensureType(valueAt(json, "references"), value_t::array); + for (auto & input : references) + res.references.insert(store.parseStorePath(static_cast +(input))); + } catch (Error & e) { + e.addTrace({}, "while reading key 'references'"); + throw; + } + + if (json.contains("ca")) + res.ca = ContentAddress::parse( + static_cast( + ensureType(valueAt(json, "ca"), value_t::string))); + + if (json.contains("deriver")) + res.deriver = store.parseStorePath( + static_cast( + ensureType(valueAt(json, "deriver"), value_t::string))); + + if (json.contains("registrationTime")) + res.registrationTime = ensureType(valueAt(json, "registrationTime"), value_t::number_integer); + + if (json.contains("ultimate")) + res.ultimate = ensureType(valueAt(json, "ultimate"), value_t::boolean); + + if (json.contains("signatures")) + res.sigs = valueAt(json, "signatures"); + + return res; +} + } diff --git a/src/libstore/path-info.hh b/src/libstore/path-info.hh index c4c4a6366..077abc7e1 100644 --- a/src/libstore/path-info.hh +++ b/src/libstore/path-info.hh @@ -78,6 +78,18 @@ struct UnkeyedValidPathInfo DECLARE_CMP(UnkeyedValidPathInfo); virtual ~UnkeyedValidPathInfo() { } + + /** + * @param includeImpureInfo If true, variable elements such as the + * registration time are included. 
+ */ + virtual nlohmann::json toJSON( + const Store & store, + bool includeImpureInfo, + HashFormat hashFormat) const; + static UnkeyedValidPathInfo fromJSON( + const Store & store, + const nlohmann::json & json); }; struct ValidPathInfo : UnkeyedValidPathInfo { diff --git a/src/libstore/path-references.cc b/src/libstore/path-references.cc index 33cf66ce3..274b596c0 100644 --- a/src/libstore/path-references.cc +++ b/src/libstore/path-references.cc @@ -1,6 +1,5 @@ #include "path-references.hh" #include "hash.hh" -#include "util.hh" #include "archive.hh" #include diff --git a/src/libstore/path-references.hh b/src/libstore/path-references.hh index 7b44e3261..0553003f8 100644 --- a/src/libstore/path-references.hh +++ b/src/libstore/path-references.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "references.hh" #include "path.hh" diff --git a/src/libstore/pathlocks.cc b/src/libstore/pathlocks.cc index adc763e6a..2b5b8dfe7 100644 --- a/src/libstore/pathlocks.cc +++ b/src/libstore/pathlocks.cc @@ -1,6 +1,7 @@ #include "pathlocks.hh" #include "util.hh" #include "sync.hh" +#include "signals.hh" #include #include diff --git a/src/libstore/pathlocks.hh b/src/libstore/pathlocks.hh index 4921df352..7fcfa2e40 100644 --- a/src/libstore/pathlocks.hh +++ b/src/libstore/pathlocks.hh @@ -1,7 +1,7 @@ #pragma once ///@file -#include "util.hh" +#include "file-descriptor.hh" namespace nix { diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc index 239047dd6..e8b88693d 100644 --- a/src/libstore/profiles.cc +++ b/src/libstore/profiles.cc @@ -1,7 +1,7 @@ #include "profiles.hh" #include "store-api.hh" #include "local-fs-store.hh" -#include "util.hh" +#include "users.hh" #include #include diff --git a/src/libstore/remote-store-connection.hh b/src/libstore/remote-store-connection.hh index e4a9cacb9..44328b06b 100644 --- a/src/libstore/remote-store-connection.hh +++ b/src/libstore/remote-store-connection.hh @@ -1,3 +1,6 @@ +#pragma once +///@file + #include "remote-store.hh" #include "worker-protocol.hh" #include "pool.hh" diff --git a/src/libstore/serve-protocol.cc b/src/libstore/serve-protocol.cc index 97a0ddf0e..9bfcc279c 100644 --- a/src/libstore/serve-protocol.cc +++ b/src/libstore/serve-protocol.cc @@ -1,5 +1,4 @@ #include "serialise.hh" -#include "util.hh" #include "path-with-outputs.hh" #include "store-api.hh" #include "build-result.hh" diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc index 7c8decb74..d7432a305 100644 --- a/src/libstore/sqlite.cc +++ b/src/libstore/sqlite.cc @@ -2,6 +2,7 @@ #include "globals.hh" #include "util.hh" #include "url.hh" +#include "signals.hh" #include diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc index da32f1b79..5c8d6a504 100644 --- a/src/libstore/ssh.cc +++ b/src/libstore/ssh.cc @@ -1,5 +1,8 @@ #include "ssh.hh" #include "finally.hh" +#include "current-process.hh" +#include "environment-variables.hh" +#include "util.hh" namespace nix { @@ -111,8 +114,10 @@ std::unique_ptr SSHMaster::startCommand(const std::string reply = readLine(out.readSide.get()); } catch (EndOfFile & e) { } - if (reply != "started") + if (reply != "started") { + printTalkative("SSH stdout first line: %s", reply); throw Error("failed to start SSH connection to '%s'", host); + } } conn->out = std::move(out.readSide); @@ -129,7 +134,6 @@ Path SSHMaster::startMaster() if (state->sshMaster != -1) return state->socketPath; - state->socketPath = (Path) *state->tmpDir + "/ssh.sock"; Pipe out; @@ -141,7 +145,8 @@ Path SSHMaster::startMaster() logger->pause(); Finally cleanup = [&]() 
{ logger->resume(); }; - bool wasMasterRunning = isMasterRunning(); + if (isMasterRunning()) + return state->socketPath; state->sshMaster = startProcess([&]() { restoreProcessContext(); @@ -162,14 +167,14 @@ Path SSHMaster::startMaster() out.writeSide = -1; - if (!wasMasterRunning) { - std::string reply; - try { - reply = readLine(out.readSide.get()); - } catch (EndOfFile & e) { } + std::string reply; + try { + reply = readLine(out.readSide.get()); + } catch (EndOfFile & e) { } - if (reply != "started") - throw Error("failed to start SSH master connection to '%s'", host); + if (reply != "started") { + printTalkative("SSH master stdout first line: %s", reply); + throw Error("failed to start SSH master connection to '%s'", host); } return state->socketPath; diff --git a/src/libstore/ssh.hh b/src/libstore/ssh.hh index 94b952af9..bfcd6f21c 100644 --- a/src/libstore/ssh.hh +++ b/src/libstore/ssh.hh @@ -1,8 +1,9 @@ #pragma once ///@file -#include "util.hh" #include "sync.hh" +#include "processes.hh" +#include "file-system.hh" namespace nix { diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index ac96e8bb1..0f88d9b92 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -14,6 +14,8 @@ // FIXME this should not be here, see TODO below on // `addMultipleToStore`. #include "worker-protocol.hh" +#include "signals.hh" +#include "users.hh" #include #include @@ -819,7 +821,7 @@ void Store::substitutePaths(const StorePathSet & paths) std::vector paths2; for (auto & path : paths) if (!path.isDerivation()) - paths2.push_back(DerivedPath::Opaque{path}); + paths2.emplace_back(DerivedPath::Opaque{path}); uint64_t downloadSize, narSize; StorePathSet willBuild, willSubstitute, unknown; queryMissing(paths2, @@ -949,96 +951,6 @@ StorePathSet Store::exportReferences(const StorePathSet & storePaths, const Stor return paths; } -json Store::pathInfoToJSON(const StorePathSet & storePaths, - bool includeImpureInfo, bool showClosureSize, - HashFormat hashFormat, - AllowInvalidFlag allowInvalid) -{ - json::array_t jsonList = json::array(); - - for (auto & storePath : storePaths) { - auto& jsonPath = jsonList.emplace_back(json::object()); - - try { - auto info = queryPathInfo(storePath); - - jsonPath["path"] = printStorePath(info->path); - jsonPath["valid"] = true; - jsonPath["narHash"] = info->narHash.to_string(hashFormat, true); - jsonPath["narSize"] = info->narSize; - - { - auto& jsonRefs = (jsonPath["references"] = json::array()); - for (auto & ref : info->references) - jsonRefs.emplace_back(printStorePath(ref)); - } - - if (info->ca) - jsonPath["ca"] = renderContentAddress(info->ca); - - std::pair closureSizes; - - if (showClosureSize) { - closureSizes = getClosureSize(info->path); - jsonPath["closureSize"] = closureSizes.first; - } - - if (includeImpureInfo) { - - if (info->deriver) - jsonPath["deriver"] = printStorePath(*info->deriver); - - if (info->registrationTime) - jsonPath["registrationTime"] = info->registrationTime; - - if (info->ultimate) - jsonPath["ultimate"] = info->ultimate; - - if (!info->sigs.empty()) { - for (auto & sig : info->sigs) - jsonPath["signatures"].push_back(sig); - } - - auto narInfo = std::dynamic_pointer_cast( - std::shared_ptr(info)); - - if (narInfo) { - if (!narInfo->url.empty()) - jsonPath["url"] = narInfo->url; - if (narInfo->fileHash) - jsonPath["downloadHash"] = narInfo->fileHash->to_string(hashFormat, true); - if (narInfo->fileSize) - jsonPath["downloadSize"] = narInfo->fileSize; - if (showClosureSize) - jsonPath["closureDownloadSize"] = 
closureSizes.second; - } - } - - } catch (InvalidPath &) { - jsonPath["path"] = printStorePath(storePath); - jsonPath["valid"] = false; - } - } - return jsonList; -} - - -std::pair Store::getClosureSize(const StorePath & storePath) -{ - uint64_t totalNarSize = 0, totalDownloadSize = 0; - StorePathSet closure; - computeFSClosure(storePath, closure, false, false); - for (auto & p : closure) { - auto info = queryPathInfo(p); - totalNarSize += info->narSize; - auto narInfo = std::dynamic_pointer_cast( - std::shared_ptr(info)); - if (narInfo) - totalDownloadSize += narInfo->fileSize; - } - return {totalNarSize, totalDownloadSize}; -} - const Store::Stats & Store::getStats() { diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index 6aa317e3d..32ad2aa44 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -80,7 +80,6 @@ typedef std::map OutputPathMap; enum CheckSigsFlag : bool { NoCheckSigs = false, CheckSigs = true }; enum SubstituteFlag : bool { NoSubstitute = false, Substitute = true }; -enum AllowInvalidFlag : bool { DisallowInvalid = false, AllowInvalid = true }; /** * Magic header of exportPath() output (obsolete). @@ -665,28 +664,6 @@ public: std::string makeValidityRegistration(const StorePathSet & paths, bool showDerivers, bool showHash); - /** - * Write a JSON representation of store path metadata, such as the - * hash and the references. - * - * @param includeImpureInfo If true, variable elements such as the - * registration time are included. - * - * @param showClosureSize If true, the closure size of each path is - * included. - */ - nlohmann::json pathInfoToJSON(const StorePathSet & storePaths, - bool includeImpureInfo, bool showClosureSize, - HashFormat hashFormat = HashFormat::Base32, - AllowInvalidFlag allowInvalid = DisallowInvalid); - - /** - * @return the size of the closure of the specified path, that is, - * the sum of the size of the NAR serialisation of each path in the - * closure. - */ - std::pair getClosureSize(const StorePath & storePath); - /** * Optimise the disk space usage of the Nix store by hard-linking files * with the same contents. diff --git a/src/libstore/tests/characterization.hh b/src/libstore/tests/characterization.hh deleted file mode 100644 index 46bf4b2e5..000000000 --- a/src/libstore/tests/characterization.hh +++ /dev/null @@ -1,28 +0,0 @@ -#pragma once -///@file - -namespace nix { - -/** - * The path to the `unit-test-data` directory. See the contributing - * guide in the manual for further details. - */ -static Path getUnitTestData() { - return getEnv("_NIX_TEST_UNIT_DATA").value(); -} - -/** - * Whether we should update "golden masters" instead of running tests - * against them. See the contributing guide in the manual for further - * details. 
- */ -static bool testAccept() { - return getEnv("_NIX_TEST_ACCEPT") == "1"; -} - -constexpr std::string_view cannotReadGoldenMaster = - "Cannot read golden master because another test is also updating it"; - -constexpr std::string_view updatingGoldenMaster = - "Updating golden master"; -} diff --git a/src/libstore/tests/common-protocol.cc b/src/libstore/tests/common-protocol.cc index b3f4977d2..c09ac6a3e 100644 --- a/src/libstore/tests/common-protocol.cc +++ b/src/libstore/tests/common-protocol.cc @@ -20,16 +20,9 @@ public: * Golden test for `T` reading */ template - void readTest(PathView testStem, T value) + void readProtoTest(PathView testStem, const T & expected) { - if (testAccept()) - { - GTEST_SKIP() << cannotReadGoldenMaster; - } - else - { - auto encoded = readFile(goldenMaster(testStem)); - + CharacterizationTest::readTest(testStem, [&](const auto & encoded) { T got = ({ StringSource from { encoded }; CommonProto::Serialise::read( @@ -37,44 +30,33 @@ public: CommonProto::ReadConn { .from = from }); }); - ASSERT_EQ(got, value); - } + ASSERT_EQ(got, expected); + }); } /** * Golden test for `T` write */ template - void writeTest(PathView testStem, const T & value) + void writeProtoTest(PathView testStem, const T & decoded) { - auto file = goldenMaster(testStem); - - StringSink to; - CommonProto::write( - *store, - CommonProto::WriteConn { .to = to }, - value); - - if (testAccept()) - { - createDirs(dirOf(file)); - writeFile(file, to.s); - GTEST_SKIP() << updatingGoldenMaster; - } - else - { - auto expected = readFile(file); - ASSERT_EQ(to.s, expected); - } + CharacterizationTest::writeTest(testStem, [&]() -> std::string { + StringSink to; + CommonProto::Serialise::write( + *store, + CommonProto::WriteConn { .to = to }, + decoded); + return to.s; + }); } }; #define CHARACTERIZATION_TEST(NAME, STEM, VALUE) \ TEST_F(CommonProtoTest, NAME ## _read) { \ - readTest(STEM, VALUE); \ + readProtoTest(STEM, VALUE); \ } \ TEST_F(CommonProtoTest, NAME ## _write) { \ - writeTest(STEM, VALUE); \ + writeProtoTest(STEM, VALUE); \ } CHARACTERIZATION_TEST( diff --git a/src/libstore/tests/derivation.cc b/src/libstore/tests/derivation.cc index ca0cdff71..7becfa5ab 100644 --- a/src/libstore/tests/derivation.cc +++ b/src/libstore/tests/derivation.cc @@ -11,20 +11,20 @@ namespace nix { using nlohmann::json; -class DerivationTest : public LibStoreTest +class DerivationTest : public CharacterizationTest, public LibStoreTest { + Path unitTestData = getUnitTestData() + "/libstore/derivation"; + public: + Path goldenMaster(std::string_view testStem) const override { + return unitTestData + "/" + testStem; + } + /** * We set these in tests rather than the regular globals so we don't have * to worry about race conditions if the tests run concurrently. 
*/ ExperimentalFeatureSettings mockXpSettings; - - Path unitTestData = getUnitTestData() + "/libstore/derivation"; - - Path goldenMaster(std::string_view testStem) { - return unitTestData + "/" + testStem; - } }; class CaDerivationTest : public DerivationTest @@ -73,14 +73,8 @@ TEST_F(DynDerivationTest, BadATerm_oldVersionDynDeps) { #define TEST_JSON(FIXTURE, NAME, VAL, DRV_NAME, OUTPUT_NAME) \ TEST_F(FIXTURE, DerivationOutput_ ## NAME ## _from_json) { \ - if (testAccept()) \ - { \ - GTEST_SKIP() << cannotReadGoldenMaster; \ - } \ - else \ - { \ - auto encoded = json::parse( \ - readFile(goldenMaster("output-" #NAME ".json"))); \ + readTest("output-" #NAME ".json", [&](const auto & encoded_) { \ + auto encoded = json::parse(encoded_); \ DerivationOutput got = DerivationOutput::fromJSON( \ *store, \ DRV_NAME, \ @@ -89,28 +83,20 @@ TEST_F(DynDerivationTest, BadATerm_oldVersionDynDeps) { mockXpSettings); \ DerivationOutput expected { VAL }; \ ASSERT_EQ(got, expected); \ - } \ + }); \ } \ \ TEST_F(FIXTURE, DerivationOutput_ ## NAME ## _to_json) { \ - auto file = goldenMaster("output-" #NAME ".json"); \ - \ - json got = DerivationOutput { VAL }.toJSON( \ - *store, \ - DRV_NAME, \ - OUTPUT_NAME); \ - \ - if (testAccept()) \ - { \ - createDirs(dirOf(file)); \ - writeFile(file, got.dump(2) + "\n"); \ - GTEST_SKIP() << updatingGoldenMaster; \ - } \ - else \ - { \ - auto expected = json::parse(readFile(file)); \ - ASSERT_EQ(got, expected); \ - } \ + writeTest("output-" #NAME ".json", [&]() -> json { \ + return DerivationOutput { (VAL) }.toJSON( \ + *store, \ + (DRV_NAME), \ + (OUTPUT_NAME)); \ + }, [](const auto & file) { \ + return json::parse(readFile(file)); \ + }, [](const auto & file, const auto & got) { \ + return writeFile(file, got.dump(2) + "\n"); \ + }); \ } TEST_JSON(DerivationTest, inputAddressed, @@ -167,50 +153,30 @@ TEST_JSON(ImpureDerivationTest, impure, #define TEST_JSON(FIXTURE, NAME, VAL) \ TEST_F(FIXTURE, Derivation_ ## NAME ## _from_json) { \ - if (testAccept()) \ - { \ - GTEST_SKIP() << cannotReadGoldenMaster; \ - } \ - else \ - { \ - auto encoded = json::parse( \ - readFile(goldenMaster( #NAME ".json"))); \ + readTest(#NAME ".json", [&](const auto & encoded_) { \ + auto encoded = json::parse(encoded_); \ Derivation expected { VAL }; \ Derivation got = Derivation::fromJSON( \ *store, \ encoded, \ mockXpSettings); \ ASSERT_EQ(got, expected); \ - } \ + }); \ } \ \ TEST_F(FIXTURE, Derivation_ ## NAME ## _to_json) { \ - auto file = goldenMaster( #NAME ".json"); \ - \ - json got = Derivation { VAL }.toJSON(*store); \ - \ - if (testAccept()) \ - { \ - createDirs(dirOf(file)); \ - writeFile(file, got.dump(2) + "\n"); \ - GTEST_SKIP() << updatingGoldenMaster; \ - } \ - else \ - { \ - auto expected = json::parse(readFile(file)); \ - ASSERT_EQ(got, expected); \ - } \ + writeTest(#NAME ".json", [&]() -> json { \ + return Derivation { VAL }.toJSON(*store); \ + }, [](const auto & file) { \ + return json::parse(readFile(file)); \ + }, [](const auto & file, const auto & got) { \ + return writeFile(file, got.dump(2) + "\n"); \ + }); \ } #define TEST_ATERM(FIXTURE, NAME, VAL, DRV_NAME) \ TEST_F(FIXTURE, Derivation_ ## NAME ## _from_aterm) { \ - if (testAccept()) \ - { \ - GTEST_SKIP() << cannotReadGoldenMaster; \ - } \ - else \ - { \ - auto encoded = readFile(goldenMaster( #NAME ".drv")); \ + readTest(#NAME ".drv", [&](auto encoded) { \ Derivation expected { VAL }; \ auto got = parseDerivation( \ *store, \ @@ -219,25 +185,13 @@ TEST_JSON(ImpureDerivationTest, impure, mockXpSettings); \ 
ASSERT_EQ(got.toJSON(*store), expected.toJSON(*store)) ; \ ASSERT_EQ(got, expected); \ - } \ + }); \ } \ \ TEST_F(FIXTURE, Derivation_ ## NAME ## _to_aterm) { \ - auto file = goldenMaster( #NAME ".drv"); \ - \ - auto got = (VAL).unparse(*store, false); \ - \ - if (testAccept()) \ - { \ - createDirs(dirOf(file)); \ - writeFile(file, got); \ - GTEST_SKIP() << updatingGoldenMaster; \ - } \ - else \ - { \ - auto expected = readFile(file); \ - ASSERT_EQ(got, expected); \ - } \ + writeTest(#NAME ".drv", [&]() -> std::string { \ + return (VAL).unparse(*store, false); \ + }); \ } Derivation makeSimpleDrv(const Store & store) { diff --git a/src/libstore/tests/libstore.hh b/src/libstore/tests/libstore.hh index ef93457b5..78b162b95 100644 --- a/src/libstore/tests/libstore.hh +++ b/src/libstore/tests/libstore.hh @@ -8,7 +8,7 @@ namespace nix { -class LibStoreTest : public ::testing::Test { +class LibStoreTest : public virtual ::testing::Test { public: static void SetUpTestSuite() { initLibStore(); diff --git a/src/libstore/tests/machines.cc b/src/libstore/tests/machines.cc index f51052b14..fede328ea 100644 --- a/src/libstore/tests/machines.cc +++ b/src/libstore/tests/machines.cc @@ -1,5 +1,7 @@ #include "machines.hh" #include "globals.hh" +#include "file-system.hh" +#include "util.hh" #include diff --git a/src/libstore/tests/nar-info.cc b/src/libstore/tests/nar-info.cc new file mode 100644 index 000000000..c5b21d56b --- /dev/null +++ b/src/libstore/tests/nar-info.cc @@ -0,0 +1,85 @@ +#include +#include + +#include "path-info.hh" + +#include "tests/characterization.hh" +#include "tests/libstore.hh" + +namespace nix { + +using nlohmann::json; + +class NarInfoTest : public CharacterizationTest, public LibStoreTest +{ + Path unitTestData = getUnitTestData() + "/libstore/nar-info"; + + Path goldenMaster(PathView testStem) const override { + return unitTestData + "/" + testStem + ".json"; + } +}; + +static NarInfo makeNarInfo(const Store & store, bool includeImpureInfo) { + NarInfo info = ValidPathInfo { + store, + "foo", + FixedOutputInfo { + .method = FileIngestionMethod::Recursive, + .hash = hashString(HashType::htSHA256, "(...)"), + + .references = { + .others = { + StorePath { + "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar", + }, + }, + .self = true, + }, + }, + Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + }; + info.narSize = 34878; + if (includeImpureInfo) { + info.deriver = StorePath { + "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar.drv", + }; + info.registrationTime = 23423; + info.ultimate = true; + info.sigs = { "asdf", "qwer" }; + + info.url = "nar/1w1fff338fvdw53sqgamddn1b2xgds473pv6y13gizdbqjv4i5p3.nar.xz"; + info.compression = "xz"; + info.fileHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="); + info.fileSize = 4029176; + } + return info; +} + +#define JSON_TEST(STEM, PURE) \ + TEST_F(NarInfoTest, NarInfo_ ## STEM ## _from_json) { \ + readTest(#STEM, [&](const auto & encoded_) { \ + auto encoded = json::parse(encoded_); \ + auto expected = makeNarInfo(*store, PURE); \ + NarInfo got = NarInfo::fromJSON( \ + *store, \ + expected.path, \ + encoded); \ + ASSERT_EQ(got, expected); \ + }); \ + } \ + \ + TEST_F(NarInfoTest, NarInfo_ ## STEM ## _to_json) { \ + writeTest(#STEM, [&]() -> json { \ + return makeNarInfo(*store, PURE) \ + .toJSON(*store, PURE, HashFormat::SRI); \ + }, [](const auto & file) { \ + return json::parse(readFile(file)); \ + }, [](const auto & file, const auto & got) { \ + return writeFile(file, got.dump(2) + "\n"); \ + }); \ + } + 
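+/* Illustrative expansion (commentary only, not additional test code):
+   JSON_TEST(pure, false) below generates roughly
+
+       TEST_F(NarInfoTest, NarInfo_pure_from_json) {
+           readTest("pure", [&](const auto & encoded_) {
+               auto encoded = json::parse(encoded_);
+               auto expected = makeNarInfo(*store, false);
+               NarInfo got = NarInfo::fromJSON(*store, expected.path, encoded);
+               ASSERT_EQ(got, expected);
+           });
+       }
+
+   plus a matching NarInfo_pure_to_json test that serialises
+   makeNarInfo(*store, false).toJSON(*store, false, HashFormat::SRI)
+   and compares it against the same golden file. */
+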
+JSON_TEST(pure, false) +JSON_TEST(impure, true) + +} diff --git a/src/libstore/tests/path-info.cc b/src/libstore/tests/path-info.cc new file mode 100644 index 000000000..49bf623bd --- /dev/null +++ b/src/libstore/tests/path-info.cc @@ -0,0 +1,79 @@ +#include +#include + +#include "path-info.hh" + +#include "tests/characterization.hh" +#include "tests/libstore.hh" + +namespace nix { + +using nlohmann::json; + +class PathInfoTest : public CharacterizationTest, public LibStoreTest +{ + Path unitTestData = getUnitTestData() + "/libstore/path-info"; + + Path goldenMaster(PathView testStem) const override { + return unitTestData + "/" + testStem + ".json"; + } +}; + +static UnkeyedValidPathInfo makePathInfo(const Store & store, bool includeImpureInfo) { + UnkeyedValidPathInfo info = ValidPathInfo { + store, + "foo", + FixedOutputInfo { + .method = FileIngestionMethod::Recursive, + .hash = hashString(HashType::htSHA256, "(...)"), + + .references = { + .others = { + StorePath { + "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar", + }, + }, + .self = true, + }, + }, + Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + }; + info.narSize = 34878; + if (includeImpureInfo) { + info.deriver = StorePath { + "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar.drv", + }; + info.registrationTime = 23423; + info.ultimate = true; + info.sigs = { "asdf", "qwer" }; + } + return info; +} + +#define JSON_TEST(STEM, PURE) \ + TEST_F(PathInfoTest, PathInfo_ ## STEM ## _from_json) { \ + readTest(#STEM, [&](const auto & encoded_) { \ + auto encoded = json::parse(encoded_); \ + UnkeyedValidPathInfo got = UnkeyedValidPathInfo::fromJSON( \ + *store, \ + encoded); \ + auto expected = makePathInfo(*store, PURE); \ + ASSERT_EQ(got, expected); \ + }); \ + } \ + \ + TEST_F(PathInfoTest, PathInfo_ ## STEM ## _to_json) { \ + writeTest(#STEM, [&]() -> json { \ + return makePathInfo(*store, PURE) \ + .toJSON(*store, PURE, HashFormat::SRI); \ + }, [](const auto & file) { \ + return json::parse(readFile(file)); \ + }, [](const auto & file, const auto & got) { \ + return writeFile(file, got.dump(2) + "\n"); \ + }); \ + } + +JSON_TEST(pure, false) +JSON_TEST(impure, true) + +} diff --git a/src/libstore/tests/protocol.hh b/src/libstore/tests/protocol.hh index 7fdd3e11c..466032a79 100644 --- a/src/libstore/tests/protocol.hh +++ b/src/libstore/tests/protocol.hh @@ -1,3 +1,6 @@ +#pragma once +///@file + #include #include @@ -7,12 +10,11 @@ namespace nix { template -class ProtoTest : public LibStoreTest +class ProtoTest : public CharacterizationTest, public LibStoreTest { -protected: Path unitTestData = getUnitTestData() + "/libstore/" + protocolDir; - Path goldenMaster(std::string_view testStem) { + Path goldenMaster(std::string_view testStem) const override { return unitTestData + "/" + testStem + ".bin"; } }; @@ -25,18 +27,11 @@ public: * Golden test for `T` reading */ template - void readTest(PathView testStem, typename Proto::Version version, T value) + void readProtoTest(PathView testStem, typename Proto::Version version, T expected) { - if (testAccept()) - { - GTEST_SKIP() << cannotReadGoldenMaster; - } - else - { - auto expected = readFile(ProtoTest::goldenMaster(testStem)); - + CharacterizationTest::readTest(testStem, [&](const auto & encoded) { T got = ({ - StringSource from { expected }; + StringSource from { encoded }; Proto::template Serialise::read( *LibStoreTest::store, typename Proto::ReadConn { @@ -45,47 +40,36 @@ public: }); }); - ASSERT_EQ(got, value); - } + ASSERT_EQ(got, expected); + }); } /** * Golden test for `T` 
write */ template - void writeTest(PathView testStem, typename Proto::Version version, const T & value) + void writeProtoTest(PathView testStem, typename Proto::Version version, const T & decoded) { - auto file = ProtoTest::goldenMaster(testStem); - - StringSink to; - Proto::write( - *LibStoreTest::store, - typename Proto::WriteConn { - .to = to, - .version = version, - }, - value); - - if (testAccept()) - { - createDirs(dirOf(file)); - writeFile(file, to.s); - GTEST_SKIP() << updatingGoldenMaster; - } - else - { - auto expected = readFile(file); - ASSERT_EQ(to.s, expected); - } + CharacterizationTest::writeTest(testStem, [&]() { + StringSink to; + Proto::template Serialise::write( + *LibStoreTest::store, + typename Proto::WriteConn { + .to = to, + .version = version, + }, + decoded); + return std::move(to.s); + }); } }; #define VERSIONED_CHARACTERIZATION_TEST(FIXTURE, NAME, STEM, VERSION, VALUE) \ TEST_F(FIXTURE, NAME ## _read) { \ - readTest(STEM, VERSION, VALUE); \ + readProtoTest(STEM, VERSION, VALUE); \ } \ TEST_F(FIXTURE, NAME ## _write) { \ - writeTest(STEM, VERSION, VALUE); \ + writeProtoTest(STEM, VERSION, VALUE); \ } } diff --git a/src/libstore/uds-remote-store.cc b/src/libstore/uds-remote-store.cc index 99589f8b2..226cdf717 100644 --- a/src/libstore/uds-remote-store.cc +++ b/src/libstore/uds-remote-store.cc @@ -1,4 +1,5 @@ #include "uds-remote-store.hh" +#include "unix-domain-socket.hh" #include "worker-protocol.hh" #include diff --git a/src/libstore/worker-protocol.cc b/src/libstore/worker-protocol.cc index d618b9bd8..7118558b1 100644 --- a/src/libstore/worker-protocol.cc +++ b/src/libstore/worker-protocol.cc @@ -1,5 +1,4 @@ #include "serialise.hh" -#include "util.hh" #include "path-with-outputs.hh" #include "store-api.hh" #include "build-result.hh" @@ -32,14 +31,14 @@ std::optional WorkerProto::Serialise>::r void WorkerProto::Serialise>::write(const Store & store, WorkerProto::WriteConn conn, const std::optional & optTrusted) { if (!optTrusted) - conn.to << (uint8_t)0; + conn.to << uint8_t{0}; else { switch (*optTrusted) { case Trusted: - conn.to << (uint8_t)1; + conn.to << uint8_t{1}; break; case NotTrusted: - conn.to << (uint8_t)2; + conn.to << uint8_t{2}; break; default: assert(false); @@ -102,7 +101,7 @@ void WorkerProto::Serialise::write(const Store & store, Worker BuildResult WorkerProto::Serialise::read(const Store & store, WorkerProto::ReadConn conn) { BuildResult res; - res.status = (BuildResult::Status) readInt(conn.from); + res.status = static_cast(readInt(conn.from)); conn.from >> res.errorMsg; if (GET_PROTOCOL_MINOR(conn.version) >= 29) { conn.from diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index dcd54ad16..25d544ba7 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -171,7 +171,7 @@ enum struct WorkerProto::Op : uint64_t */ inline Sink & operator << (Sink & sink, WorkerProto::Op op) { - return sink << (uint64_t) op; + return sink << static_cast(op); } /** @@ -181,7 +181,7 @@ inline Sink & operator << (Sink & sink, WorkerProto::Op op) */ inline std::ostream & operator << (std::ostream & s, WorkerProto::Op op) { - return s << (uint64_t) op; + return s << static_cast(op); } /** diff --git a/src/libutil/abstract-setting-to-json.hh b/src/libutil/abstract-setting-to-json.hh index d506dfb74..eea687d8a 100644 --- a/src/libutil/abstract-setting-to-json.hh +++ b/src/libutil/abstract-setting-to-json.hh @@ -7,7 +7,7 @@ namespace nix { template -std::map BaseSetting::toJSONObject() +std::map 
BaseSetting::toJSONObject() const { auto obj = AbstractSetting::toJSONObject(); obj.emplace("value", value); diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index 4ca84d357..465df2073 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -6,9 +6,10 @@ #include // for strcasecmp #include "archive.hh" -#include "util.hh" #include "config.hh" #include "posix-source-accessor.hh" +#include "file-system.hh" +#include "signals.hh" namespace nix { diff --git a/src/libutil/args.cc b/src/libutil/args.cc index 811353c18..4359c5e8e 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -1,8 +1,14 @@ #include "args.hh" #include "args/root.hh" #include "hash.hh" +#include "environment-variables.hh" +#include "signals.hh" +#include "users.hh" #include "json-utils.hh" +#include +#include +#include #include namespace nix { @@ -74,7 +80,176 @@ std::optional RootArgs::needsCompletion(std::string_view s) return {}; } -void RootArgs::parseCmdline(const Strings & _cmdline) +/** + * Basically this is `typedef std::optional Parser(std::string_view s, Strings & r);` + * + * Except we can't recursively reference the Parser typedef, so we have to write a class. + */ +struct Parser { + std::string_view remaining; + + /** + * @brief Parse the next character(s) + * + * @param r + * @return std::shared_ptr + */ + virtual void operator()(std::shared_ptr & state, Strings & r) = 0; + + Parser(std::string_view s) : remaining(s) {}; +}; + +struct ParseQuoted : public Parser { + /** + * @brief Accumulated string + * + * Parsed argument up to this point. + */ + std::string acc; + + ParseQuoted(std::string_view s) : Parser(s) {}; + + virtual void operator()(std::shared_ptr & state, Strings & r) override; +}; + + +struct ParseUnquoted : public Parser { + /** + * @brief Accumulated string + * + * Parsed argument up to this point. Empty string is not representable in + * unquoted syntax, so we use it for the initial state. + */ + std::string acc; + + ParseUnquoted(std::string_view s) : Parser(s) {}; + + virtual void operator()(std::shared_ptr & state, Strings & r) override { + if (remaining.empty()) { + if (!acc.empty()) + r.push_back(acc); + state = nullptr; // done + return; + } + switch (remaining[0]) { + case ' ': case '\t': case '\n': case '\r': + if (!acc.empty()) + r.push_back(acc); + state = std::make_shared(ParseUnquoted(remaining.substr(1))); + return; + case '`': + if (remaining.size() > 1 && remaining[1] == '`') { + state = std::make_shared(ParseQuoted(remaining.substr(2))); + return; + } + else + throw Error("single backtick is not a supported syntax in the nix shebang."); + + // reserved characters + // meaning to be determined, or may be reserved indefinitely so that + // #!nix syntax looks unambiguous + case '$': + case '*': + case '~': + case '<': + case '>': + case '|': + case ';': + case '(': + case ')': + case '[': + case ']': + case '{': + case '}': + case '\'': + case '"': + case '\\': + throw Error("unsupported unquoted character in nix shebang: " + std::string(1, remaining[0]) + ". Use double backticks to escape?"); + + case '#': + if (acc.empty()) { + throw Error ("unquoted nix shebang argument cannot start with #. 
Use double backticks to escape?"); + } else { + acc += remaining[0]; + remaining = remaining.substr(1); + return; + } + + default: + acc += remaining[0]; + remaining = remaining.substr(1); + return; + } + assert(false); + } +}; + +void ParseQuoted::operator()(std::shared_ptr &state, Strings & r) { + if (remaining.empty()) { + throw Error("unterminated quoted string in nix shebang"); + } + switch (remaining[0]) { + case ' ': + if ((remaining.size() == 3 && remaining[1] == '`' && remaining[2] == '`') + || (remaining.size() > 3 && remaining[1] == '`' && remaining[2] == '`' && remaining[3] != '`')) { + // exactly two backticks mark the end of a quoted string, but a preceding space is ignored if present. + state = std::make_shared(ParseUnquoted(remaining.substr(3))); + r.push_back(acc); + return; + } + else { + // just a normal space + acc += remaining[0]; + remaining = remaining.substr(1); + return; + } + case '`': + // exactly two backticks mark the end of a quoted string + if ((remaining.size() == 2 && remaining[1] == '`') + || (remaining.size() > 2 && remaining[1] == '`' && remaining[2] != '`')) { + state = std::make_shared(ParseUnquoted(remaining.substr(2))); + r.push_back(acc); + return; + } + + // a sequence of at least 3 backticks is one escape-backtick which is ignored, followed by any number of backticks, which are verbatim + else if (remaining.size() >= 3 && remaining[1] == '`' && remaining[2] == '`') { + // ignore "escape" backtick + remaining = remaining.substr(1); + // add the rest + while (remaining.size() > 0 && remaining[0] == '`') { + acc += '`'; + remaining = remaining.substr(1); + } + return; + } + else { + acc += remaining[0]; + remaining = remaining.substr(1); + return; + } + default: + acc += remaining[0]; + remaining = remaining.substr(1); + return; + } + assert(false); +} + +Strings parseShebangContent(std::string_view s) { + Strings result; + std::shared_ptr parserState(std::make_shared(ParseUnquoted(s))); + + // trampoline == iterated strategy pattern + while (parserState) { + auto currentState = parserState; + (*currentState)(parserState, result); + } + + return result; +} + +void RootArgs::parseCmdline(const Strings & _cmdline, bool allowShebang) { Strings pendingArgs; bool dashDash = false; @@ -90,6 +265,45 @@ void RootArgs::parseCmdline(const Strings & _cmdline) } bool argsSeen = false; + + // Heuristic to see if we're invoked as a shebang script, namely, + // if we have at least one argument, it's the name of an + // executable file, and it starts with "#!". + Strings savedArgs; + if (allowShebang){ + auto script = *cmdline.begin(); + try { + std::ifstream stream(script); + char shebang[3]={0,0,0}; + stream.get(shebang,3); + if (strncmp(shebang,"#!",2) == 0){ + for (auto pos = std::next(cmdline.begin()); pos != cmdline.end();pos++) + savedArgs.push_back(*pos); + cmdline.clear(); + + std::string line; + std::getline(stream,line); + static const std::string commentChars("#/\\%@*-"); + std::string shebangContent; + while (std::getline(stream,line) && !line.empty() && commentChars.find(line[0]) != std::string::npos){ + line = chomp(line); + + std::smatch match; + // We match one space after `nix` so that we preserve indentation. + // No space is necessary for an empty line. An empty line has basically no effect. + if (std::regex_match(line, match, std::regex("^#!\\s*nix(:? 
|$)(.*)$"))) + shebangContent += match[2].str() + "\n"; + } + for (const auto & word : parseShebangContent(shebangContent)) { + cmdline.push_back(word); + } + cmdline.push_back(script); + commandBaseDir = dirOf(script); + for (auto pos = savedArgs.begin(); pos != savedArgs.end();pos++) + cmdline.push_back(*pos); + } + } catch (SysError &) { } + } for (auto pos = cmdline.begin(); pos != cmdline.end(); ) { auto arg = *pos; @@ -145,6 +359,17 @@ void RootArgs::parseCmdline(const Strings & _cmdline) d.completer(*completions, d.n, d.prefix); } +Path Args::getCommandBaseDir() const +{ + assert(parent); + return parent->getCommandBaseDir(); +} + +Path RootArgs::getCommandBaseDir() const +{ + return commandBaseDir; +} + bool Args::processFlag(Strings::iterator & pos, Strings::iterator end) { assert(pos != end); diff --git a/src/libutil/args.hh b/src/libutil/args.hh index e3b41313f..7af82b178 100644 --- a/src/libutil/args.hh +++ b/src/libutil/args.hh @@ -2,12 +2,15 @@ ///@file #include +#include #include #include +#include #include -#include "util.hh" +#include "types.hh" +#include "experimental-features.hh" namespace nix { @@ -21,11 +24,12 @@ class AddCompletions; class Args { + public: /** * Return a short one-line description of the command. - */ + */ virtual std::string description() { return ""; } virtual bool forceImpureByDefault() { return false; } @@ -35,6 +39,16 @@ public: */ virtual std::string doc() { return ""; } + /** + * @brief Get the base directory for the command. + * + * @return Generally the working directory, but in case of a shebang + * interpreter, returns the directory of the script. + * + * This only returns the correct value after parseCmdline() has run. + */ + virtual Path getCommandBaseDir() const; + protected: /** @@ -395,4 +409,6 @@ public: virtual void add(std::string completion, std::string description = "") = 0; }; +Strings parseShebangContent(std::string_view s); + } diff --git a/src/libutil/args/root.hh b/src/libutil/args/root.hh index bb98732a1..5c55c37a5 100644 --- a/src/libutil/args/root.hh +++ b/src/libutil/args/root.hh @@ -29,14 +29,26 @@ struct Completions final : AddCompletions */ class RootArgs : virtual public Args { + /** + * @brief The command's "working directory", but only set when top level. + * + * Use getCommandBaseDir() to get the directory regardless of whether this + * is a top-level command or subcommand. + * + * @see getCommandBaseDir() + */ + Path commandBaseDir = "."; + public: /** Parse the command line, throwing a UsageError if something goes * wrong. 
*/ - void parseCmdline(const Strings & cmdline); + void parseCmdline(const Strings & cmdline, bool allowShebang = false); std::shared_ptr completions; + Path getCommandBaseDir() const override; + protected: friend class Args; diff --git a/src/libutil/canon-path.cc b/src/libutil/canon-path.cc index 040464532..f678fae94 100644 --- a/src/libutil/canon-path.cc +++ b/src/libutil/canon-path.cc @@ -1,5 +1,5 @@ #include "canon-path.hh" -#include "util.hh" +#include "file-system.hh" namespace nix { diff --git a/src/libutil/cgroup.cc b/src/libutil/cgroup.cc index a008481ca..4c2bf31ff 100644 --- a/src/libutil/cgroup.cc +++ b/src/libutil/cgroup.cc @@ -2,6 +2,7 @@ #include "cgroup.hh" #include "util.hh" +#include "file-system.hh" #include "finally.hh" #include diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc index ba0847cde..d06f1f87b 100644 --- a/src/libutil/compression.cc +++ b/src/libutil/compression.cc @@ -1,6 +1,6 @@ #include "compression.hh" +#include "signals.hh" #include "tarfile.hh" -#include "util.hh" #include "finally.hh" #include "logging.hh" diff --git a/src/libutil/config-impl.hh b/src/libutil/config-impl.hh index b9639e761..9f69e8444 100644 --- a/src/libutil/config-impl.hh +++ b/src/libutil/config-impl.hh @@ -45,13 +45,13 @@ bool BaseSetting::isAppendable() return trait::appendable; } -template<> void BaseSetting::appendOrSet(Strings && newValue, bool append); -template<> void BaseSetting::appendOrSet(StringSet && newValue, bool append); -template<> void BaseSetting::appendOrSet(StringMap && newValue, bool append); -template<> void BaseSetting>::appendOrSet(std::set && newValue, bool append); +template<> void BaseSetting::appendOrSet(Strings newValue, bool append); +template<> void BaseSetting::appendOrSet(StringSet newValue, bool append); +template<> void BaseSetting::appendOrSet(StringMap newValue, bool append); +template<> void BaseSetting>::appendOrSet(std::set newValue, bool append); template -void BaseSetting::appendOrSet(T && newValue, bool append) +void BaseSetting::appendOrSet(T newValue, bool append) { static_assert( !trait::appendable, diff --git a/src/libutil/config.cc b/src/libutil/config.cc index 8e06273ee..96a0a4df8 100644 --- a/src/libutil/config.cc +++ b/src/libutil/config.cc @@ -2,6 +2,8 @@ #include "args.hh" #include "abstract-setting-to-json.hh" #include "experimental-features.hh" +#include "util.hh" +#include "file-system.hh" #include "config-impl.hh" @@ -34,27 +36,25 @@ bool Config::set(const std::string & name, const std::string & value) void Config::addSetting(AbstractSetting * setting) { _settings.emplace(setting->name, Config::SettingData{false, setting}); - for (auto & alias : setting->aliases) + for (const auto & alias : setting->aliases) _settings.emplace(alias, Config::SettingData{true, setting}); bool set = false; - auto i = unknownSettings.find(setting->name); - if (i != unknownSettings.end()) { - setting->set(i->second); + if (auto i = unknownSettings.find(setting->name); i != unknownSettings.end()) { + setting->set(std::move(i->second)); setting->overridden = true; unknownSettings.erase(i); set = true; } for (auto & alias : setting->aliases) { - auto i = unknownSettings.find(alias); - if (i != unknownSettings.end()) { + if (auto i = unknownSettings.find(alias); i != unknownSettings.end()) { if (set) warn("setting '%s' is set, but it's an alias of '%s' which is also set", alias, setting->name); else { - setting->set(i->second); + setting->set(std::move(i->second)); setting->overridden = true; unknownSettings.erase(i); set = true; @@ 
-69,7 +69,7 @@ AbstractConfig::AbstractConfig(StringMap initials) void AbstractConfig::warnUnknownSettings() { - for (auto & s : unknownSettings) + for (const auto & s : unknownSettings) warn("unknown setting '%s'", s.first); } @@ -83,7 +83,7 @@ void AbstractConfig::reapplyUnknownSettings() void Config::getSettings(std::map & res, bool overriddenOnly) { - for (auto & opt : _settings) + for (const auto & opt : _settings) if (!opt.second.isAlias && (!overriddenOnly || opt.second.setting->overridden)) res.emplace(opt.first, SettingInfo{opt.second.setting->to_string(), opt.second.setting->description}); } @@ -99,8 +99,7 @@ void AbstractConfig::applyConfig(const std::string & contents, const std::string line += contents[pos++]; pos++; - auto hash = line.find('#'); - if (hash != std::string::npos) + if (auto hash = line.find('#'); hash != line.npos) line = std::string(line, 0, hash); auto tokens = tokenizeString>(line); @@ -133,24 +132,24 @@ void AbstractConfig::applyConfig(const std::string & contents, const std::string if (tokens[1] != "=") throw UsageError("illegal configuration line '%1%' in '%2%'", line, path); - std::string name = tokens[0]; + std::string name = std::move(tokens[0]); auto i = tokens.begin(); advance(i, 2); parsedContents.push_back({ - name, + std::move(name), concatStringsSep(" ", Strings(i, tokens.end())), }); }; // First apply experimental-feature related settings - for (auto & [name, value] : parsedContents) + for (const auto & [name, value] : parsedContents) if (name == "experimental-features" || name == "extra-experimental-features") set(name, value); // Then apply other settings - for (auto & [name, value] : parsedContents) + for (const auto & [name, value] : parsedContents) if (name != "experimental-features" && name != "extra-experimental-features") set(name, value); } @@ -172,7 +171,7 @@ void Config::resetOverridden() nlohmann::json Config::toJSON() { auto res = nlohmann::json::object(); - for (auto & s : _settings) + for (const auto & s : _settings) if (!s.second.isAlias) res.emplace(s.first, s.second.setting->toJSON()); return res; @@ -180,8 +179,8 @@ nlohmann::json Config::toJSON() std::string Config::toKeyValue() { - auto res = std::string(); - for (auto & s : _settings) + std::string res; + for (const auto & s : _settings) if (s.second.isAlias) res += fmt("%s = %s\n", s.first, s.second.setting->to_string()); return res; @@ -203,7 +202,7 @@ AbstractSetting::AbstractSetting( : name(name) , description(stripIndentation(description)) , aliases(aliases) - , experimentalFeature(experimentalFeature) + , experimentalFeature(std::move(experimentalFeature)) { } @@ -219,7 +218,7 @@ nlohmann::json AbstractSetting::toJSON() return nlohmann::json(toJSONObject()); } -std::map AbstractSetting::toJSONObject() +std::map AbstractSetting::toJSONObject() const { std::map obj; obj.emplace("description", description); @@ -282,14 +281,14 @@ template<> void BaseSetting::convertToArg(Args & args, const std::string & .longName = name, .description = fmt("Enable the `%s` setting.", name), .category = category, - .handler = {[this]() { override(true); }}, + .handler = {[this] { override(true); }}, .experimentalFeature = experimentalFeature, }); args.addFlag({ .longName = "no-" + name, .description = fmt("Disable the `%s` setting.", name), .category = category, - .handler = {[this]() { override(false); }}, + .handler = {[this] { override(false); }}, .experimentalFeature = experimentalFeature, }); } @@ -299,10 +298,11 @@ template<> Strings BaseSetting::parse(const std::string & str) const 
return tokenizeString(str); } -template<> void BaseSetting::appendOrSet(Strings && newValue, bool append) +template<> void BaseSetting::appendOrSet(Strings newValue, bool append) { if (!append) value.clear(); - for (auto && s : std::move(newValue)) value.push_back(std::move(s)); + value.insert(value.end(), std::make_move_iterator(newValue.begin()), + std::make_move_iterator(newValue.end())); } template<> std::string BaseSetting::to_string() const @@ -315,11 +315,10 @@ template<> StringSet BaseSetting::parse(const std::string & str) cons return tokenizeString(str); } -template<> void BaseSetting::appendOrSet(StringSet && newValue, bool append) +template<> void BaseSetting::appendOrSet(StringSet newValue, bool append) { if (!append) value.clear(); - for (auto && s : std::move(newValue)) - value.insert(s); + value.insert(std::make_move_iterator(newValue.begin()), std::make_move_iterator(newValue.end())); } template<> std::string BaseSetting::to_string() const @@ -331,26 +330,26 @@ template<> std::set BaseSetting res; for (auto & s : tokenizeString(str)) { - auto thisXpFeature = parseExperimentalFeature(s); - if (thisXpFeature) + if (auto thisXpFeature = parseExperimentalFeature(s); thisXpFeature) { res.insert(thisXpFeature.value()); - else + if (thisXpFeature.value() == Xp::Flakes) + res.insert(Xp::FetchTree); + } else warn("unknown experimental feature '%s'", s); } return res; } -template<> void BaseSetting>::appendOrSet(std::set && newValue, bool append) +template<> void BaseSetting>::appendOrSet(std::set newValue, bool append) { if (!append) value.clear(); - for (auto && s : std::move(newValue)) - value.insert(s); + value.insert(std::make_move_iterator(newValue.begin()), std::make_move_iterator(newValue.end())); } template<> std::string BaseSetting>::to_string() const { StringSet stringifiedXpFeatures; - for (auto & feature : value) + for (const auto & feature : value) stringifiedXpFeatures.insert(std::string(showExperimentalFeature(feature))); return concatStringsSep(" ", stringifiedXpFeatures); } @@ -358,28 +357,25 @@ template<> std::string BaseSetting>::to_string() c template<> StringMap BaseSetting::parse(const std::string & str) const { StringMap res; - for (auto & s : tokenizeString(str)) { - auto eq = s.find_first_of('='); - if (std::string::npos != eq) + for (const auto & s : tokenizeString(str)) { + if (auto eq = s.find_first_of('='); s.npos != eq) res.emplace(std::string(s, 0, eq), std::string(s, eq + 1)); // else ignored } return res; } -template<> void BaseSetting::appendOrSet(StringMap && newValue, bool append) +template<> void BaseSetting::appendOrSet(StringMap newValue, bool append) { if (!append) value.clear(); - for (auto && [k, v] : std::move(newValue)) - value.emplace(std::move(k), std::move(v)); + value.insert(std::make_move_iterator(newValue.begin()), std::make_move_iterator(newValue.end())); } template<> std::string BaseSetting::to_string() const { - Strings kvstrs; - std::transform(value.begin(), value.end(), back_inserter(kvstrs), - [&](auto kvpair){ return kvpair.first + "=" + kvpair.second; }); - return concatStringsSep(" ", kvstrs); + return std::transform_reduce(value.cbegin(), value.cend(), std::string{}, + [](const auto & l, const auto &r) { return l + " " + r; }, + [](const auto & kvpair){ return kvpair.first + "=" + kvpair.second; }); } template class BaseSetting; @@ -468,7 +464,7 @@ void GlobalConfig::resetOverridden() nlohmann::json GlobalConfig::toJSON() { auto res = nlohmann::json::object(); - for (auto & config : *configRegistrations) + for (const auto 
& config : *configRegistrations) res.update(config->toJSON()); return res; } @@ -478,7 +474,7 @@ std::string GlobalConfig::toKeyValue() std::string res; std::map settings; globalConfig.getSettings(settings); - for (auto & s : settings) + for (const auto & s : settings) res += fmt("%s = %s\n", s.first, s.second.value); return res; } diff --git a/src/libutil/config.hh b/src/libutil/config.hh index 38c3ce0c4..5d7bd8e0c 100644 --- a/src/libutil/config.hh +++ b/src/libutil/config.hh @@ -150,7 +150,7 @@ public: AbstractSetting * setting; }; - typedef std::map Settings; + using Settings = std::map; private: @@ -213,7 +213,7 @@ protected: nlohmann::json toJSON(); - virtual std::map toJSONObject(); + virtual std::map toJSONObject() const; virtual void convertToArg(Args & args, const std::string & category); @@ -247,7 +247,7 @@ protected: * * @param append Whether to append or overwrite. */ - virtual void appendOrSet(T && newValue, bool append); + virtual void appendOrSet(T newValue, bool append); public: @@ -306,7 +306,7 @@ public: void convertToArg(Args & args, const std::string & category) override; - std::map toJSONObject() override; + std::map toJSONObject() const override; }; template @@ -316,7 +316,7 @@ std::ostream & operator <<(std::ostream & str, const BaseSetting & opt) } template -bool operator ==(const T & v1, const BaseSetting & v2) { return v1 == (const T &) v2; } +bool operator ==(const T & v1, const BaseSetting & v2) { return v1 == static_cast(v2); } template class Setting : public BaseSetting @@ -329,7 +329,7 @@ public: const std::set & aliases = {}, const bool documentDefault = true, std::optional experimentalFeature = std::nullopt) - : BaseSetting(def, documentDefault, name, description, aliases, experimentalFeature) + : BaseSetting(def, documentDefault, name, description, aliases, std::move(experimentalFeature)) { options->addSetting(this); } diff --git a/src/libutil/current-process.cc b/src/libutil/current-process.cc new file mode 100644 index 000000000..352a6a0fb --- /dev/null +++ b/src/libutil/current-process.cc @@ -0,0 +1,110 @@ +#include "current-process.hh" +#include "namespaces.hh" +#include "util.hh" +#include "finally.hh" +#include "file-system.hh" +#include "processes.hh" +#include "signals.hh" + +#ifdef __APPLE__ +# include +#endif + +#if __linux__ +# include +# include +# include "cgroup.hh" +#endif + +#include + +namespace nix { + +unsigned int getMaxCPU() +{ + #if __linux__ + try { + auto cgroupFS = getCgroupFS(); + if (!cgroupFS) return 0; + + auto cgroups = getCgroups("/proc/self/cgroup"); + auto cgroup = cgroups[""]; + if (cgroup == "") return 0; + + auto cpuFile = *cgroupFS + "/" + cgroup + "/cpu.max"; + + auto cpuMax = readFile(cpuFile); + auto cpuMaxParts = tokenizeString>(cpuMax, " \n"); + auto quota = cpuMaxParts[0]; + auto period = cpuMaxParts[1]; + if (quota != "max") + return std::ceil(std::stoi(quota) / std::stof(period)); + } catch (Error &) { ignoreException(lvlDebug); } + #endif + + return 0; +} + + +////////////////////////////////////////////////////////////////////// + + +#if __linux__ +rlim_t savedStackSize = 0; +#endif + +void setStackSize(size_t stackSize) +{ + #if __linux__ + struct rlimit limit; + if (getrlimit(RLIMIT_STACK, &limit) == 0 && limit.rlim_cur < stackSize) { + savedStackSize = limit.rlim_cur; + limit.rlim_cur = stackSize; + setrlimit(RLIMIT_STACK, &limit); + } + #endif +} + +void restoreProcessContext(bool restoreMounts) +{ + restoreSignals(); + if (restoreMounts) { + restoreMountNamespace(); + } + + #if __linux__ + if 
(savedStackSize) { + struct rlimit limit; + if (getrlimit(RLIMIT_STACK, &limit) == 0) { + limit.rlim_cur = savedStackSize; + setrlimit(RLIMIT_STACK, &limit); + } + } + #endif +} + + +////////////////////////////////////////////////////////////////////// + + +std::optional getSelfExe() +{ + static auto cached = []() -> std::optional + { + #if __linux__ + return readLink("/proc/self/exe"); + #elif __APPLE__ + char buf[1024]; + uint32_t size = sizeof(buf); + if (_NSGetExecutablePath(buf, &size) == 0) + return buf; + else + return std::nullopt; + #else + return std::nullopt; + #endif + }(); + return cached; +} + +} diff --git a/src/libutil/current-process.hh b/src/libutil/current-process.hh new file mode 100644 index 000000000..826d6fe20 --- /dev/null +++ b/src/libutil/current-process.hh @@ -0,0 +1,34 @@ +#pragma once +///@file + +#include + +#include "types.hh" + +namespace nix { + +/** + * If cgroups are active, attempt to calculate the number of CPUs available. + * If cgroups are unavailable or if cpu.max is set to "max", return 0. + */ +unsigned int getMaxCPU(); + +/** + * Change the stack size. + */ +void setStackSize(size_t stackSize); + +/** + * Restore the original inherited Unix process context (such as signal + * masks, stack size). + + * See startSignalHandlerThread(), saveSignalMask(). + */ +void restoreProcessContext(bool restoreMounts = true); + +/** + * @return the path of the current executable. + */ +std::optional getSelfExe(); + +} diff --git a/src/libutil/environment-variables.cc b/src/libutil/environment-variables.cc new file mode 100644 index 000000000..6618d7872 --- /dev/null +++ b/src/libutil/environment-variables.cc @@ -0,0 +1,49 @@ +#include "util.hh" +#include "environment-variables.hh" + +extern char * * environ __attribute__((weak)); + +namespace nix { + +std::optional getEnv(const std::string & key) +{ + char * value = getenv(key.c_str()); + if (!value) return {}; + return std::string(value); +} + +std::optional getEnvNonEmpty(const std::string & key) { + auto value = getEnv(key); + if (value == "") return {}; + return value; +} + +std::map getEnv() +{ + std::map env; + for (size_t i = 0; environ[i]; ++i) { + auto s = environ[i]; + auto eq = strchr(s, '='); + if (!eq) + // invalid env, just keep going + continue; + env.emplace(std::string(s, eq), std::string(eq + 1)); + } + return env; +} + + +void clearEnv() +{ + for (auto & name : getEnv()) + unsetenv(name.first.c_str()); +} + +void replaceEnv(const std::map & newEnv) +{ + clearEnv(); + for (auto & newEnvVar : newEnv) + setenv(newEnvVar.first.c_str(), newEnvVar.second.c_str(), 1); +} + +} diff --git a/src/libutil/environment-variables.hh b/src/libutil/environment-variables.hh new file mode 100644 index 000000000..21eb4619b --- /dev/null +++ b/src/libutil/environment-variables.hh @@ -0,0 +1,41 @@ +#pragma once +/** + * @file + * + * Utilities for working with the current process's environment + * variables. + */ + +#include + +#include "types.hh" + +namespace nix { + +/** + * @return an environment variable. + */ +std::optional getEnv(const std::string & key); + +/** + * @return a non empty environment variable. Returns nullopt if the env + * variable is set to "" + */ +std::optional getEnvNonEmpty(const std::string & key); + +/** + * Get the entire environment. + */ +std::map getEnv(); + +/** + * Clear the environment. + */ +void clearEnv(); + +/** + * Replace the entire environment with the given one. 
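+ *
+ * For example (illustrative):
+ *
+ *     replaceEnv({ { "PATH", "/usr/bin" } });
+ *
+ * first clears every existing variable (via clearEnv()) and then sets
+ * exactly the entries of newEnv with setenv().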
+ */ +void replaceEnv(const std::map & newEnv); + +} diff --git a/src/libutil/error.cc b/src/libutil/error.cc index dd9612471..8488e7e21 100644 --- a/src/libutil/error.cc +++ b/src/libutil/error.cc @@ -1,4 +1,7 @@ #include "error.hh" +#include "environment-variables.hh" +#include "signals.hh" +#include "terminal.hh" #include #include @@ -7,8 +10,6 @@ namespace nix { -const std::string nativeSystem = SYSTEM; - void BaseError::addTrace(std::shared_ptr && e, hintformat hint, bool frame) { err.traces.push_front(Trace { .pos = std::move(e), .hint = hint, .frame = frame }); diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index 74af9aae0..88fb55713 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -12,7 +12,19 @@ struct ExperimentalFeatureDetails std::string_view description; }; -constexpr std::array xpFeatureDetails = {{ +/** + * If two different PRs both add an experimental feature, and we just + * used a number for this, we *woudln't* get merge conflict and the + * counter will be incremented once instead of twice, causing a build + * failure. + * + * By instead defining this instead as 1 + the bottom experimental + * feature, we either have no issue at all if few features are not added + * at the end of the list, or a proper merge conflict if they are. + */ +constexpr size_t numXpFeatures = 1 + static_cast(Xp::VerifiedFetches); + +constexpr std::array xpFeatureDetails = {{ { .tag = Xp::CaDerivations, .name = "ca-derivations", @@ -62,6 +74,20 @@ constexpr std::array xpFeatureDetails = {{ flake`](@docroot@/command-ref/new-cli/nix3-flake.md) for details. )", }, + { + .tag = Xp::FetchTree, + .name = "fetch-tree", + .description = R"( + Enable the use of the [`fetchTree`](@docroot@/language/builtins.md#builtins-fetchTree) built-in function in the Nix language. + + `fetchTree` exposes a large suite of fetching functionality in a more systematic way. + The [`flakes`](#xp-feature-flakes) feature flag always enables `fetch-tree`. + + This built-in was previously guarded by the `flakes` experimental feature because of that overlap, + but since the plan is to work on stabilizing this first (due 2024 Q1), we are putting it underneath a separate feature. + Once we've made the changes we want to make, enabling just this feature will serve as a "release candidate" --- allowing users to try out the functionality we want to stabilize and not any other functionality we don't yet want to, in isolation. + )", + }, { .tag = Xp::NixCommand, .name = "nix-command", @@ -227,7 +253,14 @@ constexpr std::array xpFeatureDetails = {{ .description = R"( Allow the use of the [impure-env](@docroot@/command-ref/conf-file.md#conf-impure-env) setting. )", - } + }, + { + .tag = Xp::VerifiedFetches, + .name = "verified-fetches", + .description = R"( + Enables verification of git commit signatures through the [`fetchGit`](@docroot@/language/builtins.md#builtins-fetchGit) built-in. 
+ )", + }, }}; static_assert( diff --git a/src/libutil/experimental-features.hh b/src/libutil/experimental-features.hh index e02f8353e..f580fd030 100644 --- a/src/libutil/experimental-features.hh +++ b/src/libutil/experimental-features.hh @@ -20,6 +20,7 @@ enum struct ExperimentalFeature CaDerivations, ImpureDerivations, Flakes, + FetchTree, NixCommand, RecursiveNix, NoUrlLiterals, @@ -32,6 +33,7 @@ enum struct ExperimentalFeature ParseTomlTimestamps, ReadOnlyLocalStore, ConfigurableImpureEnv, + VerifiedFetches, }; /** diff --git a/src/libutil/file-descriptor.cc b/src/libutil/file-descriptor.cc new file mode 100644 index 000000000..38dd70c8e --- /dev/null +++ b/src/libutil/file-descriptor.cc @@ -0,0 +1,254 @@ +#include "file-system.hh" +#include "signals.hh" +#include "finally.hh" +#include "serialise.hh" + +#include +#include + +namespace nix { + +std::string readFile(int fd) +{ + struct stat st; + if (fstat(fd, &st) == -1) + throw SysError("statting file"); + + return drainFD(fd, true, st.st_size); +} + + +void readFull(int fd, char * buf, size_t count) +{ + while (count) { + checkInterrupt(); + ssize_t res = read(fd, buf, count); + if (res == -1) { + if (errno == EINTR) continue; + throw SysError("reading from file"); + } + if (res == 0) throw EndOfFile("unexpected end-of-file"); + count -= res; + buf += res; + } +} + + +void writeFull(int fd, std::string_view s, bool allowInterrupts) +{ + while (!s.empty()) { + if (allowInterrupts) checkInterrupt(); + ssize_t res = write(fd, s.data(), s.size()); + if (res == -1 && errno != EINTR) + throw SysError("writing to file"); + if (res > 0) + s.remove_prefix(res); + } +} + + +std::string readLine(int fd) +{ + std::string s; + while (1) { + checkInterrupt(); + char ch; + // FIXME: inefficient + ssize_t rd = read(fd, &ch, 1); + if (rd == -1) { + if (errno != EINTR) + throw SysError("reading a line"); + } else if (rd == 0) + throw EndOfFile("unexpected EOF reading a line"); + else { + if (ch == '\n') return s; + s += ch; + } + } +} + + +void writeLine(int fd, std::string s) +{ + s += '\n'; + writeFull(fd, s); +} + + +std::string drainFD(int fd, bool block, const size_t reserveSize) +{ + // the parser needs two extra bytes to append terminating characters, other users will + // not care very much about the extra memory. 
+ StringSink sink(reserveSize + 2); + drainFD(fd, sink, block); + return std::move(sink.s); +} + + +void drainFD(int fd, Sink & sink, bool block) +{ + // silence GCC maybe-uninitialized warning in finally + int saved = 0; + + if (!block) { + saved = fcntl(fd, F_GETFL); + if (fcntl(fd, F_SETFL, saved | O_NONBLOCK) == -1) + throw SysError("making file descriptor non-blocking"); + } + + Finally finally([&]() { + if (!block) { + if (fcntl(fd, F_SETFL, saved) == -1) + throw SysError("making file descriptor blocking"); + } + }); + + std::vector buf(64 * 1024); + while (1) { + checkInterrupt(); + ssize_t rd = read(fd, buf.data(), buf.size()); + if (rd == -1) { + if (!block && (errno == EAGAIN || errno == EWOULDBLOCK)) + break; + if (errno != EINTR) + throw SysError("reading from file"); + } + else if (rd == 0) break; + else sink({(char *) buf.data(), (size_t) rd}); + } +} + +////////////////////////////////////////////////////////////////////// + +AutoCloseFD::AutoCloseFD() : fd{-1} {} + + +AutoCloseFD::AutoCloseFD(int fd) : fd{fd} {} + + +AutoCloseFD::AutoCloseFD(AutoCloseFD && that) : fd{that.fd} +{ + that.fd = -1; +} + + +AutoCloseFD & AutoCloseFD::operator =(AutoCloseFD && that) +{ + close(); + fd = that.fd; + that.fd = -1; + return *this; +} + + +AutoCloseFD::~AutoCloseFD() +{ + try { + close(); + } catch (...) { + ignoreException(); + } +} + + +int AutoCloseFD::get() const +{ + return fd; +} + + +void AutoCloseFD::close() +{ + if (fd != -1) { + if (::close(fd) == -1) + /* This should never happen. */ + throw SysError("closing file descriptor %1%", fd); + fd = -1; + } +} + +void AutoCloseFD::fsync() +{ + if (fd != -1) { + int result; +#if __APPLE__ + result = ::fcntl(fd, F_FULLFSYNC); +#else + result = ::fsync(fd); +#endif + if (result == -1) + throw SysError("fsync file descriptor %1%", fd); + } +} + + +AutoCloseFD::operator bool() const +{ + return fd != -1; +} + + +int AutoCloseFD::release() +{ + int oldFD = fd; + fd = -1; + return oldFD; +} + + +void Pipe::create() +{ + int fds[2]; +#if HAVE_PIPE2 + if (pipe2(fds, O_CLOEXEC) != 0) throw SysError("creating pipe"); +#else + if (pipe(fds) != 0) throw SysError("creating pipe"); + closeOnExec(fds[0]); + closeOnExec(fds[1]); +#endif + readSide = fds[0]; + writeSide = fds[1]; +} + + +void Pipe::close() +{ + readSide.close(); + writeSide.close(); +} + +////////////////////////////////////////////////////////////////////// + +void closeMostFDs(const std::set & exceptions) +{ +#if __linux__ + try { + for (auto & s : readDirectory("/proc/self/fd")) { + auto fd = std::stoi(s.name); + if (!exceptions.count(fd)) { + debug("closing leaked FD %d", fd); + close(fd); + } + } + return; + } catch (SysError &) { + } +#endif + + int maxFD = 0; + maxFD = sysconf(_SC_OPEN_MAX); + for (int fd = 0; fd < maxFD; ++fd) + if (!exceptions.count(fd)) + close(fd); /* ignore result */ +} + + +void closeOnExec(int fd) +{ + int prev; + if ((prev = fcntl(fd, F_GETFD, 0)) == -1 || + fcntl(fd, F_SETFD, prev | FD_CLOEXEC) == -1) + throw SysError("setting close-on-exec flag"); +} + +} diff --git a/src/libutil/file-descriptor.hh b/src/libutil/file-descriptor.hh new file mode 100644 index 000000000..80ec86135 --- /dev/null +++ b/src/libutil/file-descriptor.hh @@ -0,0 +1,84 @@ +#pragma once +///@file + +#include "types.hh" +#include "error.hh" + +namespace nix { + +struct Sink; +struct Source; + +/** + * Read the contents of a resource into a string. 
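`AutoCloseFD` and `Pipe`, implemented above, are plain RAII wrappers over raw descriptors; a minimal sketch of the intended usage:

```cpp
#include "file-descriptor.hh"

#include <iostream>

using namespace nix;

int main()
{
    // A pipe whose ends close themselves when they go out of scope.
    Pipe p;
    p.create();

    writeLine(p.writeSide.get(), "ping");
    p.writeSide.close();                              // signal EOF to the reader

    std::cout << readLine(p.readSide.get()) << "\n";  // prints "ping"

    return 0;   // readSide is released by ~AutoCloseFD()
}
```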
+ */ +std::string readFile(int fd); + +/** + * Wrappers arount read()/write() that read/write exactly the + * requested number of bytes. + */ +void readFull(int fd, char * buf, size_t count); + +void writeFull(int fd, std::string_view s, bool allowInterrupts = true); + +/** + * Read a line from a file descriptor. + */ +std::string readLine(int fd); + +/** + * Write a line to a file descriptor. + */ +void writeLine(int fd, std::string s); + +/** + * Read a file descriptor until EOF occurs. + */ +std::string drainFD(int fd, bool block = true, const size_t reserveSize=0); + +void drainFD(int fd, Sink & sink, bool block = true); + +/** + * Automatic cleanup of resources. + */ +class AutoCloseFD +{ + int fd; +public: + AutoCloseFD(); + AutoCloseFD(int fd); + AutoCloseFD(const AutoCloseFD & fd) = delete; + AutoCloseFD(AutoCloseFD&& fd); + ~AutoCloseFD(); + AutoCloseFD& operator =(const AutoCloseFD & fd) = delete; + AutoCloseFD& operator =(AutoCloseFD&& fd); + int get() const; + explicit operator bool() const; + int release(); + void close(); + void fsync(); +}; + +class Pipe +{ +public: + AutoCloseFD readSide, writeSide; + void create(); + void close(); +}; + +/** + * Close all file descriptors except those listed in the given set. + * Good practice in child processes. + */ +void closeMostFDs(const std::set & exceptions); + +/** + * Set the close-on-exec flag for the given file descriptor. + */ +void closeOnExec(int fd); + +MakeError(EndOfFile, Error); + +} diff --git a/src/libutil/file-system.cc b/src/libutil/file-system.cc new file mode 100644 index 000000000..c96effff9 --- /dev/null +++ b/src/libutil/file-system.cc @@ -0,0 +1,647 @@ +#include "environment-variables.hh" +#include "file-system.hh" +#include "signals.hh" +#include "finally.hh" +#include "serialise.hh" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace fs = std::filesystem; + +namespace nix { + +Path absPath(Path path, std::optional dir, bool resolveSymlinks) +{ + if (path[0] != '/') { + if (!dir) { +#ifdef __GNU__ + /* GNU (aka. GNU/Hurd) doesn't have any limitation on path + lengths and doesn't define `PATH_MAX'. */ + char *buf = getcwd(NULL, 0); + if (buf == NULL) +#else + char buf[PATH_MAX]; + if (!getcwd(buf, sizeof(buf))) +#endif + throw SysError("cannot get cwd"); + path = concatStrings(buf, "/", path); +#ifdef __GNU__ + free(buf); +#endif + } else + path = concatStrings(*dir, "/", path); + } + return canonPath(path, resolveSymlinks); +} + + +Path canonPath(PathView path, bool resolveSymlinks) +{ + assert(path != ""); + + std::string s; + s.reserve(256); + + if (path[0] != '/') + throw Error("not an absolute path: '%1%'", path); + + std::string temp; + + /* Count the number of times we follow a symlink and stop at some + arbitrary (but high) limit to prevent infinite loops. */ + unsigned int followCount = 0, maxFollow = 1024; + + while (1) { + + /* Skip slashes. */ + while (!path.empty() && path[0] == '/') path.remove_prefix(1); + if (path.empty()) break; + + /* Ignore `.'. */ + if (path == "." || path.substr(0, 2) == "./") + path.remove_prefix(1); + + /* If `..', delete the last component. */ + else if (path == ".." || path.substr(0, 3) == "../") + { + if (!s.empty()) s.erase(s.rfind('/')); + path.remove_prefix(2); + } + + /* Normal component; copy it. 
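`canonPath` is easiest to pin down with a few concrete inputs; a small sketch of the behaviour promised by the implementation above (symlink resolution stays off by default):

```cpp
#include "file-system.hh"

#include <cassert>

using namespace nix;

int main()
{
    // '.', '..' and repeated slashes are folded away; symlinks are only
    // resolved when the second argument is true.
    assert(canonPath("/foo//./bar/../baz") == "/foo/baz");
    assert(canonPath("/..") == "/");
    assert(canonPath("/a/b/c/") == "/a/b/c");
    return 0;
}
```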
*/ + else { + s += '/'; + if (const auto slash = path.find('/'); slash == std::string::npos) { + s += path; + path = {}; + } else { + s += path.substr(0, slash); + path = path.substr(slash); + } + + /* If s points to a symlink, resolve it and continue from there */ + if (resolveSymlinks && isLink(s)) { + if (++followCount >= maxFollow) + throw Error("infinite symlink recursion in path '%1%'", path); + temp = concatStrings(readLink(s), path); + path = temp; + if (!temp.empty() && temp[0] == '/') { + s.clear(); /* restart for symlinks pointing to absolute path */ + } else { + s = dirOf(s); + if (s == "/") { // we don’t want trailing slashes here, which dirOf only produces if s = / + s.clear(); + } + } + } + } + } + + return s.empty() ? "/" : std::move(s); +} + + +Path dirOf(const PathView path) +{ + Path::size_type pos = path.rfind('/'); + if (pos == std::string::npos) + return "."; + return pos == 0 ? "/" : Path(path, 0, pos); +} + + +std::string_view baseNameOf(std::string_view path) +{ + if (path.empty()) + return ""; + + auto last = path.size() - 1; + if (path[last] == '/' && last > 0) + last -= 1; + + auto pos = path.rfind('/', last); + if (pos == std::string::npos) + pos = 0; + else + pos += 1; + + return path.substr(pos, last - pos + 1); +} + + +bool isInDir(std::string_view path, std::string_view dir) +{ + return path.substr(0, 1) == "/" + && path.substr(0, dir.size()) == dir + && path.size() >= dir.size() + 2 + && path[dir.size()] == '/'; +} + + +bool isDirOrInDir(std::string_view path, std::string_view dir) +{ + return path == dir || isInDir(path, dir); +} + + +struct stat stat(const Path & path) +{ + struct stat st; + if (stat(path.c_str(), &st)) + throw SysError("getting status of '%1%'", path); + return st; +} + + +struct stat lstat(const Path & path) +{ + struct stat st; + if (lstat(path.c_str(), &st)) + throw SysError("getting status of '%1%'", path); + return st; +} + + +bool pathExists(const Path & path) +{ + int res; + struct stat st; + res = lstat(path.c_str(), &st); + if (!res) return true; + if (errno != ENOENT && errno != ENOTDIR) + throw SysError("getting status of %1%", path); + return false; +} + +bool pathAccessible(const Path & path) +{ + try { + return pathExists(path); + } catch (SysError & e) { + // swallow EPERM + if (e.errNo == EPERM) return false; + throw; + } +} + + +Path readLink(const Path & path) +{ + checkInterrupt(); + std::vector buf; + for (ssize_t bufSize = PATH_MAX/4; true; bufSize += bufSize/2) { + buf.resize(bufSize); + ssize_t rlSize = readlink(path.c_str(), buf.data(), bufSize); + if (rlSize == -1) + if (errno == EINVAL) + throw Error("'%1%' is not a symlink", path); + else + throw SysError("reading symbolic link '%1%'", path); + else if (rlSize < bufSize) + return std::string(buf.data(), rlSize); + } +} + + +bool isLink(const Path & path) +{ + struct stat st = lstat(path); + return S_ISLNK(st.st_mode); +} + + +DirEntries readDirectory(DIR *dir, const Path & path) +{ + DirEntries entries; + entries.reserve(64); + + struct dirent * dirent; + while (errno = 0, dirent = readdir(dir)) { /* sic */ + checkInterrupt(); + std::string name = dirent->d_name; + if (name == "." 
|| name == "..") continue; + entries.emplace_back(name, dirent->d_ino, +#ifdef HAVE_STRUCT_DIRENT_D_TYPE + dirent->d_type +#else + DT_UNKNOWN +#endif + ); + } + if (errno) throw SysError("reading directory '%1%'", path); + + return entries; +} + +DirEntries readDirectory(const Path & path) +{ + AutoCloseDir dir(opendir(path.c_str())); + if (!dir) throw SysError("opening directory '%1%'", path); + + return readDirectory(dir.get(), path); +} + + +unsigned char getFileType(const Path & path) +{ + struct stat st = lstat(path); + if (S_ISDIR(st.st_mode)) return DT_DIR; + if (S_ISLNK(st.st_mode)) return DT_LNK; + if (S_ISREG(st.st_mode)) return DT_REG; + return DT_UNKNOWN; +} + + +std::string readFile(const Path & path) +{ + AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC); + if (!fd) + throw SysError("opening file '%1%'", path); + return readFile(fd.get()); +} + + +void readFile(const Path & path, Sink & sink) +{ + AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC); + if (!fd) + throw SysError("opening file '%s'", path); + drainFD(fd.get(), sink); +} + + +void writeFile(const Path & path, std::string_view s, mode_t mode, bool sync) +{ + AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode); + if (!fd) + throw SysError("opening file '%1%'", path); + try { + writeFull(fd.get(), s); + } catch (Error & e) { + e.addTrace({}, "writing file '%1%'", path); + throw; + } + if (sync) + fd.fsync(); + // Explicitly close to make sure exceptions are propagated. + fd.close(); + if (sync) + syncParent(path); +} + + +void writeFile(const Path & path, Source & source, mode_t mode, bool sync) +{ + AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode); + if (!fd) + throw SysError("opening file '%1%'", path); + + std::vector buf(64 * 1024); + + try { + while (true) { + try { + auto n = source.read(buf.data(), buf.size()); + writeFull(fd.get(), {buf.data(), n}); + } catch (EndOfFile &) { break; } + } + } catch (Error & e) { + e.addTrace({}, "writing file '%1%'", path); + throw; + } + if (sync) + fd.fsync(); + // Explicitly close to make sure exceptions are propagated. + fd.close(); + if (sync) + syncParent(path); +} + +void syncParent(const Path & path) +{ + AutoCloseFD fd = open(dirOf(path).c_str(), O_RDONLY, 0); + if (!fd) + throw SysError("opening file '%1%'", path); + fd.fsync(); +} + + +static void _deletePath(int parentfd, const Path & path, uint64_t & bytesFreed) +{ + checkInterrupt(); + + std::string name(baseNameOf(path)); + + struct stat st; + if (fstatat(parentfd, name.c_str(), &st, AT_SYMLINK_NOFOLLOW) == -1) { + if (errno == ENOENT) return; + throw SysError("getting status of '%1%'", path); + } + + if (!S_ISDIR(st.st_mode)) { + /* We are about to delete a file. Will it likely free space? */ + + switch (st.st_nlink) { + /* Yes: last link. */ + case 1: + bytesFreed += st.st_size; + break; + /* Maybe: yes, if 'auto-optimise-store' or manual optimisation + was performed. Instead of checking for real let's assume + it's an optimised file and space will be freed. + + In worst case we will double count on freed space for files + with exactly two hardlinks for unoptimised packages. + */ + case 2: + bytesFreed += st.st_size; + break; + /* No: 3+ links. */ + default: + break; + } + } + + if (S_ISDIR(st.st_mode)) { + /* Make the directory accessible. 
*/ + const auto PERM_MASK = S_IRUSR | S_IWUSR | S_IXUSR; + if ((st.st_mode & PERM_MASK) != PERM_MASK) { + if (fchmodat(parentfd, name.c_str(), st.st_mode | PERM_MASK, 0) == -1) + throw SysError("chmod '%1%'", path); + } + + int fd = openat(parentfd, path.c_str(), O_RDONLY); + if (fd == -1) + throw SysError("opening directory '%1%'", path); + AutoCloseDir dir(fdopendir(fd)); + if (!dir) + throw SysError("opening directory '%1%'", path); + for (auto & i : readDirectory(dir.get(), path)) + _deletePath(dirfd(dir.get()), path + "/" + i.name, bytesFreed); + } + + int flags = S_ISDIR(st.st_mode) ? AT_REMOVEDIR : 0; + if (unlinkat(parentfd, name.c_str(), flags) == -1) { + if (errno == ENOENT) return; + throw SysError("cannot unlink '%1%'", path); + } +} + +static void _deletePath(const Path & path, uint64_t & bytesFreed) +{ + Path dir = dirOf(path); + if (dir == "") + dir = "/"; + + AutoCloseFD dirfd{open(dir.c_str(), O_RDONLY)}; + if (!dirfd) { + if (errno == ENOENT) return; + throw SysError("opening directory '%1%'", path); + } + + _deletePath(dirfd.get(), path, bytesFreed); +} + + +void deletePath(const Path & path) +{ + uint64_t dummy; + deletePath(path, dummy); +} + + +Paths createDirs(const Path & path) +{ + Paths created; + if (path == "/") return created; + + struct stat st; + if (lstat(path.c_str(), &st) == -1) { + created = createDirs(dirOf(path)); + if (mkdir(path.c_str(), 0777) == -1 && errno != EEXIST) + throw SysError("creating directory '%1%'", path); + st = lstat(path); + created.push_back(path); + } + + if (S_ISLNK(st.st_mode) && stat(path.c_str(), &st) == -1) + throw SysError("statting symlink '%1%'", path); + + if (!S_ISDIR(st.st_mode)) throw Error("'%1%' is not a directory", path); + + return created; +} + + +void deletePath(const Path & path, uint64_t & bytesFreed) +{ + //Activity act(*logger, lvlDebug, "recursively deleting path '%1%'", path); + bytesFreed = 0; + _deletePath(path, bytesFreed); +} + + +////////////////////////////////////////////////////////////////////// + +AutoDelete::AutoDelete() : del{false} {} + +AutoDelete::AutoDelete(const std::string & p, bool recursive) : path(p) +{ + del = true; + this->recursive = recursive; +} + +AutoDelete::~AutoDelete() +{ + try { + if (del) { + if (recursive) + deletePath(path); + else { + if (remove(path.c_str()) == -1) + throw SysError("cannot unlink '%1%'", path); + } + } + } catch (...) { + ignoreException(); + } +} + +void AutoDelete::cancel() +{ + del = false; +} + +void AutoDelete::reset(const Path & p, bool recursive) { + path = p; + this->recursive = recursive; + del = true; +} + +////////////////////////////////////////////////////////////////////// + +////////////////////////////////////////////////////////////////////// + +static Path tempName(Path tmpRoot, const Path & prefix, bool includePid, + std::atomic & counter) +{ + tmpRoot = canonPath(tmpRoot.empty() ? getEnv("TMPDIR").value_or("/tmp") : tmpRoot, true); + if (includePid) + return fmt("%1%/%2%-%3%-%4%", tmpRoot, prefix, getpid(), counter++); + else + return fmt("%1%/%2%-%3%", tmpRoot, prefix, counter++); +} + +Path createTempDir(const Path & tmpRoot, const Path & prefix, + bool includePid, bool useGlobalCounter, mode_t mode) +{ + static std::atomic globalCounter = 0; + std::atomic localCounter = 0; + auto & counter(useGlobalCounter ? 
globalCounter : localCounter); + + while (1) { + checkInterrupt(); + Path tmpDir = tempName(tmpRoot, prefix, includePid, counter); + if (mkdir(tmpDir.c_str(), mode) == 0) { +#if __FreeBSD__ + /* Explicitly set the group of the directory. This is to + work around around problems caused by BSD's group + ownership semantics (directories inherit the group of + the parent). For instance, the group of /tmp on + FreeBSD is "wheel", so all directories created in /tmp + will be owned by "wheel"; but if the user is not in + "wheel", then "tar" will fail to unpack archives that + have the setgid bit set on directories. */ + if (chown(tmpDir.c_str(), (uid_t) -1, getegid()) != 0) + throw SysError("setting group of directory '%1%'", tmpDir); +#endif + return tmpDir; + } + if (errno != EEXIST) + throw SysError("creating directory '%1%'", tmpDir); + } +} + + +std::pair createTempFile(const Path & prefix) +{ + Path tmpl(getEnv("TMPDIR").value_or("/tmp") + "/" + prefix + ".XXXXXX"); + // Strictly speaking, this is UB, but who cares... + // FIXME: use O_TMPFILE. + AutoCloseFD fd(mkstemp((char *) tmpl.c_str())); + if (!fd) + throw SysError("creating temporary file '%s'", tmpl); + closeOnExec(fd.get()); + return {std::move(fd), tmpl}; +} + +void createSymlink(const Path & target, const Path & link) +{ + if (symlink(target.c_str(), link.c_str())) + throw SysError("creating symlink from '%1%' to '%2%'", link, target); +} + +void replaceSymlink(const Path & target, const Path & link) +{ + for (unsigned int n = 0; true; n++) { + Path tmp = canonPath(fmt("%s/.%d_%s", dirOf(link), n, baseNameOf(link))); + + try { + createSymlink(target, tmp); + } catch (SysError & e) { + if (e.errNo == EEXIST) continue; + throw; + } + + renameFile(tmp, link); + + break; + } +} + +void setWriteTime(const fs::path & p, const struct stat & st) +{ + struct timeval times[2]; + times[0] = { + .tv_sec = st.st_atime, + .tv_usec = 0, + }; + times[1] = { + .tv_sec = st.st_mtime, + .tv_usec = 0, + }; + if (lutimes(p.c_str(), times) != 0) + throw SysError("changing modification time of '%s'", p); +} + +void copy(const fs::directory_entry & from, const fs::path & to, bool andDelete) +{ + // TODO: Rewrite the `is_*` to use `symlink_status()` + auto statOfFrom = lstat(from.path().c_str()); + auto fromStatus = from.symlink_status(); + + // Mark the directory as writable so that we can delete its children + if (andDelete && fs::is_directory(fromStatus)) { + fs::permissions(from.path(), fs::perms::owner_write, fs::perm_options::add | fs::perm_options::nofollow); + } + + + if (fs::is_symlink(fromStatus) || fs::is_regular_file(fromStatus)) { + fs::copy(from.path(), to, fs::copy_options::copy_symlinks | fs::copy_options::overwrite_existing); + } else if (fs::is_directory(fromStatus)) { + fs::create_directory(to); + for (auto & entry : fs::directory_iterator(from.path())) { + copy(entry, to / entry.path().filename(), andDelete); + } + } else { + throw Error("file '%s' has an unsupported type", from.path()); + } + + setWriteTime(to, statOfFrom); + if (andDelete) { + if (!fs::is_symlink(fromStatus)) + fs::permissions(from.path(), fs::perms::owner_write, fs::perm_options::add | fs::perm_options::nofollow); + fs::remove(from.path()); + } +} + +void renameFile(const Path & oldName, const Path & newName) +{ + fs::rename(oldName, newName); +} + +void moveFile(const Path & oldName, const Path & newName) +{ + try { + renameFile(oldName, newName); + } catch (fs::filesystem_error & e) { + auto oldPath = fs::path(oldName); + auto newPath = fs::path(newName); + // 
For the move to be as atomic as possible, copy to a temporary + // directory + fs::path temp = createTempDir(newPath.parent_path(), "rename-tmp"); + Finally removeTemp = [&]() { fs::remove(temp); }; + auto tempCopyTarget = temp / "copy-target"; + if (e.code().value() == EXDEV) { + fs::remove(newPath); + warn("Can’t rename %s as %s, copying instead", oldName, newName); + copy(fs::directory_entry(oldPath), tempCopyTarget, true); + renameFile(tempCopyTarget, newPath); + } + } +} + +////////////////////////////////////////////////////////////////////// + +} diff --git a/src/libutil/file-system.hh b/src/libutil/file-system.hh new file mode 100644 index 000000000..4637507b3 --- /dev/null +++ b/src/libutil/file-system.hh @@ -0,0 +1,238 @@ +#pragma once +/** + * @file + * + * Utiltities for working with the file sytem and file paths. + */ + +#include "types.hh" +#include "error.hh" +#include "logging.hh" +#include "file-descriptor.hh" + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#ifndef HAVE_STRUCT_DIRENT_D_TYPE +#define DT_UNKNOWN 0 +#define DT_REG 1 +#define DT_LNK 2 +#define DT_DIR 3 +#endif + +namespace nix { + +struct Sink; +struct Source; + +/** + * @return An absolutized path, resolving paths relative to the + * specified directory, or the current directory otherwise. The path + * is also canonicalised. + */ +Path absPath(Path path, + std::optional dir = {}, + bool resolveSymlinks = false); + +/** + * Canonicalise a path by removing all `.` or `..` components and + * double or trailing slashes. Optionally resolves all symlink + * components such that each component of the resulting path is *not* + * a symbolic link. + */ +Path canonPath(PathView path, bool resolveSymlinks = false); + +/** + * @return The directory part of the given canonical path, i.e., + * everything before the final `/`. If the path is the root or an + * immediate child thereof (e.g., `/foo`), this means `/` + * is returned. + */ +Path dirOf(const PathView path); + +/** + * @return the base name of the given canonical path, i.e., everything + * following the final `/` (trailing slashes are removed). + */ +std::string_view baseNameOf(std::string_view path); + +/** + * Check whether 'path' is a descendant of 'dir'. Both paths must be + * canonicalized. + */ +bool isInDir(std::string_view path, std::string_view dir); + +/** + * Check whether 'path' is equal to 'dir' or a descendant of + * 'dir'. Both paths must be canonicalized. + */ +bool isDirOrInDir(std::string_view path, std::string_view dir); + +/** + * Get status of `path`. + */ +struct stat stat(const Path & path); +struct stat lstat(const Path & path); + +/** + * @return true iff the given path exists. + */ +bool pathExists(const Path & path); + +/** + * A version of pathExists that returns false on a permission error. + * Useful for inferring default paths across directories that might not + * be readable. + * @return true iff the given path can be accessed and exists + */ +bool pathAccessible(const Path & path); + +/** + * Read the contents (target) of a symbolic link. The result is not + * in any way canonicalised. + */ +Path readLink(const Path & path); + +bool isLink(const Path & path); + +/** + * Read the contents of a directory. The entries `.` and `..` are + * removed. 
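The path predicates documented above operate purely on canonical path strings; a few expected results as a sketch:

```cpp
#include "file-system.hh"

#include <cassert>

using namespace nix;

int main()
{
    assert(dirOf("/nix/store/abc") == "/nix/store");
    assert(dirOf("/foo") == "/");
    assert(baseNameOf("/nix/store/abc/") == "abc");   // trailing slash is dropped

    // Both arguments must already be canonical paths.
    assert(isInDir("/nix/store/abc", "/nix/store"));
    assert(!isInDir("/nix/storeX", "/nix/store"));
    assert(isDirOrInDir("/nix/store", "/nix/store"));
    return 0;
}
```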
+ */ +struct DirEntry +{ + std::string name; + ino_t ino; + /** + * one of DT_* + */ + unsigned char type; + DirEntry(std::string name, ino_t ino, unsigned char type) + : name(std::move(name)), ino(ino), type(type) { } +}; + +typedef std::vector DirEntries; + +DirEntries readDirectory(const Path & path); + +unsigned char getFileType(const Path & path); + +/** + * Read the contents of a file into a string. + */ +std::string readFile(const Path & path); +void readFile(const Path & path, Sink & sink); + +/** + * Write a string to a file. + */ +void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, bool sync = false); + +void writeFile(const Path & path, Source & source, mode_t mode = 0666, bool sync = false); + +/** + * Flush a file's parent directory to disk + */ +void syncParent(const Path & path); + +/** + * Delete a path; i.e., in the case of a directory, it is deleted + * recursively. It's not an error if the path does not exist. The + * second variant returns the number of bytes and blocks freed. + */ +void deletePath(const Path & path); + +void deletePath(const Path & path, uint64_t & bytesFreed); + +/** + * Create a directory and all its parents, if necessary. Returns the + * list of created directories, in order of creation. + */ +Paths createDirs(const Path & path); +inline Paths createDirs(PathView path) +{ + return createDirs(Path(path)); +} + +/** + * Create a symlink. + */ +void createSymlink(const Path & target, const Path & link); + +/** + * Atomically create or replace a symlink. + */ +void replaceSymlink(const Path & target, const Path & link); + +void renameFile(const Path & src, const Path & dst); + +/** + * Similar to 'renameFile', but fallback to a copy+remove if `src` and `dst` + * are on a different filesystem. + * + * Beware that this might not be atomic because of the copy that happens behind + * the scenes + */ +void moveFile(const Path & src, const Path & dst); + + +/** + * Automatic cleanup of resources. + */ +class AutoDelete +{ + Path path; + bool del; + bool recursive; +public: + AutoDelete(); + AutoDelete(const Path & p, bool recursive = true); + ~AutoDelete(); + void cancel(); + void reset(const Path & p, bool recursive = true); + operator Path() const { return path; } + operator PathView() const { return path; } +}; + + +struct DIRDeleter +{ + void operator()(DIR * dir) const { + closedir(dir); + } +}; + +typedef std::unique_ptr AutoCloseDir; + + +/** + * Create a temporary directory. + */ +Path createTempDir(const Path & tmpRoot = "", const Path & prefix = "nix", + bool includePid = true, bool useGlobalCounter = true, mode_t mode = 0755); + +/** + * Create a temporary file, returning a file handle and its path. + */ +std::pair createTempFile(const Path & prefix = "nix"); + + +/** + * Used in various places. + */ +typedef std::function PathFilter; + +extern PathFilter defaultPathFilter; + +} diff --git a/src/libutil/filesystem.cc b/src/libutil/filesystem.cc deleted file mode 100644 index 11cc0c0e7..000000000 --- a/src/libutil/filesystem.cc +++ /dev/null @@ -1,162 +0,0 @@ -#include -#include -#include - -#include "finally.hh" -#include "util.hh" -#include "types.hh" - -namespace fs = std::filesystem; - -namespace nix { - -static Path tempName(Path tmpRoot, const Path & prefix, bool includePid, - std::atomic & counter) -{ - tmpRoot = canonPath(tmpRoot.empty() ? 
getEnv("TMPDIR").value_or("/tmp") : tmpRoot, true); - if (includePid) - return fmt("%1%/%2%-%3%-%4%", tmpRoot, prefix, getpid(), counter++); - else - return fmt("%1%/%2%-%3%", tmpRoot, prefix, counter++); -} - -Path createTempDir(const Path & tmpRoot, const Path & prefix, - bool includePid, bool useGlobalCounter, mode_t mode) -{ - static std::atomic globalCounter = 0; - std::atomic localCounter = 0; - auto & counter(useGlobalCounter ? globalCounter : localCounter); - - while (1) { - checkInterrupt(); - Path tmpDir = tempName(tmpRoot, prefix, includePid, counter); - if (mkdir(tmpDir.c_str(), mode) == 0) { -#if __FreeBSD__ - /* Explicitly set the group of the directory. This is to - work around around problems caused by BSD's group - ownership semantics (directories inherit the group of - the parent). For instance, the group of /tmp on - FreeBSD is "wheel", so all directories created in /tmp - will be owned by "wheel"; but if the user is not in - "wheel", then "tar" will fail to unpack archives that - have the setgid bit set on directories. */ - if (chown(tmpDir.c_str(), (uid_t) -1, getegid()) != 0) - throw SysError("setting group of directory '%1%'", tmpDir); -#endif - return tmpDir; - } - if (errno != EEXIST) - throw SysError("creating directory '%1%'", tmpDir); - } -} - - -std::pair createTempFile(const Path & prefix) -{ - Path tmpl(getEnv("TMPDIR").value_or("/tmp") + "/" + prefix + ".XXXXXX"); - // Strictly speaking, this is UB, but who cares... - // FIXME: use O_TMPFILE. - AutoCloseFD fd(mkstemp((char *) tmpl.c_str())); - if (!fd) - throw SysError("creating temporary file '%s'", tmpl); - closeOnExec(fd.get()); - return {std::move(fd), tmpl}; -} - -void createSymlink(const Path & target, const Path & link) -{ - if (symlink(target.c_str(), link.c_str())) - throw SysError("creating symlink from '%1%' to '%2%'", link, target); -} - -void replaceSymlink(const Path & target, const Path & link) -{ - for (unsigned int n = 0; true; n++) { - Path tmp = canonPath(fmt("%s/.%d_%s", dirOf(link), n, baseNameOf(link))); - - try { - createSymlink(target, tmp); - } catch (SysError & e) { - if (e.errNo == EEXIST) continue; - throw; - } - - renameFile(tmp, link); - - break; - } -} - -void setWriteTime(const fs::path & p, const struct stat & st) -{ - struct timeval times[2]; - times[0] = { - .tv_sec = st.st_atime, - .tv_usec = 0, - }; - times[1] = { - .tv_sec = st.st_mtime, - .tv_usec = 0, - }; - if (lutimes(p.c_str(), times) != 0) - throw SysError("changing modification time of '%s'", p); -} - -void copy(const fs::directory_entry & from, const fs::path & to, bool andDelete) -{ - // TODO: Rewrite the `is_*` to use `symlink_status()` - auto statOfFrom = lstat(from.path().c_str()); - auto fromStatus = from.symlink_status(); - - // Mark the directory as writable so that we can delete its children - if (andDelete && fs::is_directory(fromStatus)) { - fs::permissions(from.path(), fs::perms::owner_write, fs::perm_options::add | fs::perm_options::nofollow); - } - - - if (fs::is_symlink(fromStatus) || fs::is_regular_file(fromStatus)) { - fs::copy(from.path(), to, fs::copy_options::copy_symlinks | fs::copy_options::overwrite_existing); - } else if (fs::is_directory(fromStatus)) { - fs::create_directory(to); - for (auto & entry : fs::directory_iterator(from.path())) { - copy(entry, to / entry.path().filename(), andDelete); - } - } else { - throw Error("file '%s' has an unsupported type", from.path()); - } - - setWriteTime(to, statOfFrom); - if (andDelete) { - if (!fs::is_symlink(fromStatus)) - 
fs::permissions(from.path(), fs::perms::owner_write, fs::perm_options::add | fs::perm_options::nofollow); - fs::remove(from.path()); - } -} - -void renameFile(const Path & oldName, const Path & newName) -{ - fs::rename(oldName, newName); -} - -void moveFile(const Path & oldName, const Path & newName) -{ - try { - renameFile(oldName, newName); - } catch (fs::filesystem_error & e) { - auto oldPath = fs::path(oldName); - auto newPath = fs::path(newName); - // For the move to be as atomic as possible, copy to a temporary - // directory - fs::path temp = createTempDir(newPath.parent_path(), "rename-tmp"); - Finally removeTemp = [&]() { fs::remove(temp); }; - auto tempCopyTarget = temp / "copy-target"; - if (e.code().value() == EXDEV) { - fs::remove(newPath); - warn("Can’t rename %s as %s, copying instead", oldName, newName); - copy(fs::directory_entry(oldPath), tempCopyTarget, true); - renameFile(tempCopyTarget, newPath); - } - } -} - -} diff --git a/src/libutil/fs-sink.hh b/src/libutil/fs-sink.hh index c22edd390..bf54b7301 100644 --- a/src/libutil/fs-sink.hh +++ b/src/libutil/fs-sink.hh @@ -4,6 +4,7 @@ #include "types.hh" #include "serialise.hh" #include "source-accessor.hh" +#include "file-system.hh" namespace nix { diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc index e297c245b..144f7ae7e 100644 --- a/src/libutil/hash.cc +++ b/src/libutil/hash.cc @@ -9,7 +9,6 @@ #include "hash.hh" #include "archive.hh" #include "split.hh" -#include "util.hh" #include #include diff --git a/src/libutil/hash.hh b/src/libutil/hash.hh index cab3e6eca..6ade6555c 100644 --- a/src/libutil/hash.hh +++ b/src/libutil/hash.hh @@ -3,6 +3,7 @@ #include "types.hh" #include "serialise.hh" +#include "file-system.hh" namespace nix { diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index 9d7a141b3..60b0865bf 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -1,4 +1,7 @@ #include "logging.hh" +#include "file-descriptor.hh" +#include "environment-variables.hh" +#include "terminal.hh" #include "util.hh" #include "config.hh" diff --git a/src/libutil/memory-source-accessor.cc b/src/libutil/memory-source-accessor.cc new file mode 100644 index 000000000..f34f6c091 --- /dev/null +++ b/src/libutil/memory-source-accessor.cc @@ -0,0 +1,124 @@ +#include "memory-source-accessor.hh" + +namespace nix { + +MemorySourceAccessor::File * +MemorySourceAccessor::open(const CanonPath & path, std::optional create) +{ + File * cur = &root; + + bool newF = false; + + for (std::string_view name : path) + { + auto * curDirP = std::get_if(&cur->raw); + if (!curDirP) + return nullptr; + auto & curDir = *curDirP; + + auto i = curDir.contents.find(name); + if (i == curDir.contents.end()) { + if (!create) + return nullptr; + else { + newF = true; + i = curDir.contents.insert(i, { + std::string { name }, + File::Directory {}, + }); + } + } + cur = &i->second; + } + + if (newF && create) *cur = std::move(*create); + + return cur; +} + +std::string MemorySourceAccessor::readFile(const CanonPath & path) +{ + auto * f = open(path, std::nullopt); + if (!f) + throw Error("file '%s' does not exist", path); + if (auto * r = std::get_if(&f->raw)) + return r->contents; + else + throw Error("file '%s' is not a regular file", path); +} + +bool MemorySourceAccessor::pathExists(const CanonPath & path) +{ + return open(path, std::nullopt); +} + +MemorySourceAccessor::Stat MemorySourceAccessor::File::lstat() const +{ + return std::visit(overloaded { + [](const Regular & r) { + return Stat { + .type = tRegular, + .fileSize = 
r.contents.size(), + .isExecutable = r.executable, + }; + }, + [](const Directory &) { + return Stat { + .type = tDirectory, + }; + }, + [](const Symlink &) { + return Stat { + .type = tSymlink, + }; + }, + }, this->raw); +} + +std::optional +MemorySourceAccessor::maybeLstat(const CanonPath & path) +{ + const auto * f = open(path, std::nullopt); + return f ? std::optional { f->lstat() } : std::nullopt; +} + +MemorySourceAccessor::DirEntries MemorySourceAccessor::readDirectory(const CanonPath & path) +{ + auto * f = open(path, std::nullopt); + if (!f) + throw Error("file '%s' does not exist", path); + if (auto * d = std::get_if(&f->raw)) { + DirEntries res; + for (auto & [name, file] : d->contents) + res.insert_or_assign(name, file.lstat().type); + return res; + } else + throw Error("file '%s' is not a directory", path); + return {}; +} + +std::string MemorySourceAccessor::readLink(const CanonPath & path) +{ + auto * f = open(path, std::nullopt); + if (!f) + throw Error("file '%s' does not exist", path); + if (auto * s = std::get_if(&f->raw)) + return s->target; + else + throw Error("file '%s' is not a symbolic link", path); +} + +CanonPath MemorySourceAccessor::addFile(CanonPath path, std::string && contents) +{ + auto * f = open(path, File { File::Regular {} }); + if (!f) + throw Error("file '%s' cannot be made because some parent file is not a directory", path); + if (auto * r = std::get_if(&f->raw)) + r->contents = std::move(contents); + else + throw Error("file '%s' is not a regular file", path); + + return path; +} + +} diff --git a/src/libutil/memory-source-accessor.hh b/src/libutil/memory-source-accessor.hh new file mode 100644 index 000000000..014fa8098 --- /dev/null +++ b/src/libutil/memory-source-accessor.hh @@ -0,0 +1,74 @@ +#include "source-accessor.hh" +#include "variant-wrapper.hh" + +namespace nix { + +/** + * An source accessor for an in-memory file system. + */ +struct MemorySourceAccessor : virtual SourceAccessor +{ + /** + * In addition to being part of the implementation of + * `MemorySourceAccessor`, this has a side benefit of nicely + * defining what a "file system object" is in Nix. + */ + struct File { + struct Regular { + bool executable = false; + std::string contents; + + GENERATE_CMP(Regular, me->executable, me->contents); + }; + + struct Directory { + using Name = std::string; + + std::map> contents; + + GENERATE_CMP(Directory, me->contents); + }; + + struct Symlink { + std::string target; + + GENERATE_CMP(Symlink, me->target); + }; + + using Raw = std::variant; + Raw raw; + + MAKE_WRAPPER_CONSTRUCTOR(File); + + GENERATE_CMP(File, me->raw); + + Stat lstat() const; + }; + + File root { File::Directory {} }; + + GENERATE_CMP(MemorySourceAccessor, me->root); + + std::string readFile(const CanonPath & path) override; + bool pathExists(const CanonPath & path) override; + std::optional maybeLstat(const CanonPath & path) override; + DirEntries readDirectory(const CanonPath & path) override; + std::string readLink(const CanonPath & path) override; + + /** + * @param create If present, create this file and any parent directories + * that are needed. + * + * Return null if + * + * - `create = false`: File does not exist. + * + * - `create = true`: some parent file was not a dir, so couldn't + * look/create inside. 
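A test-style sketch of the in-memory accessor above; it assumes `CanonPath` and `SourceAccessor::Type` behave as elsewhere in libutil, and the `etc/motd` content is made up:

```cpp
#include "memory-source-accessor.hh"

#include <cassert>

using namespace nix;

int main()
{
    MemorySourceAccessor fs;

    // addFile() creates the missing parent directories on the way down.
    fs.addFile(CanonPath("etc/motd"), "hello\n");

    assert(fs.pathExists(CanonPath("etc/motd")));
    assert(fs.readFile(CanonPath("etc/motd")) == "hello\n");

    auto st = fs.maybeLstat(CanonPath("etc"));
    assert(st && st->type == SourceAccessor::tDirectory);

    // Listing a directory yields name -> type entries.
    auto entries = fs.readDirectory(CanonPath("etc"));
    assert(entries.count("motd") == 1);
    return 0;
}
```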
+ */ + File * open(const CanonPath & path, std::optional create); + + CanonPath addFile(CanonPath path, std::string && contents); +}; + +} diff --git a/src/libutil/monitor-fd.hh b/src/libutil/monitor-fd.hh index 86d0115fc..228fb13f8 100644 --- a/src/libutil/monitor-fd.hh +++ b/src/libutil/monitor-fd.hh @@ -10,6 +10,8 @@ #include #include +#include "signals.hh" + namespace nix { diff --git a/src/libutil/namespaces.cc b/src/libutil/namespaces.cc index f66accb10..a789b321e 100644 --- a/src/libutil/namespaces.cc +++ b/src/libutil/namespaces.cc @@ -1,13 +1,22 @@ -#if __linux__ - -#include "namespaces.hh" +#include "current-process.hh" #include "util.hh" #include "finally.hh" +#include "file-system.hh" +#include "processes.hh" +#include "signals.hh" + +#if __linux__ +# include +# include +# include "cgroup.hh" +#endif #include namespace nix { +#if __linux__ + bool userNamespacesSupported() { static auto res = [&]() -> bool @@ -92,6 +101,60 @@ bool mountAndPidNamespacesSupported() return res; } +#endif + + +////////////////////////////////////////////////////////////////////// + +#if __linux__ +static AutoCloseFD fdSavedMountNamespace; +static AutoCloseFD fdSavedRoot; +#endif + +void saveMountNamespace() +{ +#if __linux__ + static std::once_flag done; + std::call_once(done, []() { + fdSavedMountNamespace = open("/proc/self/ns/mnt", O_RDONLY); + if (!fdSavedMountNamespace) + throw SysError("saving parent mount namespace"); + + fdSavedRoot = open("/proc/self/root", O_RDONLY); + }); +#endif } +void restoreMountNamespace() +{ +#if __linux__ + try { + auto savedCwd = absPath("."); + + if (fdSavedMountNamespace && setns(fdSavedMountNamespace.get(), CLONE_NEWNS) == -1) + throw SysError("restoring parent mount namespace"); + + if (fdSavedRoot) { + if (fchdir(fdSavedRoot.get())) + throw SysError("chdir into saved root"); + if (chroot(".")) + throw SysError("chroot into saved root"); + } + + if (chdir(savedCwd.c_str()) == -1) + throw SysError("restoring cwd"); + } catch (Error & e) { + debug(e.msg()); + } #endif +} + +void unshareFilesystem() +{ +#ifdef __linux__ + if (unshare(CLONE_FS) != 0 && errno != EPERM) + throw SysError("unsharing filesystem state in download thread"); +#endif +} + +} diff --git a/src/libutil/namespaces.hh b/src/libutil/namespaces.hh index 0b7eeb66c..7e4e921a8 100644 --- a/src/libutil/namespaces.hh +++ b/src/libutil/namespaces.hh @@ -1,8 +1,31 @@ #pragma once ///@file +#include + +#include "types.hh" + namespace nix { +/** + * Save the current mount namespace. Ignored if called more than + * once. + */ +void saveMountNamespace(); + +/** + * Restore the mount namespace saved by saveMountNamespace(). Ignored + * if saveMountNamespace() was never called. + */ +void restoreMountNamespace(); + +/** + * Cause this thread to not share any FS attributes with the main + * thread, because this causes setns() in restoreMountNamespace() to + * fail. 
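A sketch of the call pattern the namespace helpers above are designed for; on non-Linux platforms the calls compile to no-ops, and the surrounding steps are only indicated in comments:

```cpp
#include "namespaces.hh"

using namespace nix;

int main()
{
    // Record the original mount namespace once, early in main().
    saveMountNamespace();

    // ... the process may later enter a private mount namespace,
    // e.g. for a sandboxed build ...

    // A worker thread that must not be dragged along by a later setns()
    // would call unshareFilesystem() instead.

    // Before handing control back to the user (say, to exec a shell),
    // return to where we started.
    restoreMountNamespace();
    return 0;
}
```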
+ */ +void unshareFilesystem(); + #if __linux__ bool userNamespacesSupported(); diff --git a/src/libutil/posix-source-accessor.cc b/src/libutil/posix-source-accessor.cc index d5e32d989..dc96f84e5 100644 --- a/src/libutil/posix-source-accessor.cc +++ b/src/libutil/posix-source-accessor.cc @@ -1,4 +1,5 @@ #include "posix-source-accessor.hh" +#include "signals.hh" namespace nix { diff --git a/src/libutil/processes.cc b/src/libutil/processes.cc new file mode 100644 index 000000000..91a0ea66f --- /dev/null +++ b/src/libutil/processes.cc @@ -0,0 +1,421 @@ +#include "current-process.hh" +#include "environment-variables.hh" +#include "signals.hh" +#include "processes.hh" +#include "finally.hh" +#include "serialise.hh" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef __APPLE__ +# include +#endif + +#ifdef __linux__ +# include +# include +#endif + + +namespace nix { + +Pid::Pid() +{ +} + + +Pid::Pid(pid_t pid) + : pid(pid) +{ +} + + +Pid::~Pid() +{ + if (pid != -1) kill(); +} + + +void Pid::operator =(pid_t pid) +{ + if (this->pid != -1 && this->pid != pid) kill(); + this->pid = pid; + killSignal = SIGKILL; // reset signal to default +} + + +Pid::operator pid_t() +{ + return pid; +} + + +int Pid::kill() +{ + assert(pid != -1); + + debug("killing process %1%", pid); + + /* Send the requested signal to the child. If it has its own + process group, send the signal to every process in the child + process group (which hopefully includes *all* its children). */ + if (::kill(separatePG ? -pid : pid, killSignal) != 0) { + /* On BSDs, killing a process group will return EPERM if all + processes in the group are zombies (or something like + that). So try to detect and ignore that situation. */ +#if __FreeBSD__ || __APPLE__ + if (errno != EPERM || ::kill(pid, 0) != 0) +#endif + logError(SysError("killing process %d", pid).info()); + } + + return wait(); +} + + +int Pid::wait() +{ + assert(pid != -1); + while (1) { + int status; + int res = waitpid(pid, &status, 0); + if (res == pid) { + pid = -1; + return status; + } + if (errno != EINTR) + throw SysError("cannot get exit status of PID %d", pid); + checkInterrupt(); + } +} + + +void Pid::setSeparatePG(bool separatePG) +{ + this->separatePG = separatePG; +} + + +void Pid::setKillSignal(int signal) +{ + this->killSignal = signal; +} + + +pid_t Pid::release() +{ + pid_t p = pid; + pid = -1; + return p; +} + + +void killUser(uid_t uid) +{ + debug("killing all processes running under uid '%1%'", uid); + + assert(uid != 0); /* just to be safe... */ + + /* The system call kill(-1, sig) sends the signal `sig' to all + users to which the current process can send signals. So we + fork a process, switch to uid, and send a mass kill. */ + + Pid pid = startProcess([&]() { + + if (setuid(uid) == -1) + throw SysError("setting uid"); + + while (true) { +#ifdef __APPLE__ + /* OSX's kill syscall takes a third parameter that, among + other things, determines if kill(-1, signo) affects the + calling process. 
In the OSX libc, it's set to true, + which means "follow POSIX", which we don't want here + */ + if (syscall(SYS_kill, -1, SIGKILL, false) == 0) break; +#else + if (kill(-1, SIGKILL) == 0) break; +#endif + if (errno == ESRCH || errno == EPERM) break; /* no more processes */ + if (errno != EINTR) + throw SysError("cannot kill processes for uid '%1%'", uid); + } + + _exit(0); + }); + + int status = pid.wait(); + if (status != 0) + throw Error("cannot kill processes for uid '%1%': %2%", uid, statusToString(status)); + + /* !!! We should really do some check to make sure that there are + no processes left running under `uid', but there is no portable + way to do so (I think). The most reliable way may be `ps -eo + uid | grep -q $uid'. */ +} + + +////////////////////////////////////////////////////////////////////// + + +/* Wrapper around vfork to prevent the child process from clobbering + the caller's stack frame in the parent. */ +static pid_t doFork(bool allowVfork, std::function fun) __attribute__((noinline)); +static pid_t doFork(bool allowVfork, std::function fun) +{ +#ifdef __linux__ + pid_t pid = allowVfork ? vfork() : fork(); +#else + pid_t pid = fork(); +#endif + if (pid != 0) return pid; + fun(); + abort(); +} + + +#if __linux__ +static int childEntry(void * arg) +{ + auto main = (std::function *) arg; + (*main)(); + return 1; +} +#endif + + +pid_t startProcess(std::function fun, const ProcessOptions & options) +{ + std::function wrapper = [&]() { + if (!options.allowVfork) + logger = makeSimpleLogger(); + try { +#if __linux__ + if (options.dieWithParent && prctl(PR_SET_PDEATHSIG, SIGKILL) == -1) + throw SysError("setting death signal"); +#endif + fun(); + } catch (std::exception & e) { + try { + std::cerr << options.errorPrefix << e.what() << "\n"; + } catch (...) { } + } catch (...) { } + if (options.runExitHandlers) + exit(1); + else + _exit(1); + }; + + pid_t pid = -1; + + if (options.cloneFlags) { + #ifdef __linux__ + // Not supported, since then we don't know when to free the stack. 
+ assert(!(options.cloneFlags & CLONE_VM)); + + size_t stackSize = 1 * 1024 * 1024; + auto stack = (char *) mmap(0, stackSize, + PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0); + if (stack == MAP_FAILED) throw SysError("allocating stack"); + + Finally freeStack([&]() { munmap(stack, stackSize); }); + + pid = clone(childEntry, stack + stackSize, options.cloneFlags | SIGCHLD, &wrapper); + #else + throw Error("clone flags are only supported on Linux"); + #endif + } else + pid = doFork(options.allowVfork, wrapper); + + if (pid == -1) throw SysError("unable to fork"); + + return pid; +} + + +std::string runProgram(Path program, bool searchPath, const Strings & args, + const std::optional & input, bool isInteractive) +{ + auto res = runProgram(RunOptions {.program = program, .searchPath = searchPath, .args = args, .input = input, .isInteractive = isInteractive}); + + if (!statusOk(res.first)) + throw ExecError(res.first, "program '%1%' %2%", program, statusToString(res.first)); + + return res.second; +} + +// Output = error code + "standard out" output stream +std::pair runProgram(RunOptions && options) +{ + StringSink sink; + options.standardOut = &sink; + + int status = 0; + + try { + runProgram2(options); + } catch (ExecError & e) { + status = e.status; + } + + return {status, std::move(sink.s)}; +} + +void runProgram2(const RunOptions & options) +{ + checkInterrupt(); + + assert(!(options.standardIn && options.input)); + + std::unique_ptr source_; + Source * source = options.standardIn; + + if (options.input) { + source_ = std::make_unique(*options.input); + source = source_.get(); + } + + /* Create a pipe. */ + Pipe out, in; + if (options.standardOut) out.create(); + if (source) in.create(); + + ProcessOptions processOptions; + // vfork implies that the environment of the main process and the fork will + // be shared (technically this is undefined, but in practice that's the + // case), so we can't use it if we alter the environment + processOptions.allowVfork = !options.environment; + + std::optional>> resumeLoggerDefer; + if (options.isInteractive) { + logger->pause(); + resumeLoggerDefer.emplace( + []() { + logger->resume(); + } + ); + } + + /* Fork. */ + Pid pid = startProcess([&]() { + if (options.environment) + replaceEnv(*options.environment); + if (options.standardOut && dup2(out.writeSide.get(), STDOUT_FILENO) == -1) + throw SysError("dupping stdout"); + if (options.mergeStderrToStdout) + if (dup2(STDOUT_FILENO, STDERR_FILENO) == -1) + throw SysError("cannot dup stdout into stderr"); + if (source && dup2(in.readSide.get(), STDIN_FILENO) == -1) + throw SysError("dupping stdin"); + + if (options.chdir && chdir((*options.chdir).c_str()) == -1) + throw SysError("chdir failed"); + if (options.gid && setgid(*options.gid) == -1) + throw SysError("setgid failed"); + /* Drop all other groups if we're setgid. */ + if (options.gid && setgroups(0, 0) == -1) + throw SysError("setgroups failed"); + if (options.uid && setuid(*options.uid) == -1) + throw SysError("setuid failed"); + + Strings args_(options.args); + args_.push_front(options.program); + + restoreProcessContext(); + + if (options.searchPath) + execvp(options.program.c_str(), stringsToCharPtrs(args_).data()); + // This allows you to refer to a program with a pathname relative + // to the PATH variable. 
+ else + execv(options.program.c_str(), stringsToCharPtrs(args_).data()); + + throw SysError("executing '%1%'", options.program); + }, processOptions); + + out.writeSide.close(); + + std::thread writerThread; + + std::promise promise; + + Finally doJoin([&]() { + if (writerThread.joinable()) + writerThread.join(); + }); + + + if (source) { + in.readSide.close(); + writerThread = std::thread([&]() { + try { + std::vector buf(8 * 1024); + while (true) { + size_t n; + try { + n = source->read(buf.data(), buf.size()); + } catch (EndOfFile &) { + break; + } + writeFull(in.writeSide.get(), {buf.data(), n}); + } + promise.set_value(); + } catch (...) { + promise.set_exception(std::current_exception()); + } + in.writeSide.close(); + }); + } + + if (options.standardOut) + drainFD(out.readSide.get(), *options.standardOut); + + /* Wait for the child to finish. */ + int status = pid.wait(); + + /* Wait for the writer thread to finish. */ + if (source) promise.get_future().get(); + + if (status) + throw ExecError(status, "program '%1%' %2%", options.program, statusToString(status)); +} + +////////////////////////////////////////////////////////////////////// + +std::string statusToString(int status) +{ + if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { + if (WIFEXITED(status)) + return fmt("failed with exit code %1%", WEXITSTATUS(status)); + else if (WIFSIGNALED(status)) { + int sig = WTERMSIG(status); +#if HAVE_STRSIGNAL + const char * description = strsignal(sig); + return fmt("failed due to signal %1% (%2%)", sig, description); +#else + return fmt("failed due to signal %1%", sig); +#endif + } + else + return "died abnormally"; + } else return "succeeded"; +} + + +bool statusOk(int status) +{ + return WIFEXITED(status) && WEXITSTATUS(status) == 0; +} + +} diff --git a/src/libutil/processes.hh b/src/libutil/processes.hh new file mode 100644 index 000000000..978c37105 --- /dev/null +++ b/src/libutil/processes.hh @@ -0,0 +1,123 @@ +#pragma once +///@file + +#include "types.hh" +#include "error.hh" +#include "logging.hh" +#include "ansicolor.hh" + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +namespace nix { + +struct Sink; +struct Source; + +class Pid +{ + pid_t pid = -1; + bool separatePG = false; + int killSignal = SIGKILL; +public: + Pid(); + Pid(pid_t pid); + ~Pid(); + void operator =(pid_t pid); + operator pid_t(); + int kill(); + int wait(); + + void setSeparatePG(bool separatePG); + void setKillSignal(int signal); + pid_t release(); +}; + + +/** + * Kill all processes running under the specified uid by sending them + * a SIGKILL. + */ +void killUser(uid_t uid); + + +/** + * Fork a process that runs the given function, and return the child + * pid to the caller. + */ +struct ProcessOptions +{ + std::string errorPrefix = ""; + bool dieWithParent = true; + bool runExitHandlers = false; + bool allowVfork = false; + /** + * use clone() with the specified flags (Linux only) + */ + int cloneFlags = 0; +}; + +pid_t startProcess(std::function fun, const ProcessOptions & options = ProcessOptions()); + + +/** + * Run a program and return its stdout in a string (i.e., like the + * shell backtick operator). 
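A usage sketch for the two `runProgram` flavours implemented above and declared below; `echo` and `sh` are assumed to be on `PATH`:

```cpp
#include "processes.hh"

#include <iostream>

using namespace nix;

int main()
{
    // Simple form: throws ExecError on a non-zero exit status.
    std::string out = runProgram("echo", true, {"hello"});
    std::cout << out;                                  // "hello\n"

    // Options form: returns the status instead of throwing.
    auto [status, output] = runProgram(RunOptions {
        .program = "sh",
        .args = {"-c", "exit 3"},
    });
    if (!statusOk(status))
        std::cout << statusToString(status) << "\n";   // "failed with exit code 3"
    return 0;
}
```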
+ */ +std::string runProgram(Path program, bool searchPath = false, + const Strings & args = Strings(), + const std::optional & input = {}, bool isInteractive = false); + +struct RunOptions +{ + Path program; + bool searchPath = true; + Strings args; + std::optional uid; + std::optional gid; + std::optional chdir; + std::optional> environment; + std::optional input; + Source * standardIn = nullptr; + Sink * standardOut = nullptr; + bool mergeStderrToStdout = false; + bool isInteractive = false; +}; + +std::pair runProgram(RunOptions && options); + +void runProgram2(const RunOptions & options); + + +class ExecError : public Error +{ +public: + int status; + + template + ExecError(int status, const Args & ... args) + : Error(args...), status(status) + { } +}; + + +/** + * Convert the exit status of a child as returned by wait() into an + * error string. + */ +std::string statusToString(int status); + +bool statusOk(int status); + +} diff --git a/src/libutil/references.cc b/src/libutil/references.cc index 7f59b4c09..9d75606ef 100644 --- a/src/libutil/references.cc +++ b/src/libutil/references.cc @@ -1,6 +1,5 @@ #include "references.hh" #include "hash.hh" -#include "util.hh" #include "archive.hh" #include diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index 3d5121a19..725ddbb8d 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -1,5 +1,5 @@ #include "serialise.hh" -#include "util.hh" +#include "signals.hh" #include #include diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index 333c254ea..9e07226bf 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -5,6 +5,7 @@ #include "types.hh" #include "util.hh" +#include "file-descriptor.hh" namespace boost::context { struct stack_context; } diff --git a/src/libutil/signals.cc b/src/libutil/signals.cc new file mode 100644 index 000000000..4632aa319 --- /dev/null +++ b/src/libutil/signals.cc @@ -0,0 +1,188 @@ +#include "signals.hh" +#include "util.hh" +#include "error.hh" +#include "sync.hh" +#include "terminal.hh" + +#include + +namespace nix { + +std::atomic _isInterrupted = false; + +static thread_local bool interruptThrown = false; +thread_local std::function interruptCheck; + +void setInterruptThrown() +{ + interruptThrown = true; +} + +void _interrupted() +{ + /* Block user interrupts while an exception is being handled. + Throwing an exception while another exception is being handled + kills the program! */ + if (!interruptThrown && !std::uncaught_exceptions()) { + interruptThrown = true; + throw Interrupted("interrupted by the user"); + } +} + + +////////////////////////////////////////////////////////////////////// + + +/* We keep track of interrupt callbacks using integer tokens, so we can iterate + safely without having to lock the data structure while executing arbitrary + functions. + */ +struct InterruptCallbacks { + typedef int64_t Token; + + /* We use unique tokens so that we can't accidentally delete the wrong + handler because of an erroneous double delete. */ + Token nextToken = 0; + + /* Used as a list, see InterruptCallbacks comment. 
*/ + std::map> callbacks; +}; + +static Sync _interruptCallbacks; + +static void signalHandlerThread(sigset_t set) +{ + while (true) { + int signal = 0; + sigwait(&set, &signal); + + if (signal == SIGINT || signal == SIGTERM || signal == SIGHUP) + triggerInterrupt(); + + else if (signal == SIGWINCH) { + updateWindowSize(); + } + } +} + +void triggerInterrupt() +{ + _isInterrupted = true; + + { + InterruptCallbacks::Token i = 0; + while (true) { + std::function callback; + { + auto interruptCallbacks(_interruptCallbacks.lock()); + auto lb = interruptCallbacks->callbacks.lower_bound(i); + if (lb == interruptCallbacks->callbacks.end()) + break; + + callback = lb->second; + i = lb->first + 1; + } + + try { + callback(); + } catch (...) { + ignoreException(); + } + } + } +} + + +static sigset_t savedSignalMask; +static bool savedSignalMaskIsSet = false; + +void setChildSignalMask(sigset_t * sigs) +{ + assert(sigs); // C style function, but think of sigs as a reference + +#if _POSIX_C_SOURCE >= 1 || _XOPEN_SOURCE || _POSIX_SOURCE + sigemptyset(&savedSignalMask); + // There's no "assign" or "copy" function, so we rely on (math) idempotence + // of the or operator: a or a = a. + sigorset(&savedSignalMask, sigs, sigs); +#else + // Without sigorset, our best bet is to assume that sigset_t is a type that + // can be assigned directly, such as is the case for a sigset_t defined as + // an integer type. + savedSignalMask = *sigs; +#endif + + savedSignalMaskIsSet = true; +} + +void saveSignalMask() { + if (sigprocmask(SIG_BLOCK, nullptr, &savedSignalMask)) + throw SysError("querying signal mask"); + + savedSignalMaskIsSet = true; +} + +void startSignalHandlerThread() +{ + updateWindowSize(); + + saveSignalMask(); + + sigset_t set; + sigemptyset(&set); + sigaddset(&set, SIGINT); + sigaddset(&set, SIGTERM); + sigaddset(&set, SIGHUP); + sigaddset(&set, SIGPIPE); + sigaddset(&set, SIGWINCH); + if (pthread_sigmask(SIG_BLOCK, &set, nullptr)) + throw SysError("blocking signals"); + + std::thread(signalHandlerThread, set).detach(); +} + +void restoreSignals() +{ + // If startSignalHandlerThread wasn't called, that means we're not running + // in a proper libmain process, but a process that presumably manages its + // own signal handlers. Such a process should call either + // - initNix(), to be a proper libmain process + // - startSignalHandlerThread(), to resemble libmain regarding signal + // handling only + // - saveSignalMask(), for processes that define their own signal handling + // thread + // TODO: Warn about this? Have a default signal mask? The latter depends on + // whether we should generally inherit signal masks from the caller. + // I don't know what the larger unix ecosystem expects from us here. + if (!savedSignalMaskIsSet) + return; + + if (sigprocmask(SIG_SETMASK, &savedSignalMask, nullptr)) + throw SysError("restoring signals"); +} + + +/* RAII helper to automatically deregister a callback. 
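
A sketch of the intended ordering between the handler thread and child processes, composed from the functions defined above (the child body is illustrative only):

```cpp
// Sketch only: block signals in the parent, hand them to the handler thread,
// and undo the blocking in children before exec.
#include "processes.hh"
#include "signals.hh"

#include <unistd.h>

int main()
{
    // Blocks SIGINT/SIGTERM/SIGHUP/SIGPIPE/SIGWINCH for this thread (and all
    // threads created after it) and services them on a dedicated thread.
    // The previous mask is saved first, see saveSignalMask().
    nix::startSignalHandlerThread();

    nix::Pid pid = nix::startProcess([]() {
        // Children should not inherit the blocked mask: restore the mask that
        // was saved before startSignalHandlerThread() changed it.
        nix::restoreSignals();
        execlp("sleep", "sleep", "1", (char *) nullptr);
        _exit(1); // only reached if exec failed
    });

    return nix::statusOk(pid.wait()) ? 0 : 1;
}
```
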
*/ +struct InterruptCallbackImpl : InterruptCallback +{ + InterruptCallbacks::Token token; + ~InterruptCallbackImpl() override + { + auto interruptCallbacks(_interruptCallbacks.lock()); + interruptCallbacks->callbacks.erase(token); + } +}; + +std::unique_ptr createInterruptCallback(std::function callback) +{ + auto interruptCallbacks(_interruptCallbacks.lock()); + auto token = interruptCallbacks->nextToken++; + interruptCallbacks->callbacks.emplace(token, callback); + + auto res = std::make_unique(); + res->token = token; + + return std::unique_ptr(res.release()); +} + +} diff --git a/src/libutil/signals.hh b/src/libutil/signals.hh new file mode 100644 index 000000000..7e8beff33 --- /dev/null +++ b/src/libutil/signals.hh @@ -0,0 +1,104 @@ +#pragma once +///@file + +#include "types.hh" +#include "error.hh" +#include "logging.hh" +#include "ansicolor.hh" + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +namespace nix { + +/* User interruption. */ + +extern std::atomic _isInterrupted; + +extern thread_local std::function interruptCheck; + +void setInterruptThrown(); + +void _interrupted(); + +void inline checkInterrupt() +{ + if (_isInterrupted || (interruptCheck && interruptCheck())) + _interrupted(); +} + +MakeError(Interrupted, BaseError); + + +/** + * Start a thread that handles various signals. Also block those signals + * on the current thread (and thus any threads created by it). + * Saves the signal mask before changing the mask to block those signals. + * See saveSignalMask(). + */ +void startSignalHandlerThread(); + +/** + * Saves the signal mask, which is the signal mask that nix will restore + * before creating child processes. + * See setChildSignalMask() to set an arbitrary signal mask instead of the + * current mask. + */ +void saveSignalMask(); + +/** + * To use in a process that already called `startSignalHandlerThread()` + * or `saveSignalMask()` first. + */ +void restoreSignals(); + +/** + * Sets the signal mask. Like saveSignalMask() but for a signal set that doesn't + * necessarily match the current thread's mask. + * See saveSignalMask() to set the saved mask to the current mask. + */ +void setChildSignalMask(sigset_t *sigs); + +struct InterruptCallback +{ + virtual ~InterruptCallback() { }; +}; + +/** + * Register a function that gets called on SIGINT (in a non-signal + * context). + */ +std::unique_ptr createInterruptCallback( + std::function callback); + +void triggerInterrupt(); + +/** + * A RAII class that causes the current thread to receive SIGUSR1 when + * the signal handler thread receives SIGINT. That is, this allows + * SIGINT to be multiplexed to multiple threads. 
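
A sketch of the cooperative-interruption pattern these declarations support (the work loop is hypothetical):

```cpp
// Sketch of cooperative interruption; the loop body is hypothetical.
#include "signals.hh"

#include <iostream>
#include <string>
#include <vector>

void processItems(const std::vector<std::string> & items)
{
    // Runs in a normal (non-signal) context when the handler thread sees SIGINT.
    auto onInterrupt = nix::createInterruptCallback([]() {
        std::cerr << "interrupted, finishing the current item...\n";
    });

    for (auto & item : items) {
        nix::checkInterrupt();  // throws nix::Interrupted once the flag is set
        // ... do the actual work on `item` ...
        (void) item;
    }

    // `onInterrupt` goes out of scope here and deregisters itself (RAII).
}
```
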
+ */ +struct ReceiveInterrupts +{ + pthread_t target; + std::unique_ptr callback; + + ReceiveInterrupts() + : target(pthread_self()) + , callback(createInterruptCallback([&]() { pthread_kill(target, SIGUSR1); })) + { } +}; + + +} diff --git a/src/libutil/suggestions.cc b/src/libutil/suggestions.cc index 9510a5f0c..e67e986fb 100644 --- a/src/libutil/suggestions.cc +++ b/src/libutil/suggestions.cc @@ -1,7 +1,9 @@ #include "suggestions.hh" #include "ansicolor.hh" -#include "util.hh" +#include "terminal.hh" + #include +#include namespace nix { diff --git a/src/libutil/tarfile.cc b/src/libutil/tarfile.cc index 5060a8f24..1733c791c 100644 --- a/src/libutil/tarfile.cc +++ b/src/libutil/tarfile.cc @@ -3,6 +3,7 @@ #include "serialise.hh" #include "tarfile.hh" +#include "file-system.hh" namespace nix { diff --git a/src/libutil/terminal.cc b/src/libutil/terminal.cc new file mode 100644 index 000000000..8febc8771 --- /dev/null +++ b/src/libutil/terminal.cc @@ -0,0 +1,108 @@ +#include "terminal.hh" +#include "environment-variables.hh" +#include "sync.hh" + +#include +#include + +namespace nix { + +bool shouldANSI() +{ + return isatty(STDERR_FILENO) + && getEnv("TERM").value_or("dumb") != "dumb" + && !(getEnv("NO_COLOR").has_value() || getEnv("NOCOLOR").has_value()); +} + +std::string filterANSIEscapes(std::string_view s, bool filterAll, unsigned int width) +{ + std::string t, e; + size_t w = 0; + auto i = s.begin(); + + while (w < (size_t) width && i != s.end()) { + + if (*i == '\e') { + std::string e; + e += *i++; + char last = 0; + + if (i != s.end() && *i == '[') { + e += *i++; + // eat parameter bytes + while (i != s.end() && *i >= 0x30 && *i <= 0x3f) e += *i++; + // eat intermediate bytes + while (i != s.end() && *i >= 0x20 && *i <= 0x2f) e += *i++; + // eat final byte + if (i != s.end() && *i >= 0x40 && *i <= 0x7e) e += last = *i++; + } else { + if (i != s.end() && *i >= 0x40 && *i <= 0x5f) e += *i++; + } + + if (!filterAll && last == 'm') + t += e; + } + + else if (*i == '\t') { + i++; t += ' '; w++; + while (w < (size_t) width && w % 8) { + t += ' '; w++; + } + } + + else if (*i == '\r' || *i == '\a') + // do nothing for now + i++; + + else { + w++; + // Copy one UTF-8 character. + if ((*i & 0xe0) == 0xc0) { + t += *i++; + if (i != s.end() && ((*i & 0xc0) == 0x80)) t += *i++; + } else if ((*i & 0xf0) == 0xe0) { + t += *i++; + if (i != s.end() && ((*i & 0xc0) == 0x80)) { + t += *i++; + if (i != s.end() && ((*i & 0xc0) == 0x80)) t += *i++; + } + } else if ((*i & 0xf8) == 0xf0) { + t += *i++; + if (i != s.end() && ((*i & 0xc0) == 0x80)) { + t += *i++; + if (i != s.end() && ((*i & 0xc0) == 0x80)) { + t += *i++; + if (i != s.end() && ((*i & 0xc0) == 0x80)) t += *i++; + } + } + } else + t += *i++; + } + } + + return t; +} + + +////////////////////////////////////////////////////////////////////// + +static Sync> windowSize{{0, 0}}; + + +void updateWindowSize() +{ + struct winsize ws; + if (ioctl(2, TIOCGWINSZ, &ws) == 0) { + auto windowSize_(windowSize.lock()); + windowSize_->first = ws.ws_row; + windowSize_->second = ws.ws_col; + } +} + + +std::pair getWindowSize() +{ + return *windowSize.lock(); +} + +} diff --git a/src/libutil/terminal.hh b/src/libutil/terminal.hh new file mode 100644 index 000000000..9cb191308 --- /dev/null +++ b/src/libutil/terminal.hh @@ -0,0 +1,38 @@ +#pragma once +///@file + +#include "types.hh" + +namespace nix { +/** + * Determine whether ANSI escape sequences are appropriate for the + * present output. 
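
A sketch of how a renderer might combine the terminal helpers above (the 80-column fallback is an assumption for the case where the window size has not been measured yet):

```cpp
// Sketch of a status-line renderer; the 80-column fallback is an assumption
// for when updateWindowSize() has not run yet.
#include "terminal.hh"

#include <iostream>
#include <string>

void printStatusLine(const std::string & line)
{
    auto [rows, cols] = nix::getWindowSize(); // cached; refreshed on SIGWINCH via updateWindowSize()
    (void) rows;

    bool useColour = nix::shouldANSI(); // stderr is a tty, TERM != dumb, NO_COLOR/NOCOLOR unset

    // Truncate to the terminal width; strip *all* escapes unless colour is wanted,
    // otherwise keep colour codes while excluding them from the width count.
    std::cerr << nix::filterANSIEscapes(line, !useColour, cols ? cols : 80) << "\n";
}
```
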
+ */ +bool shouldANSI(); + +/** + * Truncate a string to 'width' printable characters. If 'filterAll' + * is true, all ANSI escape sequences are filtered out. Otherwise, + * some escape sequences (such as colour setting) are copied but not + * included in the character count. Also, tabs are expanded to + * spaces. + */ +std::string filterANSIEscapes(std::string_view s, + bool filterAll = false, + unsigned int width = std::numeric_limits::max()); + +/** + * Recalculate the window size, updating a global variable. Used in the + * `SIGWINCH` signal handler. + */ +void updateWindowSize(); + +/** + * @return the number of rows and columns of the terminal. + * + * The value is cached so this is quick. The cached result is computed + * by `updateWindowSize()`. + */ +std::pair getWindowSize(); + +} diff --git a/src/libutil/tests/args.cc b/src/libutil/tests/args.cc new file mode 100644 index 000000000..bea74a8c8 --- /dev/null +++ b/src/libutil/tests/args.cc @@ -0,0 +1,168 @@ +#include "../args.hh" +#include "libutil/fs-sink.hh" +#include + +#include +#include + +namespace nix { + + TEST(parseShebangContent, basic) { + std::list r = parseShebangContent("hi there"); + ASSERT_EQ(r.size(), 2); + auto i = r.begin(); + ASSERT_EQ(*i++, "hi"); + ASSERT_EQ(*i++, "there"); + } + + TEST(parseShebangContent, empty) { + std::list r = parseShebangContent(""); + ASSERT_EQ(r.size(), 0); + } + + TEST(parseShebangContent, doubleBacktick) { + std::list r = parseShebangContent("``\"ain't that nice\"``"); + ASSERT_EQ(r.size(), 1); + auto i = r.begin(); + ASSERT_EQ(*i++, "\"ain't that nice\""); + } + + TEST(parseShebangContent, doubleBacktickEmpty) { + std::list r = parseShebangContent("````"); + ASSERT_EQ(r.size(), 1); + auto i = r.begin(); + ASSERT_EQ(*i++, ""); + } + + TEST(parseShebangContent, doubleBacktickMarkdownInlineCode) { + std::list r = parseShebangContent("``# I'm markdown section about `coolFunction` ``"); + ASSERT_EQ(r.size(), 1); + auto i = r.begin(); + ASSERT_EQ(*i++, "# I'm markdown section about `coolFunction`"); + } + + TEST(parseShebangContent, doubleBacktickMarkdownCodeBlockNaive) { + std::list r = parseShebangContent("``Example 1\n```nix\na: a\n``` ``"); + auto i = r.begin(); + ASSERT_EQ(r.size(), 1); + ASSERT_EQ(*i++, "Example 1\n``nix\na: a\n``"); + } + + TEST(parseShebangContent, doubleBacktickMarkdownCodeBlockCorrect) { + std::list r = parseShebangContent("``Example 1\n````nix\na: a\n```` ``"); + auto i = r.begin(); + ASSERT_EQ(r.size(), 1); + ASSERT_EQ(*i++, "Example 1\n```nix\na: a\n```"); + } + + TEST(parseShebangContent, doubleBacktickMarkdownCodeBlock2) { + std::list r = parseShebangContent("``Example 1\n````nix\na: a\n````\nExample 2\n````nix\na: a\n```` ``"); + auto i = r.begin(); + ASSERT_EQ(r.size(), 1); + ASSERT_EQ(*i++, "Example 1\n```nix\na: a\n```\nExample 2\n```nix\na: a\n```"); + } + + TEST(parseShebangContent, singleBacktickInDoubleBacktickQuotes) { + std::list r = parseShebangContent("``` ``"); + auto i = r.begin(); + ASSERT_EQ(r.size(), 1); + ASSERT_EQ(*i++, "`"); + } + + TEST(parseShebangContent, singleBacktickAndSpaceInDoubleBacktickQuotes) { + std::list r = parseShebangContent("``` ``"); + auto i = r.begin(); + ASSERT_EQ(r.size(), 1); + ASSERT_EQ(*i++, "` "); + } + + TEST(parseShebangContent, doubleBacktickInDoubleBacktickQuotes) { + std::list r = parseShebangContent("````` ``"); + auto i = r.begin(); + ASSERT_EQ(r.size(), 1); + ASSERT_EQ(*i++, "``"); + } + + TEST(parseShebangContent, increasingQuotes) { + std::list r = parseShebangContent("```` ``` `` ````` `` `````` 
``"); + auto i = r.begin(); + ASSERT_EQ(r.size(), 4); + ASSERT_EQ(*i++, ""); + ASSERT_EQ(*i++, "`"); + ASSERT_EQ(*i++, "``"); + ASSERT_EQ(*i++, "```"); + } + + +#ifndef COVERAGE + +// quick and dirty +static inline std::string escape(std::string_view s_) { + + std::string_view s = s_; + std::string r = "``"; + + // make a guess to allocate ahead of time + r.reserve( + // plain chars + s.size() + // quotes + + 5 + // some "escape" backticks + + s.size() / 8); + + while (!s.empty()) { + if (s[0] == '`' && s.size() >= 2 && s[1] == '`') { + // escape it + r += "`"; + while (!s.empty() && s[0] == '`') { + r += "`"; + s = s.substr(1); + } + } else { + r += s[0]; + s = s.substr(1); + } + } + + if (!r.empty() + && ( + r[r.size() - 1] == '`' + || r[r.size() - 1] == ' ' + )) { + r += " "; + } + + r += "``"; + + return r; +}; + +RC_GTEST_PROP( + parseShebangContent, + prop_round_trip_single, + (const std::string & orig)) +{ + auto escaped = escape(orig); + // RC_LOG() << "escaped: <[[" << escaped << "]]>" << std::endl; + auto ss = parseShebangContent(escaped); + RC_ASSERT(ss.size() == 1); + RC_ASSERT(*ss.begin() == orig); +} + +RC_GTEST_PROP( + parseShebangContent, + prop_round_trip_two, + (const std::string & one, const std::string & two)) +{ + auto ss = parseShebangContent(escape(one) + " " + escape(two)); + RC_ASSERT(ss.size() == 2); + auto i = ss.begin(); + RC_ASSERT(*i++ == one); + RC_ASSERT(*i++ == two); +} + + +#endif + +} \ No newline at end of file diff --git a/src/libutil/tests/characterization.hh b/src/libutil/tests/characterization.hh new file mode 100644 index 000000000..6eb513d68 --- /dev/null +++ b/src/libutil/tests/characterization.hh @@ -0,0 +1,108 @@ +#pragma once +///@file + +#include + +#include "types.hh" +#include "environment-variables.hh" + +namespace nix { + +/** + * The path to the `unit-test-data` directory. See the contributing + * guide in the manual for further details. + */ +static Path getUnitTestData() { + return getEnv("_NIX_TEST_UNIT_DATA").value(); +} + +/** + * Whether we should update "golden masters" instead of running tests + * against them. See the contributing guide in the manual for further + * details. + */ +static bool testAccept() { + return getEnv("_NIX_TEST_ACCEPT") == "1"; +} + +/** + * Mixin class for writing characterization tests + */ +class CharacterizationTest : public virtual ::testing::Test +{ +protected: + /** + * While the "golden master" for this characterization test is + * located. It should not be shared with any other test. 
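
A compact sketch of the quoting rules the tests above exercise, assuming `parseShebangContent` is declared in `args.hh` as the test's include suggests:

```cpp
// Sketch; assumes parseShebangContent() is declared in args.hh, as the
// test's include suggests.
#include "args.hh"

#include <cassert>
#include <list>
#include <string>

int main()
{
    // Plain words split on whitespace; ``...`` keeps its contents as a single argument.
    std::list<std::string> args = nix::parseShebangContent("--arg ``hello world``");

    assert(args.size() == 2);
    assert(args.front() == "--arg");
    assert(args.back() == "hello world");
}
```
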
+ */ + virtual Path goldenMaster(PathView testStem) const = 0; + +public: + /** + * Golden test for reading + * + * @param test hook that takes the contents of the file and does the + * actual work + */ + void readTest(PathView testStem, auto && test) + { + auto file = goldenMaster(testStem); + + if (testAccept()) + { + GTEST_SKIP() + << "Cannot read golden master " + << file + << "because another test is also updating it"; + } + else + { + test(readFile(file)); + } + } + + /** + * Golden test for writing + * + * @param test hook that produces contents of the file and does the + * actual work + */ + void writeTest( + PathView testStem, auto && test, auto && readFile2, auto && writeFile2) + { + auto file = goldenMaster(testStem); + + auto got = test(); + + if (testAccept()) + { + createDirs(dirOf(file)); + writeFile2(file, got); + GTEST_SKIP() + << "Updating golden master " + << file; + } + else + { + decltype(got) expected = readFile2(file); + ASSERT_EQ(got, expected); + } + } + + /** + * Specialize to `std::string` + */ + void writeTest(PathView testStem, auto && test) + { + writeTest( + testStem, test, + [](const Path & f) -> std::string { + return readFile(f); + }, + [](const Path & f, const std::string & c) { + return writeFile(f, c); + }); + } +}; + +} diff --git a/src/libutil/tests/logging.cc b/src/libutil/tests/logging.cc index 2ffdc2e9b..c6dfe63d3 100644 --- a/src/libutil/tests/logging.cc +++ b/src/libutil/tests/logging.cc @@ -2,7 +2,6 @@ #include "logging.hh" #include "nixexpr.hh" -#include "util.hh" #include #include diff --git a/src/libutil/tests/tests.cc b/src/libutil/tests/tests.cc index f3c1e8248..568f03f70 100644 --- a/src/libutil/tests/tests.cc +++ b/src/libutil/tests/tests.cc @@ -1,5 +1,8 @@ #include "util.hh" #include "types.hh" +#include "file-system.hh" +#include "processes.hh" +#include "terminal.hh" #include #include diff --git a/src/libutil/thread-pool.cc b/src/libutil/thread-pool.cc index dc4067f1b..c5e735617 100644 --- a/src/libutil/thread-pool.cc +++ b/src/libutil/thread-pool.cc @@ -1,4 +1,6 @@ #include "thread-pool.hh" +#include "signals.hh" +#include "util.hh" namespace nix { diff --git a/src/libutil/thread-pool.hh b/src/libutil/thread-pool.hh index 0e09fae97..02765badc 100644 --- a/src/libutil/thread-pool.hh +++ b/src/libutil/thread-pool.hh @@ -1,8 +1,8 @@ #pragma once ///@file +#include "error.hh" #include "sync.hh" -#include "util.hh" #include #include diff --git a/src/libutil/unix-domain-socket.cc b/src/libutil/unix-domain-socket.cc new file mode 100644 index 000000000..8949461d2 --- /dev/null +++ b/src/libutil/unix-domain-socket.cc @@ -0,0 +1,100 @@ +#include "file-system.hh" +#include "processes.hh" +#include "unix-domain-socket.hh" + +#include +#include +#include + +namespace nix { + +AutoCloseFD createUnixDomainSocket() +{ + AutoCloseFD fdSocket = socket(PF_UNIX, SOCK_STREAM + #ifdef SOCK_CLOEXEC + | SOCK_CLOEXEC + #endif + , 0); + if (!fdSocket) + throw SysError("cannot create Unix domain socket"); + closeOnExec(fdSocket.get()); + return fdSocket; +} + + +AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode) +{ + auto fdSocket = nix::createUnixDomainSocket(); + + bind(fdSocket.get(), path); + + if (chmod(path.c_str(), mode) == -1) + throw SysError("changing permissions on '%1%'", path); + + if (listen(fdSocket.get(), 100) == -1) + throw SysError("cannot listen on socket '%1%'", path); + + return fdSocket; +} + + +void bind(int fd, const std::string & path) +{ + unlink(path.c_str()); + + struct sockaddr_un addr; + addr.sun_family = AF_UNIX; 
+ + if (path.size() + 1 >= sizeof(addr.sun_path)) { + Pid pid = startProcess([&]() { + Path dir = dirOf(path); + if (chdir(dir.c_str()) == -1) + throw SysError("chdir to '%s' failed", dir); + std::string base(baseNameOf(path)); + if (base.size() + 1 >= sizeof(addr.sun_path)) + throw Error("socket path '%s' is too long", base); + memcpy(addr.sun_path, base.c_str(), base.size() + 1); + if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) + throw SysError("cannot bind to socket '%s'", path); + _exit(0); + }); + int status = pid.wait(); + if (status != 0) + throw Error("cannot bind to socket '%s'", path); + } else { + memcpy(addr.sun_path, path.c_str(), path.size() + 1); + if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) + throw SysError("cannot bind to socket '%s'", path); + } +} + + +void connect(int fd, const std::string & path) +{ + struct sockaddr_un addr; + addr.sun_family = AF_UNIX; + + if (path.size() + 1 >= sizeof(addr.sun_path)) { + Pid pid = startProcess([&]() { + Path dir = dirOf(path); + if (chdir(dir.c_str()) == -1) + throw SysError("chdir to '%s' failed", dir); + std::string base(baseNameOf(path)); + if (base.size() + 1 >= sizeof(addr.sun_path)) + throw Error("socket path '%s' is too long", base); + memcpy(addr.sun_path, base.c_str(), base.size() + 1); + if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) + throw SysError("cannot connect to socket at '%s'", path); + _exit(0); + }); + int status = pid.wait(); + if (status != 0) + throw Error("cannot connect to socket at '%s'", path); + } else { + memcpy(addr.sun_path, path.c_str(), path.size() + 1); + if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) + throw SysError("cannot connect to socket at '%s'", path); + } +} + +} diff --git a/src/libutil/unix-domain-socket.hh b/src/libutil/unix-domain-socket.hh new file mode 100644 index 000000000..b78feb454 --- /dev/null +++ b/src/libutil/unix-domain-socket.hh @@ -0,0 +1,31 @@ +#pragma once +///@file + +#include "types.hh" +#include "file-descriptor.hh" + +#include + +namespace nix { + +/** + * Create a Unix domain socket. + */ +AutoCloseFD createUnixDomainSocket(); + +/** + * Create a Unix domain socket in listen mode. + */ +AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode); + +/** + * Bind a Unix domain socket to a path. + */ +void bind(int fd, const std::string & path); + +/** + * Connect to a Unix domain socket. + */ +void connect(int fd, const std::string & path); + +} diff --git a/src/libutil/users.cc b/src/libutil/users.cc new file mode 100644 index 000000000..95a641322 --- /dev/null +++ b/src/libutil/users.cc @@ -0,0 +1,116 @@ +#include "util.hh" +#include "users.hh" +#include "environment-variables.hh" +#include "file-system.hh" + +#include +#include +#include + +namespace nix { + +std::string getUserName() +{ + auto pw = getpwuid(geteuid()); + std::string name = pw ? pw->pw_name : getEnv("USER").value_or(""); + if (name.empty()) + throw Error("cannot figure out user name"); + return name; +} + +Path getHomeOf(uid_t userId) +{ + std::vector buf(16384); + struct passwd pwbuf; + struct passwd * pw; + if (getpwuid_r(userId, &pwbuf, buf.data(), buf.size(), &pw) != 0 + || !pw || !pw->pw_dir || !pw->pw_dir[0]) + throw Error("cannot determine user's home directory"); + return pw->pw_dir; +} + +Path getHome() +{ + static Path homeDir = []() + { + std::optional unownedUserHomeDir = {}; + auto homeDir = getEnv("HOME"); + if (homeDir) { + // Only use $HOME if doesn't exist or is owned by the current user. 
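
A sketch of a listener/client pair built from these helpers; the socket path is an arbitrary example:

```cpp
// Sketch of a listener/client pair; the socket path is an arbitrary example.
#include "unix-domain-socket.hh"

#include <sys/socket.h>

int main()
{
    nix::Path path = "/tmp/example.socket";

    // Server side: create, bind (using the chdir workaround above when the
    // path exceeds sizeof(sun_path)), chmod and listen.
    nix::AutoCloseFD server = nix::createUnixDomainSocket(path, 0666);

    // Client side: a fresh socket connected to the same path.
    nix::AutoCloseFD client = nix::createUnixDomainSocket();
    nix::connect(client.get(), path);

    // Accept the queued connection; all descriptors close automatically (RAII).
    nix::AutoCloseFD conn(accept(server.get(), nullptr, nullptr));
    return conn ? 0 : 1;
}
```
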
+ struct stat st; + int result = stat(homeDir->c_str(), &st); + if (result != 0) { + if (errno != ENOENT) { + warn("couldn't stat $HOME ('%s') for reason other than not existing ('%d'), falling back to the one defined in the 'passwd' file", *homeDir, errno); + homeDir.reset(); + } + } else if (st.st_uid != geteuid()) { + unownedUserHomeDir.swap(homeDir); + } + } + if (!homeDir) { + homeDir = getHomeOf(geteuid()); + if (unownedUserHomeDir.has_value() && unownedUserHomeDir != homeDir) { + warn("$HOME ('%s') is not owned by you, falling back to the one defined in the 'passwd' file ('%s')", *unownedUserHomeDir, *homeDir); + } + } + return *homeDir; + }(); + return homeDir; +} + + +Path getCacheDir() +{ + auto cacheDir = getEnv("XDG_CACHE_HOME"); + return cacheDir ? *cacheDir : getHome() + "/.cache"; +} + + +Path getConfigDir() +{ + auto configDir = getEnv("XDG_CONFIG_HOME"); + return configDir ? *configDir : getHome() + "/.config"; +} + +std::vector getConfigDirs() +{ + Path configHome = getConfigDir(); + auto configDirs = getEnv("XDG_CONFIG_DIRS").value_or("/etc/xdg"); + std::vector result = tokenizeString>(configDirs, ":"); + result.insert(result.begin(), configHome); + return result; +} + + +Path getDataDir() +{ + auto dataDir = getEnv("XDG_DATA_HOME"); + return dataDir ? *dataDir : getHome() + "/.local/share"; +} + +Path getStateDir() +{ + auto stateDir = getEnv("XDG_STATE_HOME"); + return stateDir ? *stateDir : getHome() + "/.local/state"; +} + +Path createNixStateDir() +{ + Path dir = getStateDir() + "/nix"; + createDirs(dir); + return dir; +} + + +std::string expandTilde(std::string_view path) +{ + // TODO: expand ~user ? + auto tilde = path.substr(0, 2); + if (tilde == "~/" || tilde == "~") + return getHome() + std::string(path.substr(1)); + else + return std::string(path); +} + +} diff --git a/src/libutil/users.hh b/src/libutil/users.hh new file mode 100644 index 000000000..cecbb8bfb --- /dev/null +++ b/src/libutil/users.hh @@ -0,0 +1,58 @@ +#pragma once +///@file + +#include "types.hh" + +#include + +namespace nix { + +std::string getUserName(); + +/** + * @return the given user's home directory from /etc/passwd. + */ +Path getHomeOf(uid_t userId); + +/** + * @return $HOME or the user's home directory from /etc/passwd. + */ +Path getHome(); + +/** + * @return $XDG_CACHE_HOME or $HOME/.cache. + */ +Path getCacheDir(); + +/** + * @return $XDG_CONFIG_HOME or $HOME/.config. + */ +Path getConfigDir(); + +/** + * @return the directories to search for user configuration files + */ +std::vector getConfigDirs(); + +/** + * @return $XDG_DATA_HOME or $HOME/.local/share. + */ +Path getDataDir(); + +/** + * @return $XDG_STATE_HOME or $HOME/.local/state. + */ +Path getStateDir(); + +/** + * Create the Nix state directory and return the path to it. + */ +Path createNixStateDir(); + +/** + * Perform tilde expansion on a path, replacing tilde with the user's + * home directory. 
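
A sketch of the XDG lookup order these helpers implement; the paths in the comments are the defaults used when the corresponding variables are unset:

```cpp
// Sketch of the lookup order; the paths in the comments are the defaults
// used when the corresponding variables are unset.
#include "users.hh"

#include <iostream>

int main()
{
    using namespace nix;

    std::cout << expandTilde("~/.config/nix/nix.conf") << "\n"; // $HOME/.config/nix/nix.conf

    // $XDG_CONFIG_HOME (or ~/.config), followed by each entry of
    // $XDG_CONFIG_DIRS (or /etc/xdg).
    for (auto & dir : getConfigDirs())
        std::cout << dir << "/nix/nix.conf\n";

    // $XDG_STATE_HOME/nix (or ~/.local/state/nix), created if missing.
    std::cout << createNixStateDir() << "\n";
}
```
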
+ */ +std::string expandTilde(std::string_view path); + +} diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 3b4c181e5..5bb3f374b 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -1,48 +1,11 @@ #include "util.hh" -#include "sync.hh" -#include "finally.hh" -#include "serialise.hh" -#include "cgroup.hh" +#include "fmt.hh" #include #include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include - -#include #include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef __APPLE__ -#include -#include -#endif - -#ifdef __linux__ -#include -#include -#include - -#include -#endif - - -extern char * * environ __attribute__((weak)); +#include namespace nix { @@ -67,1099 +30,8 @@ void initLibUtil() { assert(caught); } -std::optional getEnv(const std::string & key) -{ - char * value = getenv(key.c_str()); - if (!value) return {}; - return std::string(value); -} - -std::optional getEnvNonEmpty(const std::string & key) { - auto value = getEnv(key); - if (value == "") return {}; - return value; -} - -std::map getEnv() -{ - std::map env; - for (size_t i = 0; environ[i]; ++i) { - auto s = environ[i]; - auto eq = strchr(s, '='); - if (!eq) - // invalid env, just keep going - continue; - env.emplace(std::string(s, eq), std::string(eq + 1)); - } - return env; -} - - -void clearEnv() -{ - for (auto & name : getEnv()) - unsetenv(name.first.c_str()); -} - -void replaceEnv(const std::map & newEnv) -{ - clearEnv(); - for (auto & newEnvVar : newEnv) - setenv(newEnvVar.first.c_str(), newEnvVar.second.c_str(), 1); -} - - -Path absPath(Path path, std::optional dir, bool resolveSymlinks) -{ - if (path[0] != '/') { - if (!dir) { -#ifdef __GNU__ - /* GNU (aka. GNU/Hurd) doesn't have any limitation on path - lengths and doesn't define `PATH_MAX'. */ - char *buf = getcwd(NULL, 0); - if (buf == NULL) -#else - char buf[PATH_MAX]; - if (!getcwd(buf, sizeof(buf))) -#endif - throw SysError("cannot get cwd"); - path = concatStrings(buf, "/", path); -#ifdef __GNU__ - free(buf); -#endif - } else - path = concatStrings(*dir, "/", path); - } - return canonPath(path, resolveSymlinks); -} - - -Path canonPath(PathView path, bool resolveSymlinks) -{ - assert(path != ""); - - std::string s; - s.reserve(256); - - if (path[0] != '/') - throw Error("not an absolute path: '%1%'", path); - - std::string temp; - - /* Count the number of times we follow a symlink and stop at some - arbitrary (but high) limit to prevent infinite loops. */ - unsigned int followCount = 0, maxFollow = 1024; - - while (1) { - - /* Skip slashes. */ - while (!path.empty() && path[0] == '/') path.remove_prefix(1); - if (path.empty()) break; - - /* Ignore `.'. */ - if (path == "." || path.substr(0, 2) == "./") - path.remove_prefix(1); - - /* If `..', delete the last component. */ - else if (path == ".." || path.substr(0, 3) == "../") - { - if (!s.empty()) s.erase(s.rfind('/')); - path.remove_prefix(2); - } - - /* Normal component; copy it. 
*/ - else { - s += '/'; - if (const auto slash = path.find('/'); slash == std::string::npos) { - s += path; - path = {}; - } else { - s += path.substr(0, slash); - path = path.substr(slash); - } - - /* If s points to a symlink, resolve it and continue from there */ - if (resolveSymlinks && isLink(s)) { - if (++followCount >= maxFollow) - throw Error("infinite symlink recursion in path '%1%'", path); - temp = concatStrings(readLink(s), path); - path = temp; - if (!temp.empty() && temp[0] == '/') { - s.clear(); /* restart for symlinks pointing to absolute path */ - } else { - s = dirOf(s); - if (s == "/") { // we don’t want trailing slashes here, which dirOf only produces if s = / - s.clear(); - } - } - } - } - } - - return s.empty() ? "/" : std::move(s); -} - - -Path dirOf(const PathView path) -{ - Path::size_type pos = path.rfind('/'); - if (pos == std::string::npos) - return "."; - return pos == 0 ? "/" : Path(path, 0, pos); -} - - -std::string_view baseNameOf(std::string_view path) -{ - if (path.empty()) - return ""; - - auto last = path.size() - 1; - if (path[last] == '/' && last > 0) - last -= 1; - - auto pos = path.rfind('/', last); - if (pos == std::string::npos) - pos = 0; - else - pos += 1; - - return path.substr(pos, last - pos + 1); -} - - -std::string expandTilde(std::string_view path) -{ - // TODO: expand ~user ? - auto tilde = path.substr(0, 2); - if (tilde == "~/" || tilde == "~") - return getHome() + std::string(path.substr(1)); - else - return std::string(path); -} - - -bool isInDir(std::string_view path, std::string_view dir) -{ - return path.substr(0, 1) == "/" - && path.substr(0, dir.size()) == dir - && path.size() >= dir.size() + 2 - && path[dir.size()] == '/'; -} - - -bool isDirOrInDir(std::string_view path, std::string_view dir) -{ - return path == dir || isInDir(path, dir); -} - - -struct stat stat(const Path & path) -{ - struct stat st; - if (stat(path.c_str(), &st)) - throw SysError("getting status of '%1%'", path); - return st; -} - - -struct stat lstat(const Path & path) -{ - struct stat st; - if (lstat(path.c_str(), &st)) - throw SysError("getting status of '%1%'", path); - return st; -} - - -bool pathExists(const Path & path) -{ - int res; - struct stat st; - res = lstat(path.c_str(), &st); - if (!res) return true; - if (errno != ENOENT && errno != ENOTDIR) - throw SysError("getting status of %1%", path); - return false; -} - -bool pathAccessible(const Path & path) -{ - try { - return pathExists(path); - } catch (SysError & e) { - // swallow EPERM - if (e.errNo == EPERM) return false; - throw; - } -} - - -Path readLink(const Path & path) -{ - checkInterrupt(); - std::vector buf; - for (ssize_t bufSize = PATH_MAX/4; true; bufSize += bufSize/2) { - buf.resize(bufSize); - ssize_t rlSize = readlink(path.c_str(), buf.data(), bufSize); - if (rlSize == -1) - if (errno == EINVAL) - throw Error("'%1%' is not a symlink", path); - else - throw SysError("reading symbolic link '%1%'", path); - else if (rlSize < bufSize) - return std::string(buf.data(), rlSize); - } -} - - -bool isLink(const Path & path) -{ - struct stat st = lstat(path); - return S_ISLNK(st.st_mode); -} - - -DirEntries readDirectory(DIR *dir, const Path & path) -{ - DirEntries entries; - entries.reserve(64); - - struct dirent * dirent; - while (errno = 0, dirent = readdir(dir)) { /* sic */ - checkInterrupt(); - std::string name = dirent->d_name; - if (name == "." 
|| name == "..") continue; - entries.emplace_back(name, dirent->d_ino, -#ifdef HAVE_STRUCT_DIRENT_D_TYPE - dirent->d_type -#else - DT_UNKNOWN -#endif - ); - } - if (errno) throw SysError("reading directory '%1%'", path); - - return entries; -} - -DirEntries readDirectory(const Path & path) -{ - AutoCloseDir dir(opendir(path.c_str())); - if (!dir) throw SysError("opening directory '%1%'", path); - - return readDirectory(dir.get(), path); -} - - -unsigned char getFileType(const Path & path) -{ - struct stat st = lstat(path); - if (S_ISDIR(st.st_mode)) return DT_DIR; - if (S_ISLNK(st.st_mode)) return DT_LNK; - if (S_ISREG(st.st_mode)) return DT_REG; - return DT_UNKNOWN; -} - - -std::string readFile(int fd) -{ - struct stat st; - if (fstat(fd, &st) == -1) - throw SysError("statting file"); - - return drainFD(fd, true, st.st_size); -} - - -std::string readFile(const Path & path) -{ - AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC); - if (!fd) - throw SysError("opening file '%1%'", path); - return readFile(fd.get()); -} - - -void readFile(const Path & path, Sink & sink) -{ - AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC); - if (!fd) - throw SysError("opening file '%s'", path); - drainFD(fd.get(), sink); -} - - -void writeFile(const Path & path, std::string_view s, mode_t mode, bool sync) -{ - AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode); - if (!fd) - throw SysError("opening file '%1%'", path); - try { - writeFull(fd.get(), s); - } catch (Error & e) { - e.addTrace({}, "writing file '%1%'", path); - throw; - } - if (sync) - fd.fsync(); - // Explicitly close to make sure exceptions are propagated. - fd.close(); - if (sync) - syncParent(path); -} - - -void writeFile(const Path & path, Source & source, mode_t mode, bool sync) -{ - AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode); - if (!fd) - throw SysError("opening file '%1%'", path); - - std::vector buf(64 * 1024); - - try { - while (true) { - try { - auto n = source.read(buf.data(), buf.size()); - writeFull(fd.get(), {buf.data(), n}); - } catch (EndOfFile &) { break; } - } - } catch (Error & e) { - e.addTrace({}, "writing file '%1%'", path); - throw; - } - if (sync) - fd.fsync(); - // Explicitly close to make sure exceptions are propagated. - fd.close(); - if (sync) - syncParent(path); -} - -void syncParent(const Path & path) -{ - AutoCloseFD fd = open(dirOf(path).c_str(), O_RDONLY, 0); - if (!fd) - throw SysError("opening file '%1%'", path); - fd.fsync(); -} - -std::string readLine(int fd) -{ - std::string s; - while (1) { - checkInterrupt(); - char ch; - // FIXME: inefficient - ssize_t rd = read(fd, &ch, 1); - if (rd == -1) { - if (errno != EINTR) - throw SysError("reading a line"); - } else if (rd == 0) - throw EndOfFile("unexpected EOF reading a line"); - else { - if (ch == '\n') return s; - s += ch; - } - } -} - - -void writeLine(int fd, std::string s) -{ - s += '\n'; - writeFull(fd, s); -} - - -static void _deletePath(int parentfd, const Path & path, uint64_t & bytesFreed) -{ - checkInterrupt(); - - std::string name(baseNameOf(path)); - - struct stat st; - if (fstatat(parentfd, name.c_str(), &st, AT_SYMLINK_NOFOLLOW) == -1) { - if (errno == ENOENT) return; - throw SysError("getting status of '%1%'", path); - } - - if (!S_ISDIR(st.st_mode)) { - /* We are about to delete a file. Will it likely free space? */ - - switch (st.st_nlink) { - /* Yes: last link. 
*/ - case 1: - bytesFreed += st.st_size; - break; - /* Maybe: yes, if 'auto-optimise-store' or manual optimisation - was performed. Instead of checking for real let's assume - it's an optimised file and space will be freed. - - In worst case we will double count on freed space for files - with exactly two hardlinks for unoptimised packages. - */ - case 2: - bytesFreed += st.st_size; - break; - /* No: 3+ links. */ - default: - break; - } - } - - if (S_ISDIR(st.st_mode)) { - /* Make the directory accessible. */ - const auto PERM_MASK = S_IRUSR | S_IWUSR | S_IXUSR; - if ((st.st_mode & PERM_MASK) != PERM_MASK) { - if (fchmodat(parentfd, name.c_str(), st.st_mode | PERM_MASK, 0) == -1) - throw SysError("chmod '%1%'", path); - } - - int fd = openat(parentfd, path.c_str(), O_RDONLY); - if (fd == -1) - throw SysError("opening directory '%1%'", path); - AutoCloseDir dir(fdopendir(fd)); - if (!dir) - throw SysError("opening directory '%1%'", path); - for (auto & i : readDirectory(dir.get(), path)) - _deletePath(dirfd(dir.get()), path + "/" + i.name, bytesFreed); - } - - int flags = S_ISDIR(st.st_mode) ? AT_REMOVEDIR : 0; - if (unlinkat(parentfd, name.c_str(), flags) == -1) { - if (errno == ENOENT) return; - throw SysError("cannot unlink '%1%'", path); - } -} - -static void _deletePath(const Path & path, uint64_t & bytesFreed) -{ - Path dir = dirOf(path); - if (dir == "") - dir = "/"; - - AutoCloseFD dirfd{open(dir.c_str(), O_RDONLY)}; - if (!dirfd) { - if (errno == ENOENT) return; - throw SysError("opening directory '%1%'", path); - } - - _deletePath(dirfd.get(), path, bytesFreed); -} - - -void deletePath(const Path & path) -{ - uint64_t dummy; - deletePath(path, dummy); -} - - -void deletePath(const Path & path, uint64_t & bytesFreed) -{ - //Activity act(*logger, lvlDebug, "recursively deleting path '%1%'", path); - bytesFreed = 0; - _deletePath(path, bytesFreed); -} - - -std::string getUserName() -{ - auto pw = getpwuid(geteuid()); - std::string name = pw ? pw->pw_name : getEnv("USER").value_or(""); - if (name.empty()) - throw Error("cannot figure out user name"); - return name; -} - -Path getHomeOf(uid_t userId) -{ - std::vector buf(16384); - struct passwd pwbuf; - struct passwd * pw; - if (getpwuid_r(userId, &pwbuf, buf.data(), buf.size(), &pw) != 0 - || !pw || !pw->pw_dir || !pw->pw_dir[0]) - throw Error("cannot determine user's home directory"); - return pw->pw_dir; -} - -Path getHome() -{ - static Path homeDir = []() - { - std::optional unownedUserHomeDir = {}; - auto homeDir = getEnv("HOME"); - if (homeDir) { - // Only use $HOME if doesn't exist or is owned by the current user. - struct stat st; - int result = stat(homeDir->c_str(), &st); - if (result != 0) { - if (errno != ENOENT) { - warn("couldn't stat $HOME ('%s') for reason other than not existing ('%d'), falling back to the one defined in the 'passwd' file", *homeDir, errno); - homeDir.reset(); - } - } else if (st.st_uid != geteuid()) { - unownedUserHomeDir.swap(homeDir); - } - } - if (!homeDir) { - homeDir = getHomeOf(geteuid()); - if (unownedUserHomeDir.has_value() && unownedUserHomeDir != homeDir) { - warn("$HOME ('%s') is not owned by you, falling back to the one defined in the 'passwd' file ('%s')", *unownedUserHomeDir, *homeDir); - } - } - return *homeDir; - }(); - return homeDir; -} - - -Path getCacheDir() -{ - auto cacheDir = getEnv("XDG_CACHE_HOME"); - return cacheDir ? *cacheDir : getHome() + "/.cache"; -} - - -Path getConfigDir() -{ - auto configDir = getEnv("XDG_CONFIG_HOME"); - return configDir ? 
*configDir : getHome() + "/.config"; -} - -std::vector getConfigDirs() -{ - Path configHome = getConfigDir(); - auto configDirs = getEnv("XDG_CONFIG_DIRS").value_or("/etc/xdg"); - std::vector result = tokenizeString>(configDirs, ":"); - result.insert(result.begin(), configHome); - return result; -} - - -Path getDataDir() -{ - auto dataDir = getEnv("XDG_DATA_HOME"); - return dataDir ? *dataDir : getHome() + "/.local/share"; -} - -Path getStateDir() -{ - auto stateDir = getEnv("XDG_STATE_HOME"); - return stateDir ? *stateDir : getHome() + "/.local/state"; -} - -Path createNixStateDir() -{ - Path dir = getStateDir() + "/nix"; - createDirs(dir); - return dir; -} - - -std::optional getSelfExe() -{ - static auto cached = []() -> std::optional - { - #if __linux__ - return readLink("/proc/self/exe"); - #elif __APPLE__ - char buf[1024]; - uint32_t size = sizeof(buf); - if (_NSGetExecutablePath(buf, &size) == 0) - return buf; - else - return std::nullopt; - #else - return std::nullopt; - #endif - }(); - return cached; -} - - -Paths createDirs(const Path & path) -{ - Paths created; - if (path == "/") return created; - - struct stat st; - if (lstat(path.c_str(), &st) == -1) { - created = createDirs(dirOf(path)); - if (mkdir(path.c_str(), 0777) == -1 && errno != EEXIST) - throw SysError("creating directory '%1%'", path); - st = lstat(path); - created.push_back(path); - } - - if (S_ISLNK(st.st_mode) && stat(path.c_str(), &st) == -1) - throw SysError("statting symlink '%1%'", path); - - if (!S_ISDIR(st.st_mode)) throw Error("'%1%' is not a directory", path); - - return created; -} - - -void readFull(int fd, char * buf, size_t count) -{ - while (count) { - checkInterrupt(); - ssize_t res = read(fd, buf, count); - if (res == -1) { - if (errno == EINTR) continue; - throw SysError("reading from file"); - } - if (res == 0) throw EndOfFile("unexpected end-of-file"); - count -= res; - buf += res; - } -} - - -void writeFull(int fd, std::string_view s, bool allowInterrupts) -{ - while (!s.empty()) { - if (allowInterrupts) checkInterrupt(); - ssize_t res = write(fd, s.data(), s.size()); - if (res == -1 && errno != EINTR) - throw SysError("writing to file"); - if (res > 0) - s.remove_prefix(res); - } -} - - -std::string drainFD(int fd, bool block, const size_t reserveSize) -{ - // the parser needs two extra bytes to append terminating characters, other users will - // not care very much about the extra memory. 
- StringSink sink(reserveSize + 2); - drainFD(fd, sink, block); - return std::move(sink.s); -} - - -void drainFD(int fd, Sink & sink, bool block) -{ - // silence GCC maybe-uninitialized warning in finally - int saved = 0; - - if (!block) { - saved = fcntl(fd, F_GETFL); - if (fcntl(fd, F_SETFL, saved | O_NONBLOCK) == -1) - throw SysError("making file descriptor non-blocking"); - } - - Finally finally([&]() { - if (!block) { - if (fcntl(fd, F_SETFL, saved) == -1) - throw SysError("making file descriptor blocking"); - } - }); - - std::vector buf(64 * 1024); - while (1) { - checkInterrupt(); - ssize_t rd = read(fd, buf.data(), buf.size()); - if (rd == -1) { - if (!block && (errno == EAGAIN || errno == EWOULDBLOCK)) - break; - if (errno != EINTR) - throw SysError("reading from file"); - } - else if (rd == 0) break; - else sink({(char *) buf.data(), (size_t) rd}); - } -} - ////////////////////////////////////////////////////////////////////// -unsigned int getMaxCPU() -{ - #if __linux__ - try { - auto cgroupFS = getCgroupFS(); - if (!cgroupFS) return 0; - - auto cgroups = getCgroups("/proc/self/cgroup"); - auto cgroup = cgroups[""]; - if (cgroup == "") return 0; - - auto cpuFile = *cgroupFS + "/" + cgroup + "/cpu.max"; - - auto cpuMax = readFile(cpuFile); - auto cpuMaxParts = tokenizeString>(cpuMax, " \n"); - auto quota = cpuMaxParts[0]; - auto period = cpuMaxParts[1]; - if (quota != "max") - return std::ceil(std::stoi(quota) / std::stof(period)); - } catch (Error &) { ignoreException(lvlDebug); } - #endif - - return 0; -} - -////////////////////////////////////////////////////////////////////// - - -AutoDelete::AutoDelete() : del{false} {} - -AutoDelete::AutoDelete(const std::string & p, bool recursive) : path(p) -{ - del = true; - this->recursive = recursive; -} - -AutoDelete::~AutoDelete() -{ - try { - if (del) { - if (recursive) - deletePath(path); - else { - if (remove(path.c_str()) == -1) - throw SysError("cannot unlink '%1%'", path); - } - } - } catch (...) { - ignoreException(); - } -} - -void AutoDelete::cancel() -{ - del = false; -} - -void AutoDelete::reset(const Path & p, bool recursive) { - path = p; - this->recursive = recursive; - del = true; -} - - - -////////////////////////////////////////////////////////////////////// - - -AutoCloseFD::AutoCloseFD() : fd{-1} {} - - -AutoCloseFD::AutoCloseFD(int fd) : fd{fd} {} - - -AutoCloseFD::AutoCloseFD(AutoCloseFD && that) : fd{that.fd} -{ - that.fd = -1; -} - - -AutoCloseFD & AutoCloseFD::operator =(AutoCloseFD && that) -{ - close(); - fd = that.fd; - that.fd = -1; - return *this; -} - - -AutoCloseFD::~AutoCloseFD() -{ - try { - close(); - } catch (...) { - ignoreException(); - } -} - - -int AutoCloseFD::get() const -{ - return fd; -} - - -void AutoCloseFD::close() -{ - if (fd != -1) { - if (::close(fd) == -1) - /* This should never happen. 
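
A sketch of the RAII clean-up `AutoDelete` provides; the scratch path is made up, and `file-system.hh` is an assumed location for `AutoDelete`/`createDirs` after this split:

```cpp
// Sketch; the scratch path is made up, and file-system.hh is an assumed
// location for AutoDelete/createDirs after this split.
#include "file-system.hh"

int main()
{
    nix::Path scratch = "/tmp/nix-example-scratch";
    nix::createDirs(scratch);

    // Deletes the whole tree in its destructor unless cancel() is called.
    nix::AutoDelete cleanup(scratch, /* recursive */ true);

    // ... write temporary files under `scratch` ...

    // cleanup.cancel(); // uncomment to keep the directory instead
    return 0;
}
```
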
*/ - throw SysError("closing file descriptor %1%", fd); - fd = -1; - } -} - -void AutoCloseFD::fsync() -{ - if (fd != -1) { - int result; -#if __APPLE__ - result = ::fcntl(fd, F_FULLFSYNC); -#else - result = ::fsync(fd); -#endif - if (result == -1) - throw SysError("fsync file descriptor %1%", fd); - } -} - - -AutoCloseFD::operator bool() const -{ - return fd != -1; -} - - -int AutoCloseFD::release() -{ - int oldFD = fd; - fd = -1; - return oldFD; -} - - -void Pipe::create() -{ - int fds[2]; -#if HAVE_PIPE2 - if (pipe2(fds, O_CLOEXEC) != 0) throw SysError("creating pipe"); -#else - if (pipe(fds) != 0) throw SysError("creating pipe"); - closeOnExec(fds[0]); - closeOnExec(fds[1]); -#endif - readSide = fds[0]; - writeSide = fds[1]; -} - - -void Pipe::close() -{ - readSide.close(); - writeSide.close(); -} - - -////////////////////////////////////////////////////////////////////// - - -Pid::Pid() -{ -} - - -Pid::Pid(pid_t pid) - : pid(pid) -{ -} - - -Pid::~Pid() -{ - if (pid != -1) kill(); -} - - -void Pid::operator =(pid_t pid) -{ - if (this->pid != -1 && this->pid != pid) kill(); - this->pid = pid; - killSignal = SIGKILL; // reset signal to default -} - - -Pid::operator pid_t() -{ - return pid; -} - - -int Pid::kill() -{ - assert(pid != -1); - - debug("killing process %1%", pid); - - /* Send the requested signal to the child. If it has its own - process group, send the signal to every process in the child - process group (which hopefully includes *all* its children). */ - if (::kill(separatePG ? -pid : pid, killSignal) != 0) { - /* On BSDs, killing a process group will return EPERM if all - processes in the group are zombies (or something like - that). So try to detect and ignore that situation. */ -#if __FreeBSD__ || __APPLE__ - if (errno != EPERM || ::kill(pid, 0) != 0) -#endif - logError(SysError("killing process %d", pid).info()); - } - - return wait(); -} - - -int Pid::wait() -{ - assert(pid != -1); - while (1) { - int status; - int res = waitpid(pid, &status, 0); - if (res == pid) { - pid = -1; - return status; - } - if (errno != EINTR) - throw SysError("cannot get exit status of PID %d", pid); - checkInterrupt(); - } -} - - -void Pid::setSeparatePG(bool separatePG) -{ - this->separatePG = separatePG; -} - - -void Pid::setKillSignal(int signal) -{ - this->killSignal = signal; -} - - -pid_t Pid::release() -{ - pid_t p = pid; - pid = -1; - return p; -} - - -void killUser(uid_t uid) -{ - debug("killing all processes running under uid '%1%'", uid); - - assert(uid != 0); /* just to be safe... */ - - /* The system call kill(-1, sig) sends the signal `sig' to all - users to which the current process can send signals. So we - fork a process, switch to uid, and send a mass kill. */ - - Pid pid = startProcess([&]() { - - if (setuid(uid) == -1) - throw SysError("setting uid"); - - while (true) { -#ifdef __APPLE__ - /* OSX's kill syscall takes a third parameter that, among - other things, determines if kill(-1, signo) affects the - calling process. In the OSX libc, it's set to true, - which means "follow POSIX", which we don't want here - */ - if (syscall(SYS_kill, -1, SIGKILL, false) == 0) break; -#else - if (kill(-1, SIGKILL) == 0) break; -#endif - if (errno == ESRCH || errno == EPERM) break; /* no more processes */ - if (errno != EINTR) - throw SysError("cannot kill processes for uid '%1%'", uid); - } - - _exit(0); - }); - - int status = pid.wait(); - if (status != 0) - throw Error("cannot kill processes for uid '%1%': %2%", uid, statusToString(status)); - - /* !!! 
We should really do some check to make sure that there are - no processes left running under `uid', but there is no portable - way to do so (I think). The most reliable way may be `ps -eo - uid | grep -q $uid'. */ -} - - -////////////////////////////////////////////////////////////////////// - - -/* Wrapper around vfork to prevent the child process from clobbering - the caller's stack frame in the parent. */ -static pid_t doFork(bool allowVfork, std::function fun) __attribute__((noinline)); -static pid_t doFork(bool allowVfork, std::function fun) -{ -#ifdef __linux__ - pid_t pid = allowVfork ? vfork() : fork(); -#else - pid_t pid = fork(); -#endif - if (pid != 0) return pid; - fun(); - abort(); -} - - -#if __linux__ -static int childEntry(void * arg) -{ - auto main = (std::function *) arg; - (*main)(); - return 1; -} -#endif - - -pid_t startProcess(std::function fun, const ProcessOptions & options) -{ - std::function wrapper = [&]() { - if (!options.allowVfork) - logger = makeSimpleLogger(); - try { -#if __linux__ - if (options.dieWithParent && prctl(PR_SET_PDEATHSIG, SIGKILL) == -1) - throw SysError("setting death signal"); -#endif - fun(); - } catch (std::exception & e) { - try { - std::cerr << options.errorPrefix << e.what() << "\n"; - } catch (...) { } - } catch (...) { } - if (options.runExitHandlers) - exit(1); - else - _exit(1); - }; - - pid_t pid = -1; - - if (options.cloneFlags) { - #ifdef __linux__ - // Not supported, since then we don't know when to free the stack. - assert(!(options.cloneFlags & CLONE_VM)); - - size_t stackSize = 1 * 1024 * 1024; - auto stack = (char *) mmap(0, stackSize, - PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0); - if (stack == MAP_FAILED) throw SysError("allocating stack"); - - Finally freeStack([&]() { munmap(stack, stackSize); }); - - pid = clone(childEntry, stack + stackSize, options.cloneFlags | SIGCHLD, &wrapper); - #else - throw Error("clone flags are only supported on Linux"); - #endif - } else - pid = doFork(options.allowVfork, wrapper); - - if (pid == -1) throw SysError("unable to fork"); - - return pid; -} - - std::vector stringsToCharPtrs(const Strings & ss) { std::vector res; @@ -1168,211 +40,6 @@ std::vector stringsToCharPtrs(const Strings & ss) return res; } -std::string runProgram(Path program, bool searchPath, const Strings & args, - const std::optional & input, bool isInteractive) -{ - auto res = runProgram(RunOptions {.program = program, .searchPath = searchPath, .args = args, .input = input, .isInteractive = isInteractive}); - - if (!statusOk(res.first)) - throw ExecError(res.first, "program '%1%' %2%", program, statusToString(res.first)); - - return res.second; -} - -// Output = error code + "standard out" output stream -std::pair runProgram(RunOptions && options) -{ - StringSink sink; - options.standardOut = &sink; - - int status = 0; - - try { - runProgram2(options); - } catch (ExecError & e) { - status = e.status; - } - - return {status, std::move(sink.s)}; -} - -void runProgram2(const RunOptions & options) -{ - checkInterrupt(); - - assert(!(options.standardIn && options.input)); - - std::unique_ptr source_; - Source * source = options.standardIn; - - if (options.input) { - source_ = std::make_unique(*options.input); - source = source_.get(); - } - - /* Create a pipe. 
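
The pipe-plus-drain pattern used by `runProgram2` here, reduced to a sketch; the `file-descriptor.hh` location for `Pipe`, `writeFull` and `drainFD` is an assumption based on the includes added elsewhere in this change:

```cpp
// Sketch of the pipe-plus-drain pattern; file-descriptor.hh is an assumed
// location for Pipe/writeFull/drainFD, based on the includes added elsewhere.
#include "file-descriptor.hh"
#include "processes.hh"

#include <iostream>
#include <unistd.h>

int main()
{
    nix::Pipe pipe;
    pipe.create();

    nix::Pid pid = nix::startProcess([&]() {
        pipe.readSide.close();
        nix::writeFull(pipe.writeSide.get(), "hello from the child\n");
        _exit(0);
    });

    // Close our copy of the write end so drainFD() sees EOF when the child exits.
    pipe.writeSide.close();
    std::cout << nix::drainFD(pipe.readSide.get(), true);

    return nix::statusOk(pid.wait()) ? 0 : 1;
}
```
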
*/ - Pipe out, in; - if (options.standardOut) out.create(); - if (source) in.create(); - - ProcessOptions processOptions; - // vfork implies that the environment of the main process and the fork will - // be shared (technically this is undefined, but in practice that's the - // case), so we can't use it if we alter the environment - processOptions.allowVfork = !options.environment; - - std::optional>> resumeLoggerDefer; - if (options.isInteractive) { - logger->pause(); - resumeLoggerDefer.emplace( - []() { - logger->resume(); - } - ); - } - - /* Fork. */ - Pid pid = startProcess([&]() { - if (options.environment) - replaceEnv(*options.environment); - if (options.standardOut && dup2(out.writeSide.get(), STDOUT_FILENO) == -1) - throw SysError("dupping stdout"); - if (options.mergeStderrToStdout) - if (dup2(STDOUT_FILENO, STDERR_FILENO) == -1) - throw SysError("cannot dup stdout into stderr"); - if (source && dup2(in.readSide.get(), STDIN_FILENO) == -1) - throw SysError("dupping stdin"); - - if (options.chdir && chdir((*options.chdir).c_str()) == -1) - throw SysError("chdir failed"); - if (options.gid && setgid(*options.gid) == -1) - throw SysError("setgid failed"); - /* Drop all other groups if we're setgid. */ - if (options.gid && setgroups(0, 0) == -1) - throw SysError("setgroups failed"); - if (options.uid && setuid(*options.uid) == -1) - throw SysError("setuid failed"); - - Strings args_(options.args); - args_.push_front(options.program); - - restoreProcessContext(); - - if (options.searchPath) - execvp(options.program.c_str(), stringsToCharPtrs(args_).data()); - // This allows you to refer to a program with a pathname relative - // to the PATH variable. - else - execv(options.program.c_str(), stringsToCharPtrs(args_).data()); - - throw SysError("executing '%1%'", options.program); - }, processOptions); - - out.writeSide.close(); - - std::thread writerThread; - - std::promise promise; - - Finally doJoin([&]() { - if (writerThread.joinable()) - writerThread.join(); - }); - - - if (source) { - in.readSide.close(); - writerThread = std::thread([&]() { - try { - std::vector buf(8 * 1024); - while (true) { - size_t n; - try { - n = source->read(buf.data(), buf.size()); - } catch (EndOfFile &) { - break; - } - writeFull(in.writeSide.get(), {buf.data(), n}); - } - promise.set_value(); - } catch (...) { - promise.set_exception(std::current_exception()); - } - in.writeSide.close(); - }); - } - - if (options.standardOut) - drainFD(out.readSide.get(), *options.standardOut); - - /* Wait for the child to finish. */ - int status = pid.wait(); - - /* Wait for the writer thread to finish. 
*/ - if (source) promise.get_future().get(); - - if (status) - throw ExecError(status, "program '%1%' %2%", options.program, statusToString(status)); -} - - -void closeMostFDs(const std::set & exceptions) -{ -#if __linux__ - try { - for (auto & s : readDirectory("/proc/self/fd")) { - auto fd = std::stoi(s.name); - if (!exceptions.count(fd)) { - debug("closing leaked FD %d", fd); - close(fd); - } - } - return; - } catch (SysError &) { - } -#endif - - int maxFD = 0; - maxFD = sysconf(_SC_OPEN_MAX); - for (int fd = 0; fd < maxFD; ++fd) - if (!exceptions.count(fd)) - close(fd); /* ignore result */ -} - - -void closeOnExec(int fd) -{ - int prev; - if ((prev = fcntl(fd, F_GETFD, 0)) == -1 || - fcntl(fd, F_SETFD, prev | FD_CLOEXEC) == -1) - throw SysError("setting close-on-exec flag"); -} - - -////////////////////////////////////////////////////////////////////// - - -std::atomic _isInterrupted = false; - -static thread_local bool interruptThrown = false; -thread_local std::function interruptCheck; - -void setInterruptThrown() -{ - interruptThrown = true; -} - -void _interrupted() -{ - /* Block user interrupts while an exception is being handled. - Throwing an exception while another exception is being handled - kills the program! */ - if (!interruptThrown && !std::uncaught_exceptions()) { - interruptThrown = true; - throw Interrupted("interrupted by the user"); - } -} - ////////////////////////////////////////////////////////////////////// @@ -1438,32 +105,6 @@ std::string rewriteStrings(std::string s, const StringMap & rewrites) } -std::string statusToString(int status) -{ - if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { - if (WIFEXITED(status)) - return fmt("failed with exit code %1%", WEXITSTATUS(status)); - else if (WIFSIGNALED(status)) { - int sig = WTERMSIG(status); -#if HAVE_STRSIGNAL - const char * description = strsignal(sig); - return fmt("failed due to signal %1% (%2%)", sig, description); -#else - return fmt("failed due to signal %1%", sig); -#endif - } - else - return "died abnormally"; - } else return "succeeded"; -} - - -bool statusOk(int status) -{ - return WIFEXITED(status) && WEXITSTATUS(status) == 0; -} - - bool hasPrefix(std::string_view s, std::string_view prefix) { return s.compare(0, prefix.size(), prefix) == 0; @@ -1511,82 +152,6 @@ void ignoreException(Verbosity lvl) } catch (...) { } } -bool shouldANSI() -{ - return isatty(STDERR_FILENO) - && getEnv("TERM").value_or("dumb") != "dumb" - && !(getEnv("NO_COLOR").has_value() || getEnv("NOCOLOR").has_value()); -} - -std::string filterANSIEscapes(std::string_view s, bool filterAll, unsigned int width) -{ - std::string t, e; - size_t w = 0; - auto i = s.begin(); - - while (w < (size_t) width && i != s.end()) { - - if (*i == '\e') { - std::string e; - e += *i++; - char last = 0; - - if (i != s.end() && *i == '[') { - e += *i++; - // eat parameter bytes - while (i != s.end() && *i >= 0x30 && *i <= 0x3f) e += *i++; - // eat intermediate bytes - while (i != s.end() && *i >= 0x20 && *i <= 0x2f) e += *i++; - // eat final byte - if (i != s.end() && *i >= 0x40 && *i <= 0x7e) e += last = *i++; - } else { - if (i != s.end() && *i >= 0x40 && *i <= 0x5f) e += *i++; - } - - if (!filterAll && last == 'm') - t += e; - } - - else if (*i == '\t') { - i++; t += ' '; w++; - while (w < (size_t) width && w % 8) { - t += ' '; w++; - } - } - - else if (*i == '\r' || *i == '\a') - // do nothing for now - i++; - - else { - w++; - // Copy one UTF-8 character. 
- if ((*i & 0xe0) == 0xc0) { - t += *i++; - if (i != s.end() && ((*i & 0xc0) == 0x80)) t += *i++; - } else if ((*i & 0xf0) == 0xe0) { - t += *i++; - if (i != s.end() && ((*i & 0xc0) == 0x80)) { - t += *i++; - if (i != s.end() && ((*i & 0xc0) == 0x80)) t += *i++; - } - } else if ((*i & 0xf8) == 0xf0) { - t += *i++; - if (i != s.end() && ((*i & 0xc0) == 0x80)) { - t += *i++; - if (i != s.end() && ((*i & 0xc0) == 0x80)) { - t += *i++; - if (i != s.end() && ((*i & 0xc0) == 0x80)) t += *i++; - } - } - } else - t += *i++; - } - } - - return t; -} - constexpr char base64Chars[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; @@ -1703,386 +268,9 @@ std::pair getLine(std::string_view s) } -////////////////////////////////////////////////////////////////////// - -static Sync> windowSize{{0, 0}}; - - -static void updateWindowSize() -{ - struct winsize ws; - if (ioctl(2, TIOCGWINSZ, &ws) == 0) { - auto windowSize_(windowSize.lock()); - windowSize_->first = ws.ws_row; - windowSize_->second = ws.ws_col; - } -} - - -std::pair getWindowSize() -{ - return *windowSize.lock(); -} - - -/* We keep track of interrupt callbacks using integer tokens, so we can iterate - safely without having to lock the data structure while executing arbitrary - functions. - */ -struct InterruptCallbacks { - typedef int64_t Token; - - /* We use unique tokens so that we can't accidentally delete the wrong - handler because of an erroneous double delete. */ - Token nextToken = 0; - - /* Used as a list, see InterruptCallbacks comment. */ - std::map> callbacks; -}; - -static Sync _interruptCallbacks; - -static void signalHandlerThread(sigset_t set) -{ - while (true) { - int signal = 0; - sigwait(&set, &signal); - - if (signal == SIGINT || signal == SIGTERM || signal == SIGHUP) - triggerInterrupt(); - - else if (signal == SIGWINCH) { - updateWindowSize(); - } - } -} - -void triggerInterrupt() -{ - _isInterrupted = true; - - { - InterruptCallbacks::Token i = 0; - while (true) { - std::function callback; - { - auto interruptCallbacks(_interruptCallbacks.lock()); - auto lb = interruptCallbacks->callbacks.lower_bound(i); - if (lb == interruptCallbacks->callbacks.end()) - break; - - callback = lb->second; - i = lb->first + 1; - } - - try { - callback(); - } catch (...) { - ignoreException(); - } - } - } -} - -static sigset_t savedSignalMask; -static bool savedSignalMaskIsSet = false; - -void setChildSignalMask(sigset_t * sigs) -{ - assert(sigs); // C style function, but think of sigs as a reference - -#if _POSIX_C_SOURCE >= 1 || _XOPEN_SOURCE || _POSIX_SOURCE - sigemptyset(&savedSignalMask); - // There's no "assign" or "copy" function, so we rely on (math) idempotence - // of the or operator: a or a = a. - sigorset(&savedSignalMask, sigs, sigs); -#else - // Without sigorset, our best bet is to assume that sigset_t is a type that - // can be assigned directly, such as is the case for a sigset_t defined as - // an integer type. 
- savedSignalMask = *sigs; -#endif - - savedSignalMaskIsSet = true; -} - -void saveSignalMask() { - if (sigprocmask(SIG_BLOCK, nullptr, &savedSignalMask)) - throw SysError("querying signal mask"); - - savedSignalMaskIsSet = true; -} - -void startSignalHandlerThread() -{ - updateWindowSize(); - - saveSignalMask(); - - sigset_t set; - sigemptyset(&set); - sigaddset(&set, SIGINT); - sigaddset(&set, SIGTERM); - sigaddset(&set, SIGHUP); - sigaddset(&set, SIGPIPE); - sigaddset(&set, SIGWINCH); - if (pthread_sigmask(SIG_BLOCK, &set, nullptr)) - throw SysError("blocking signals"); - - std::thread(signalHandlerThread, set).detach(); -} - -static void restoreSignals() -{ - // If startSignalHandlerThread wasn't called, that means we're not running - // in a proper libmain process, but a process that presumably manages its - // own signal handlers. Such a process should call either - // - initNix(), to be a proper libmain process - // - startSignalHandlerThread(), to resemble libmain regarding signal - // handling only - // - saveSignalMask(), for processes that define their own signal handling - // thread - // TODO: Warn about this? Have a default signal mask? The latter depends on - // whether we should generally inherit signal masks from the caller. - // I don't know what the larger unix ecosystem expects from us here. - if (!savedSignalMaskIsSet) - return; - - if (sigprocmask(SIG_SETMASK, &savedSignalMask, nullptr)) - throw SysError("restoring signals"); -} - -#if __linux__ -rlim_t savedStackSize = 0; -#endif - -void setStackSize(size_t stackSize) -{ - #if __linux__ - struct rlimit limit; - if (getrlimit(RLIMIT_STACK, &limit) == 0 && limit.rlim_cur < stackSize) { - savedStackSize = limit.rlim_cur; - limit.rlim_cur = stackSize; - setrlimit(RLIMIT_STACK, &limit); - } - #endif -} - -#if __linux__ -static AutoCloseFD fdSavedMountNamespace; -static AutoCloseFD fdSavedRoot; -#endif - -void saveMountNamespace() -{ -#if __linux__ - static std::once_flag done; - std::call_once(done, []() { - fdSavedMountNamespace = open("/proc/self/ns/mnt", O_RDONLY); - if (!fdSavedMountNamespace) - throw SysError("saving parent mount namespace"); - - fdSavedRoot = open("/proc/self/root", O_RDONLY); - }); -#endif -} - -void restoreMountNamespace() -{ -#if __linux__ - try { - auto savedCwd = absPath("."); - - if (fdSavedMountNamespace && setns(fdSavedMountNamespace.get(), CLONE_NEWNS) == -1) - throw SysError("restoring parent mount namespace"); - - if (fdSavedRoot) { - if (fchdir(fdSavedRoot.get())) - throw SysError("chdir into saved root"); - if (chroot(".")) - throw SysError("chroot into saved root"); - } - - if (chdir(savedCwd.c_str()) == -1) - throw SysError("restoring cwd"); - } catch (Error & e) { - debug(e.msg()); - } -#endif -} - -void unshareFilesystem() -{ -#ifdef __linux__ - if (unshare(CLONE_FS) != 0 && errno != EPERM) - throw SysError("unsharing filesystem state in download thread"); -#endif -} - -void restoreProcessContext(bool restoreMounts) -{ - restoreSignals(); - if (restoreMounts) { - restoreMountNamespace(); - } - - #if __linux__ - if (savedStackSize) { - struct rlimit limit; - if (getrlimit(RLIMIT_STACK, &limit) == 0) { - limit.rlim_cur = savedStackSize; - setrlimit(RLIMIT_STACK, &limit); - } - } - #endif -} - -/* RAII helper to automatically deregister a callback. 
*/ -struct InterruptCallbackImpl : InterruptCallback -{ - InterruptCallbacks::Token token; - ~InterruptCallbackImpl() override - { - auto interruptCallbacks(_interruptCallbacks.lock()); - interruptCallbacks->callbacks.erase(token); - } -}; - -std::unique_ptr createInterruptCallback(std::function callback) -{ - auto interruptCallbacks(_interruptCallbacks.lock()); - auto token = interruptCallbacks->nextToken++; - interruptCallbacks->callbacks.emplace(token, callback); - - auto res = std::make_unique(); - res->token = token; - - return std::unique_ptr(res.release()); -} - - -AutoCloseFD createUnixDomainSocket() -{ - AutoCloseFD fdSocket = socket(PF_UNIX, SOCK_STREAM - #ifdef SOCK_CLOEXEC - | SOCK_CLOEXEC - #endif - , 0); - if (!fdSocket) - throw SysError("cannot create Unix domain socket"); - closeOnExec(fdSocket.get()); - return fdSocket; -} - - -AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode) -{ - auto fdSocket = nix::createUnixDomainSocket(); - - bind(fdSocket.get(), path); - - if (chmod(path.c_str(), mode) == -1) - throw SysError("changing permissions on '%1%'", path); - - if (listen(fdSocket.get(), 100) == -1) - throw SysError("cannot listen on socket '%1%'", path); - - return fdSocket; -} - - -void bind(int fd, const std::string & path) -{ - unlink(path.c_str()); - - struct sockaddr_un addr; - addr.sun_family = AF_UNIX; - - if (path.size() + 1 >= sizeof(addr.sun_path)) { - Pid pid = startProcess([&]() { - Path dir = dirOf(path); - if (chdir(dir.c_str()) == -1) - throw SysError("chdir to '%s' failed", dir); - std::string base(baseNameOf(path)); - if (base.size() + 1 >= sizeof(addr.sun_path)) - throw Error("socket path '%s' is too long", base); - memcpy(addr.sun_path, base.c_str(), base.size() + 1); - if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) - throw SysError("cannot bind to socket '%s'", path); - _exit(0); - }); - int status = pid.wait(); - if (status != 0) - throw Error("cannot bind to socket '%s'", path); - } else { - memcpy(addr.sun_path, path.c_str(), path.size() + 1); - if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) - throw SysError("cannot bind to socket '%s'", path); - } -} - - -void connect(int fd, const std::string & path) -{ - struct sockaddr_un addr; - addr.sun_family = AF_UNIX; - - if (path.size() + 1 >= sizeof(addr.sun_path)) { - Pid pid = startProcess([&]() { - Path dir = dirOf(path); - if (chdir(dir.c_str()) == -1) - throw SysError("chdir to '%s' failed", dir); - std::string base(baseNameOf(path)); - if (base.size() + 1 >= sizeof(addr.sun_path)) - throw Error("socket path '%s' is too long", base); - memcpy(addr.sun_path, base.c_str(), base.size() + 1); - if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) - throw SysError("cannot connect to socket at '%s'", path); - _exit(0); - }); - int status = pid.wait(); - if (status != 0) - throw Error("cannot connect to socket at '%s'", path); - } else { - memcpy(addr.sun_path, path.c_str(), path.size() + 1); - if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) - throw SysError("cannot connect to socket at '%s'", path); - } -} - - std::string showBytes(uint64_t bytes) { return fmt("%.2f MiB", bytes / (1024.0 * 1024.0)); } - -// FIXME: move to libstore/build -void commonChildInit() -{ - logger = makeSimpleLogger(); - - const static std::string pathNullDevice = "/dev/null"; - restoreProcessContext(false); - - /* Put the child in a separate session (and thus a separate - process group) so that it has no controlling terminal (meaning - that e.g. 
ssh cannot open /dev/tty) and it doesn't receive - terminal signals. */ - if (setsid() == -1) - throw SysError("creating a new session"); - - /* Dup stderr to stdout. */ - if (dup2(STDERR_FILENO, STDOUT_FILENO) == -1) - throw SysError("cannot dup stderr into stdout"); - - /* Reroute stdin to /dev/null. */ - int fdDevNull = open(pathNullDevice.c_str(), O_RDWR); - if (fdDevNull == -1) - throw SysError("cannot open '%1%'", pathNullDevice); - if (dup2(fdDevNull, STDIN_FILENO) == -1) - throw SysError("cannot dup null device into stdin"); - close(fdDevNull); -} - } diff --git a/src/libutil/util.hh b/src/libutil/util.hh index b302d6f45..27faa4d6d 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -4,491 +4,18 @@ #include "types.hh" #include "error.hh" #include "logging.hh" -#include "ansicolor.hh" - -#include -#include -#include -#include -#include #include -#include #include #include #include #include -#ifndef HAVE_STRUCT_DIRENT_D_TYPE -#define DT_UNKNOWN 0 -#define DT_REG 1 -#define DT_LNK 2 -#define DT_DIR 3 -#endif - namespace nix { -struct Sink; -struct Source; - void initLibUtil(); -/** - * The system for which Nix is compiled. - */ -extern const std::string nativeSystem; - - -/** - * @return an environment variable. - */ -std::optional getEnv(const std::string & key); - -/** - * @return a non empty environment variable. Returns nullopt if the env - * variable is set to "" - */ -std::optional getEnvNonEmpty(const std::string & key); - -/** - * Get the entire environment. - */ -std::map getEnv(); - -/** - * Clear the environment. - */ -void clearEnv(); - -/** - * @return An absolutized path, resolving paths relative to the - * specified directory, or the current directory otherwise. The path - * is also canonicalised. - */ -Path absPath(Path path, - std::optional dir = {}, - bool resolveSymlinks = false); - -/** - * Canonicalise a path by removing all `.` or `..` components and - * double or trailing slashes. Optionally resolves all symlink - * components such that each component of the resulting path is *not* - * a symbolic link. - */ -Path canonPath(PathView path, bool resolveSymlinks = false); - -/** - * @return The directory part of the given canonical path, i.e., - * everything before the final `/`. If the path is the root or an - * immediate child thereof (e.g., `/foo`), this means `/` - * is returned. - */ -Path dirOf(const PathView path); - -/** - * @return the base name of the given canonical path, i.e., everything - * following the final `/` (trailing slashes are removed). - */ -std::string_view baseNameOf(std::string_view path); - -/** - * Perform tilde expansion on a path. - */ -std::string expandTilde(std::string_view path); - -/** - * Check whether 'path' is a descendant of 'dir'. Both paths must be - * canonicalized. - */ -bool isInDir(std::string_view path, std::string_view dir); - -/** - * Check whether 'path' is equal to 'dir' or a descendant of - * 'dir'. Both paths must be canonicalized. - */ -bool isDirOrInDir(std::string_view path, std::string_view dir); - -/** - * Get status of `path`. - */ -struct stat stat(const Path & path); -struct stat lstat(const Path & path); - -/** - * @return true iff the given path exists. - */ -bool pathExists(const Path & path); - -/** - * A version of pathExists that returns false on a permission error. - * Useful for inferring default paths across directories that might not - * be readable. 
- * @return true iff the given path can be accessed and exists - */ -bool pathAccessible(const Path & path); - -/** - * Read the contents (target) of a symbolic link. The result is not - * in any way canonicalised. - */ -Path readLink(const Path & path); - -bool isLink(const Path & path); - -/** - * Read the contents of a directory. The entries `.` and `..` are - * removed. - */ -struct DirEntry -{ - std::string name; - ino_t ino; - /** - * one of DT_* - */ - unsigned char type; - DirEntry(std::string name, ino_t ino, unsigned char type) - : name(std::move(name)), ino(ino), type(type) { } -}; - -typedef std::vector DirEntries; - -DirEntries readDirectory(const Path & path); - -unsigned char getFileType(const Path & path); - -/** - * Read the contents of a file into a string. - */ -std::string readFile(int fd); -std::string readFile(const Path & path); -void readFile(const Path & path, Sink & sink); - -/** - * Write a string to a file. - */ -void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, bool sync = false); - -void writeFile(const Path & path, Source & source, mode_t mode = 0666, bool sync = false); - -/** - * Flush a file's parent directory to disk - */ -void syncParent(const Path & path); - -/** - * Read a line from a file descriptor. - */ -std::string readLine(int fd); - -/** - * Write a line to a file descriptor. - */ -void writeLine(int fd, std::string s); - -/** - * Delete a path; i.e., in the case of a directory, it is deleted - * recursively. It's not an error if the path does not exist. The - * second variant returns the number of bytes and blocks freed. - */ -void deletePath(const Path & path); - -void deletePath(const Path & path, uint64_t & bytesFreed); - -std::string getUserName(); - -/** - * @return the given user's home directory from /etc/passwd. - */ -Path getHomeOf(uid_t userId); - -/** - * @return $HOME or the user's home directory from /etc/passwd. - */ -Path getHome(); - -/** - * @return $XDG_CACHE_HOME or $HOME/.cache. - */ -Path getCacheDir(); - -/** - * @return $XDG_CONFIG_HOME or $HOME/.config. - */ -Path getConfigDir(); - -/** - * @return the directories to search for user configuration files - */ -std::vector getConfigDirs(); - -/** - * @return $XDG_DATA_HOME or $HOME/.local/share. - */ -Path getDataDir(); - -/** - * @return the path of the current executable. - */ -std::optional getSelfExe(); - -/** - * @return $XDG_STATE_HOME or $HOME/.local/state. - */ -Path getStateDir(); - -/** - * Create the Nix state directory and return the path to it. - */ -Path createNixStateDir(); - -/** - * Create a directory and all its parents, if necessary. Returns the - * list of created directories, in order of creation. - */ -Paths createDirs(const Path & path); -inline Paths createDirs(PathView path) -{ - return createDirs(Path(path)); -} - -/** - * Create a symlink. - */ -void createSymlink(const Path & target, const Path & link); - -/** - * Atomically create or replace a symlink. - */ -void replaceSymlink(const Path & target, const Path & link); - -void renameFile(const Path & src, const Path & dst); - -/** - * Similar to 'renameFile', but fallback to a copy+remove if `src` and `dst` - * are on a different filesystem. - * - * Beware that this might not be atomic because of the copy that happens behind - * the scenes - */ -void moveFile(const Path & src, const Path & dst); - - -/** - * Wrappers arount read()/write() that read/write exactly the - * requested number of bytes. 
- */ -void readFull(int fd, char * buf, size_t count); -void writeFull(int fd, std::string_view s, bool allowInterrupts = true); - -MakeError(EndOfFile, Error); - - -/** - * Read a file descriptor until EOF occurs. - */ -std::string drainFD(int fd, bool block = true, const size_t reserveSize=0); - -void drainFD(int fd, Sink & sink, bool block = true); - -/** - * If cgroups are active, attempt to calculate the number of CPUs available. - * If cgroups are unavailable or if cpu.max is set to "max", return 0. - */ -unsigned int getMaxCPU(); - -/** - * Automatic cleanup of resources. - */ - - -class AutoDelete -{ - Path path; - bool del; - bool recursive; -public: - AutoDelete(); - AutoDelete(const Path & p, bool recursive = true); - ~AutoDelete(); - void cancel(); - void reset(const Path & p, bool recursive = true); - operator Path() const { return path; } - operator PathView() const { return path; } -}; - - -class AutoCloseFD -{ - int fd; -public: - AutoCloseFD(); - AutoCloseFD(int fd); - AutoCloseFD(const AutoCloseFD & fd) = delete; - AutoCloseFD(AutoCloseFD&& fd); - ~AutoCloseFD(); - AutoCloseFD& operator =(const AutoCloseFD & fd) = delete; - AutoCloseFD& operator =(AutoCloseFD&& fd); - int get() const; - explicit operator bool() const; - int release(); - void close(); - void fsync(); -}; - - -/** - * Create a temporary directory. - */ -Path createTempDir(const Path & tmpRoot = "", const Path & prefix = "nix", - bool includePid = true, bool useGlobalCounter = true, mode_t mode = 0755); - -/** - * Create a temporary file, returning a file handle and its path. - */ -std::pair createTempFile(const Path & prefix = "nix"); - - -class Pipe -{ -public: - AutoCloseFD readSide, writeSide; - void create(); - void close(); -}; - - -struct DIRDeleter -{ - void operator()(DIR * dir) const { - closedir(dir); - } -}; - -typedef std::unique_ptr AutoCloseDir; - - -class Pid -{ - pid_t pid = -1; - bool separatePG = false; - int killSignal = SIGKILL; -public: - Pid(); - Pid(pid_t pid); - ~Pid(); - void operator =(pid_t pid); - operator pid_t(); - int kill(); - int wait(); - - void setSeparatePG(bool separatePG); - void setKillSignal(int signal); - pid_t release(); -}; - - -/** - * Kill all processes running under the specified uid by sending them - * a SIGKILL. - */ -void killUser(uid_t uid); - - -/** - * Fork a process that runs the given function, and return the child - * pid to the caller. - */ -struct ProcessOptions -{ - std::string errorPrefix = ""; - bool dieWithParent = true; - bool runExitHandlers = false; - bool allowVfork = false; - /** - * use clone() with the specified flags (Linux only) - */ - int cloneFlags = 0; -}; - -pid_t startProcess(std::function fun, const ProcessOptions & options = ProcessOptions()); - - -/** - * Run a program and return its stdout in a string (i.e., like the - * shell backtick operator). - */ -std::string runProgram(Path program, bool searchPath = false, - const Strings & args = Strings(), - const std::optional & input = {}, bool isInteractive = false); - -struct RunOptions -{ - Path program; - bool searchPath = true; - Strings args; - std::optional uid; - std::optional gid; - std::optional chdir; - std::optional> environment; - std::optional input; - Source * standardIn = nullptr; - Sink * standardOut = nullptr; - bool mergeStderrToStdout = false; - bool isInteractive = false; -}; - -std::pair runProgram(RunOptions && options); - -void runProgram2(const RunOptions & options); - - -/** - * Change the stack size. 
- */ -void setStackSize(size_t stackSize); - - -/** - * Restore the original inherited Unix process context (such as signal - * masks, stack size). - - * See startSignalHandlerThread(), saveSignalMask(). - */ -void restoreProcessContext(bool restoreMounts = true); - -/** - * Save the current mount namespace. Ignored if called more than - * once. - */ -void saveMountNamespace(); - -/** - * Restore the mount namespace saved by saveMountNamespace(). Ignored - * if saveMountNamespace() was never called. - */ -void restoreMountNamespace(); - -/** - * Cause this thread to not share any FS attributes with the main - * thread, because this causes setns() in restoreMountNamespace() to - * fail. - */ -void unshareFilesystem(); - - -class ExecError : public Error -{ -public: - int status; - - template - ExecError(int status, const Args & ... args) - : Error(args...), status(status) - { } -}; - /** * Convert a list of strings to a null-terminated vector of `char * *`s. The result must not be accessed beyond the lifetime of the @@ -496,36 +23,6 @@ public: */ std::vector stringsToCharPtrs(const Strings & ss); -/** - * Close all file descriptors except those listed in the given set. - * Good practice in child processes. - */ -void closeMostFDs(const std::set & exceptions); - -/** - * Set the close-on-exec flag for the given file descriptor. - */ -void closeOnExec(int fd); - - -/* User interruption. */ - -extern std::atomic _isInterrupted; - -extern thread_local std::function interruptCheck; - -void setInterruptThrown(); - -void _interrupted(); - -void inline checkInterrupt() -{ - if (_isInterrupted || (interruptCheck && interruptCheck())) - _interrupted(); -} - -MakeError(Interrupted, BaseError); - MakeError(FormatError, Error); @@ -601,15 +98,6 @@ std::string replaceStrings( std::string rewriteStrings(std::string s, const StringMap & rewrites); -/** - * Convert the exit status of a child as returned by wait() into an - * error string. - */ -std::string statusToString(int status); - -bool statusOk(int status); - - /** * Parse a string into an integer. */ @@ -701,10 +189,8 @@ std::string toLower(const std::string & s); std::string shellEscape(const std::string_view s); -/** - * Exception handling in destructors: print an error message, then - * ignore the exception. - */ +/* Exception handling in destructors: print an error message, then + ignore the exception. */ void ignoreException(Verbosity lvl = lvlError); @@ -717,23 +203,6 @@ constexpr char treeLast[] = "└───"; constexpr char treeLine[] = "│ "; constexpr char treeNull[] = " "; -/** - * Determine whether ANSI escape sequences are appropriate for the - * present output. - */ -bool shouldANSI(); - -/** - * Truncate a string to 'width' printable characters. If 'filterAll' - * is true, all ANSI escape sequences are filtered out. Otherwise, - * some escape sequences (such as colour setting) are copied but not - * included in the character count. Also, tabs are expanded to - * spaces. - */ -std::string filterANSIEscapes(std::string_view s, - bool filterAll = false, - unsigned int width = std::numeric_limits::max()); - /** * Base64 encoding/decoding. @@ -821,61 +290,6 @@ template class Callback; -/** - * Start a thread that handles various signals. Also block those signals - * on the current thread (and thus any threads created by it). - * Saves the signal mask before changing the mask to block those signals. - * See saveSignalMask(). 
- */ -void startSignalHandlerThread(); - -/** - * Saves the signal mask, which is the signal mask that nix will restore - * before creating child processes. - * See setChildSignalMask() to set an arbitrary signal mask instead of the - * current mask. - */ -void saveSignalMask(); - -/** - * Sets the signal mask. Like saveSignalMask() but for a signal set that doesn't - * necessarily match the current thread's mask. - * See saveSignalMask() to set the saved mask to the current mask. - */ -void setChildSignalMask(sigset_t *sigs); - -struct InterruptCallback -{ - virtual ~InterruptCallback() { }; -}; - -/** - * Register a function that gets called on SIGINT (in a non-signal - * context). - */ -std::unique_ptr createInterruptCallback( - std::function callback); - -void triggerInterrupt(); - -/** - * A RAII class that causes the current thread to receive SIGUSR1 when - * the signal handler thread receives SIGINT. That is, this allows - * SIGINT to be multiplexed to multiple threads. - */ -struct ReceiveInterrupts -{ - pthread_t target; - std::unique_ptr callback; - - ReceiveInterrupts() - : target(pthread_self()) - , callback(createInterruptCallback([&]() { pthread_kill(target, SIGUSR1); })) - { } -}; - - - /** * A RAII helper that increments a counter on construction and * decrements it on destruction. @@ -890,45 +304,6 @@ struct MaintainCount }; -/** - * @return the number of rows and columns of the terminal. - */ -std::pair getWindowSize(); - - -/** - * Used in various places. - */ -typedef std::function PathFilter; - -extern PathFilter defaultPathFilter; - -/** - * Common initialisation performed in child processes. - */ -void commonChildInit(); - -/** - * Create a Unix domain socket. - */ -AutoCloseFD createUnixDomainSocket(); - -/** - * Create a Unix domain socket in listen mode. - */ -AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode); - -/** - * Bind a Unix domain socket to a path. - */ -void bind(int fd, const std::string & path); - -/** - * Connect to a Unix domain socket. - */ -void connect(int fd, const std::string & path); - - /** * A Rust/Python-like enumerate() iterator adapter. 
* diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index e62c4f6b1..75ce12a8c 100644 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -9,12 +9,12 @@ #include +#include "current-process.hh" #include "parsed-derivations.hh" #include "store-api.hh" #include "local-fs-store.hh" #include "globals.hh" #include "derivations.hh" -#include "util.hh" #include "shared.hh" #include "path-with-outputs.hh" #include "eval.hh" @@ -449,7 +449,7 @@ static void main_nix_build(int argc, char * * argv) } } for (const auto & src : drv.inputSrcs) { - pathsToBuild.push_back(DerivedPath::Opaque{src}); + pathsToBuild.emplace_back(DerivedPath::Opaque{src}); pathsToCopy.insert(src); } diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc index 4504441fa..79db78236 100644 --- a/src/nix-channel/nix-channel.cc +++ b/src/nix-channel/nix-channel.cc @@ -5,7 +5,7 @@ #include "store-api.hh" #include "legacy.hh" #include "eval-settings.hh" // for defexpr -#include "util.hh" +#include "users.hh" #include "tarball.hh" #include diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc index 70af53b28..bb3f1bc6a 100644 --- a/src/nix-collect-garbage/nix-collect-garbage.cc +++ b/src/nix-collect-garbage/nix-collect-garbage.cc @@ -1,3 +1,5 @@ +#include "file-system.hh" +#include "signals.hh" #include "store-api.hh" #include "store-cast.hh" #include "gc-store.hh" diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 01742daa8..213a20d93 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -1,3 +1,4 @@ +#include "users.hh" #include "attr-path.hh" #include "common-eval-args.hh" #include "derivations.hh" @@ -11,7 +12,6 @@ #include "store-api.hh" #include "local-fs-store.hh" #include "user-env.hh" -#include "util.hh" #include "value-to-json.hh" #include "xml-writer.hh" #include "legacy.hh" @@ -481,12 +481,12 @@ static void printMissing(EvalState & state, DrvInfos & elems) std::vector targets; for (auto & i : elems) if (auto drvPath = i.queryDrvPath()) - targets.push_back(DerivedPath::Built{ + targets.emplace_back(DerivedPath::Built{ .drvPath = makeConstantStorePathRef(*drvPath), .outputs = OutputsSpec::All { }, }); else - targets.push_back(DerivedPath::Opaque{ + targets.emplace_back(DerivedPath::Opaque{ .path = i.queryOutPath(), }); diff --git a/src/nix-env/user-env.cc b/src/nix-env/user-env.cc index d12d70f33..250224e7d 100644 --- a/src/nix-env/user-env.cc +++ b/src/nix-env/user-env.cc @@ -1,5 +1,4 @@ #include "user-env.hh" -#include "util.hh" #include "derivations.hh" #include "store-api.hh" #include "path-with-outputs.hh" diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc index d40196497..c67409e89 100644 --- a/src/nix-instantiate/nix-instantiate.cc +++ b/src/nix-instantiate/nix-instantiate.cc @@ -6,7 +6,6 @@ #include "attr-path.hh" #include "value-to-xml.hh" #include "value-to-json.hh" -#include "util.hh" #include "store-api.hh" #include "local-fs-store.hh" #include "common-eval-args.hh" diff --git a/src/nix-store/dotgraph.cc b/src/nix-store/dotgraph.cc index 577cadceb..2c530999b 100644 --- a/src/nix-store/dotgraph.cc +++ b/src/nix-store/dotgraph.cc @@ -1,5 +1,4 @@ #include "dotgraph.hh" -#include "util.hh" #include "store-api.hh" #include diff --git a/src/nix-store/graphml.cc b/src/nix-store/graphml.cc index 439557658..3e789a2d8 100644 --- a/src/nix-store/graphml.cc +++ b/src/nix-store/graphml.cc @@ -1,5 +1,4 @@ #include "graphml.hh" -#include "util.hh" 
#include "store-api.hh" #include "derivations.hh" diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index e4dd94585..123283dfe 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -11,7 +11,6 @@ #include "serve-protocol.hh" #include "serve-protocol-impl.hh" #include "shared.hh" -#include "util.hh" #include "graphml.hh" #include "legacy.hh" #include "path-with-outputs.hh" diff --git a/src/nix/daemon.cc b/src/nix/daemon.cc index af428018a..373dedf7c 100644 --- a/src/nix/daemon.cc +++ b/src/nix/daemon.cc @@ -1,11 +1,12 @@ ///@file +#include "signals.hh" +#include "unix-domain-socket.hh" #include "command.hh" #include "shared.hh" #include "local-store.hh" #include "remote-store.hh" #include "remote-store-connection.hh" -#include "util.hh" #include "serialise.hh" #include "archive.hh" #include "globals.hh" diff --git a/src/nix/develop.cc b/src/nix/develop.cc index b080a3939..38482ed42 100644 --- a/src/nix/develop.cc +++ b/src/nix/develop.cc @@ -8,7 +8,6 @@ #include "derivations.hh" #include "progress-bar.hh" #include "run.hh" -#include "util.hh" #include #include diff --git a/src/nix/doctor.cc b/src/nix/doctor.cc index 1aa6831d3..59f9e3e5d 100644 --- a/src/nix/doctor.cc +++ b/src/nix/doctor.cc @@ -6,7 +6,6 @@ #include "shared.hh" #include "store-api.hh" #include "local-fs-store.hh" -#include "util.hh" #include "worker-protocol.hh" using namespace nix; diff --git a/src/nix/edit.cc b/src/nix/edit.cc index 66629fab0..9cbab230b 100644 --- a/src/nix/edit.cc +++ b/src/nix/edit.cc @@ -1,3 +1,4 @@ +#include "current-process.hh" #include "command-installable-value.hh" #include "shared.hh" #include "eval.hh" diff --git a/src/nix/flake.cc b/src/nix/flake.cc index e8906a252..38938f09e 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -15,6 +15,7 @@ #include "registry.hh" #include "eval-cache.hh" #include "markdown.hh" +#include "users.hh" #include #include diff --git a/src/nix/main.cc b/src/nix/main.cc index d20bc1f8a..73641f6d2 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -1,6 +1,8 @@ #include #include "args/root.hh" +#include "current-process.hh" +#include "namespaces.hh" #include "command.hh" #include "common-args.hh" #include "eval.hh" @@ -20,6 +22,7 @@ #include #include #include +#include #include @@ -426,7 +429,9 @@ void mainWrapped(int argc, char * * argv) }); try { - args.parseCmdline(argvToStrings(argc, argv)); + auto isNixCommand = std::regex_search(programName, std::regex("nix$")); + auto allowShebang = isNixCommand && argc > 1; + args.parseCmdline(argvToStrings(argc, argv),allowShebang); } catch (UsageError &) { if (!args.helpRequested && !args.completions) throw; } diff --git a/src/nix/nix.md b/src/nix/nix.md index 6e7e8a649..eb150f03b 100644 --- a/src/nix/nix.md +++ b/src/nix/nix.md @@ -238,4 +238,67 @@ operate are determined as follows: Most `nix` subcommands operate on a *Nix store*. These are documented in [`nix help-stores`](./nix3-help-stores.md). +# Shebang interpreter + +The `nix` command can be used as a `#!` interpreter. +Arguments to Nix can be passed on subsequent lines in the script. + +Verbatim strings may be passed in double backtick (```` `` ````) quotes. +Sequences of _n_ backticks of 3 or longer are parsed as _n-1_ literal backticks. +A single space before the closing ```` `` ```` is ignored if present. + +`--file` and `--expr` resolve relative paths based on the script location. + +Examples: + +``` +#!/usr/bin/env nix +#! 
nix shell --file ``<nixpkgs>`` hello cowsay --command bash + +hello | cowsay +``` + +or with **flakes**: + +``` +#!/usr/bin/env nix +#! nix shell nixpkgs#bash nixpkgs#hello nixpkgs#cowsay --command bash + +hello | cowsay +``` + +or with an **expression**: + +```bash +#! /usr/bin/env nix +#! nix shell --impure --expr `` +#! nix with (import (builtins.getFlake "nixpkgs") {}); +#! nix terraform.withPlugins (plugins: [ plugins.openstack ]) +#! nix `` +#! nix --command bash + +terraform "$@" +``` + +or with cascading interpreters. Note that the `#! nix` lines don't need to immediately follow the first line, to accommodate other interpreters. + +``` +#!/usr/bin/env nix +//! ```cargo +//! [dependencies] +//! time = "0.1.25" +//! ``` +/* +#!nix shell nixpkgs#rustc nixpkgs#rust-script nixpkgs#cargo --command rust-script +*/ +fn main() { + for argument in std::env::args().skip(1) { + println!("{}", argument); + }; + println!("{}", std::env::var("HOME").expect("")); + println!("{}", time::now().rfc822z()); +} +// vim: ft=rust +``` + + )"" diff --git a/src/nix/path-info.cc b/src/nix/path-info.cc index c16864d30..23198a120 100644 --- a/src/nix/path-info.cc +++ b/src/nix/path-info.cc @@ -9,6 +9,75 @@ #include using namespace nix; +using nlohmann::json; + +/** + * @return the total size of a set of store objects (specified by path), + * that is, the sum of the size of the NAR serialisation of each object + * in the set. + */ +static uint64_t getStoreObjectsTotalSize(Store & store, const StorePathSet & closure) +{ + uint64_t totalNarSize = 0; + for (auto & p : closure) { + totalNarSize += store.queryPathInfo(p)->narSize; + } + return totalNarSize; +} + + +/** + * Write a JSON representation of store object metadata, such as the + * hash and the references. + * + * @param showClosureSize If true, the closure size of each path is + * included. + */ +static json pathInfoToJSON( + Store & store, + const StorePathSet & storePaths, + bool showClosureSize) +{ + json::object_t jsonAllObjects = json::object(); + + for (auto & storePath : storePaths) { + json jsonObject; + + try { + auto info = store.queryPathInfo(storePath); + + jsonObject = info->toJSON(store, true, HashFormat::SRI); + + if (showClosureSize) { + StorePathSet closure; + store.computeFSClosure(storePath, closure, false, false); + + jsonObject["closureSize"] = getStoreObjectsTotalSize(store, closure); + + if (auto * narInfo = dynamic_cast<const NarInfo *>(&*info)) { + uint64_t totalDownloadSize = 0; + for (auto & p : closure) { + auto depInfo = store.queryPathInfo(p); + if (auto * depNarInfo = dynamic_cast<const NarInfo *>(&*depInfo)) + totalDownloadSize += depNarInfo->fileSize; + else + throw Error("Missing .narinfo for dep %s of %s", + store.printStorePath(p), + store.printStorePath(storePath)); + } + jsonObject["closureDownloadSize"] = totalDownloadSize; + } + } + + } catch (InvalidPath &) { + jsonObject = nullptr; + } + + jsonAllObjects[store.printStorePath(storePath)] = std::move(jsonObject); + } + return jsonAllObjects; +} + struct CmdPathInfo : StorePathsCommand, MixJSON { @@ -87,10 +156,11 @@ struct CmdPathInfo : StorePathsCommand, MixJSON pathLen = std::max(pathLen, store->printStorePath(storePath).size()); if (json) { - std::cout << store->pathInfoToJSON( + std::cout << pathInfoToJSON( + *store, // FIXME: preserve order?
StorePathSet(storePaths.begin(), storePaths.end()), - true, showClosureSize, HashFormat::SRI, AllowInvalid).dump(); + showClosureSize).dump(); } else { @@ -107,8 +177,11 @@ struct CmdPathInfo : StorePathsCommand, MixJSON if (showSize) printSize(info->narSize); - if (showClosureSize) - printSize(store->getClosureSize(info->path).first); + if (showClosureSize) { + StorePathSet closure; + store->computeFSClosure(storePath, closure, false, false); + printSize(getStoreObjectsTotalSize(*store, closure)); + } if (showSigs) { std::cout << '\t'; diff --git a/src/nix/path-info.md b/src/nix/path-info.md index 2dda866d0..4594854eb 100644 --- a/src/nix/path-info.md +++ b/src/nix/path-info.md @@ -43,7 +43,7 @@ R""( command): ```console - # nix path-info --json --all | jq -r 'sort_by(.registrationTime)[-11:-1][].path' + # nix path-info --json --all | jq -r 'to_entries | sort_by(.value.registrationTime) | .[-11:-1][] | .key' ``` * Show the size of the entire Nix store: @@ -58,13 +58,13 @@ R""( ```console # nix path-info --json --all --closure-size \ - | jq 'map(select(.closureSize > 1e9)) | sort_by(.closureSize) | map([.path, .closureSize])' + | jq 'map_values(.closureSize | select(. > 1e9)) | to_entries | sort_by(.value)' [ …, - [ - "/nix/store/zqamz3cz4dbzfihki2mk7a63mbkxz9xq-nixos-system-machine-20.09.20201112.3090c65", - 5887562256 - ] + { + "key": "/nix/store/zqamz3cz4dbzfihki2mk7a63mbkxz9xq-nixos-system-machine-20.09.20201112.3090c65", + "value": 5887562256 + } ] ``` diff --git a/src/nix/run.cc b/src/nix/run.cc index 1465e8cde..ea0a17897 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -1,3 +1,4 @@ +#include "current-process.hh" #include "run.hh" #include "command-installable-value.hh" #include "common-args.hh" diff --git a/src/nix/shell.md b/src/nix/shell.md index f36919575..7c315fb3f 100644 --- a/src/nix/shell.md +++ b/src/nix/shell.md @@ -51,4 +51,120 @@ R""( provides the specified [*installables*](./nix.md#installable). If no command is specified, it starts the default shell of your user account specified by `$SHELL`. +# Use as a `#!`-interpreter + +You can use `nix` as a script interpreter to allow scripts written +in arbitrary languages to obtain their own dependencies via Nix. This is +done by starting the script with the following lines: + +```bash +#! /usr/bin/env nix +#! nix shell installables --command real-interpreter +``` + +where *real-interpreter* is the “real” script interpreter that will be +invoked by `nix shell` after it has obtained the dependencies and +initialised the environment, and *installables* are the attribute names of +the dependencies in Nixpkgs. + +The lines starting with `#! nix` specify options (see above). Note that you +cannot write `#! /usr/bin/env nix shell -i ...` because many operating systems +only allow one argument in `#!` lines. + +For example, here is a Python script that depends on Python and the +`prettytable` package: + +```python +#! /usr/bin/env nix +#! nix shell github:tomberek/-#python3With.prettytable --command python + +import prettytable + +# Print a simple table. +t = prettytable.PrettyTable(["N", "N^2"]) +for n in range(1, 10): t.add_row([n, n * n]) +print(t) +``` + +Similarly, the following is a Perl script that specifies that it +requires Perl and the `HTML::TokeParser::Simple` and `LWP` packages: + +```perl +#! /usr/bin/env nix +#! nix shell github:tomberek/-#perlWith.HTMLTokeParserSimple.LWP --command perl -x + +use HTML::TokeParser::Simple; + +# Fetch nixos.org and print all hrefs.
+my $p = HTML::TokeParser::Simple->new(url => 'http://nixos.org/'); + +while (my $token = $p->get_tag("a")) { + my $href = $token->get_attr("href"); + print "$href\n" if $href; +} +``` + +Sometimes you need to pass a simple Nix expression to customize a +package like Terraform: + +```bash +#! /usr/bin/env nix +#! nix shell --impure --expr `` +#! nix with (import (builtins.getFlake ''nixpkgs'') {}); +#! nix terraform.withPlugins (plugins: [ plugins.openstack ]) +#! nix `` +#! nix --command bash + +terraform "$@" +``` + +> **Note** +> +> You must use double backticks (```` `` ````) when passing a simple Nix expression +> in a nix shell shebang. + +Finally, since multiple `#! nix` shebang lines are merged into a single invocation, the following +Haskell script uses a specific branch of Nixpkgs/NixOS (the 21.11 stable +branch): + +```haskell +#!/usr/bin/env nix +#!nix shell --override-input nixpkgs github:NixOS/nixpkgs/nixos-21.11 +#!nix github:tomberek/-#haskellWith.download-curl.tagsoup --command runghc + +import Network.Curl.Download +import Text.HTML.TagSoup +import Data.Either +import Data.ByteString.Char8 (unpack) + +-- Fetch nixos.org and print all hrefs. +main = do + resp <- openURI "https://nixos.org/" + let tags = filter (isTagOpenName "a") $ parseTags $ unpack $ fromRight undefined resp + let tags' = map (fromAttrib "href") tags + mapM_ putStrLn $ filter (/= "") tags' +``` + +If you want to be even more precise, you can specify a specific revision +of Nixpkgs: + + #!nix shell --override-input nixpkgs github:NixOS/nixpkgs/eabc38219184cc3e04a974fe31857d8e0eac098d + +You can also use a Nix expression to build your own dependencies. For example, +the Python example could have been written as: + +```python +#! /usr/bin/env nix +#! nix shell --impure --file deps.nix --command python +``` + +where the file `deps.nix` in the same directory as the `#!`-script +contains: + +```nix +with import <nixpkgs> {}; +python3.withPackages (ps: with ps; [ prettytable ]) +``` + + )"" diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc index 45cd2e1a6..a68616355 100644 --- a/src/nix/sigs.cc +++ b/src/nix/sigs.cc @@ -1,3 +1,4 @@ +#include "signals.hh" #include "command.hh" #include "shared.hh" #include "store-api.hh" diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc index d238456db..c529c2363 100644 --- a/src/nix/upgrade-nix.cc +++ b/src/nix/upgrade-nix.cc @@ -1,3 +1,4 @@ +#include "processes.hh" #include "command.hh" #include "common-args.hh" #include "store-api.hh" diff --git a/src/nix/verify.cc b/src/nix/verify.cc index adaa33c0c..78cb765ce 100644 --- a/src/nix/verify.cc +++ b/src/nix/verify.cc @@ -4,6 +4,7 @@ #include "sync.hh" #include "thread-pool.hh" #include "references.hh" +#include "signals.hh" #include diff --git a/tests/functional/config.sh b/tests/functional/config.sh index 723f575ed..0780c55d0 100644 --- a/tests/functional/config.sh +++ b/tests/functional/config.sh @@ -50,7 +50,8 @@ exp_cores=$(nix show-config | grep '^cores' | cut -d '=' -f 2 | xargs) exp_features=$(nix show-config | grep '^experimental-features' | cut -d '=' -f 2 | xargs) [[ $prev != $exp_cores ]] [[ $exp_cores == "4242" ]] -[[ $exp_features == "flakes nix-command" ]] +# flakes implies fetch-tree +[[ $exp_features == "fetch-tree flakes nix-command" ]] # Test that it's possible to retrieve a single setting's value val=$(nix show-config | grep '^warn-dirty' | cut -d '=' -f 2 | xargs) diff --git a/tests/functional/fetchGitVerification.sh b/tests/functional/fetchGitVerification.sh new file mode 100644 index 000000000..b80e061b5 --- /dev/null +++ 
b/tests/functional/fetchGitVerification.sh @@ -0,0 +1,82 @@ +source common.sh + +requireGit +[[ $(type -p ssh-keygen) ]] || skipTest "ssh-keygen not installed" # require ssh-keygen + +enableFeatures "verified-fetches" + +clearStore + +repo="$TEST_ROOT/git" + +# generate signing keys +keysDir=$TEST_ROOT/.ssh +mkdir -p "$keysDir" +ssh-keygen -f "$keysDir/testkey1" -t ed25519 -P "" -C "test key 1" +key1File="$keysDir/testkey1.pub" +publicKey1=$(awk '{print $2}' "$key1File") +ssh-keygen -f "$keysDir/testkey2" -t rsa -P "" -C "test key 2" +key2File="$keysDir/testkey2.pub" +publicKey2=$(awk '{print $2}' "$key2File") + +git init $repo +git -C $repo config user.email "foobar@example.com" +git -C $repo config user.name "Foobar" +git -C $repo config gpg.format ssh + +echo 'hello' > $repo/text +git -C $repo add text +git -C $repo -c "user.signingkey=$key1File" commit -S -m 'initial commit' + +out=$(nix eval --impure --raw --expr "builtins.fetchGit { url = \"file://$repo\"; keytype = \"ssh-rsa\"; publicKey = \"$publicKey2\"; }" 2>&1) || status=$? +[[ $status == 1 ]] +[[ $out =~ 'No principal matched.' ]] +[[ $(nix eval --impure --raw --expr "builtins.readFile (builtins.fetchGit { url = \"file://$repo\"; publicKey = \"$publicKey1\"; } + \"/text\")") = 'hello' ]] + +echo 'hello world' > $repo/text + +# Verification on a dirty repo should fail. +out=$(nix eval --impure --raw --expr "builtins.fetchGit { url = \"file://$repo\"; keytype = \"ssh-rsa\"; publicKey = \"$publicKey2\"; }" 2>&1) || status=$? +[[ $status == 1 ]] +[[ $out =~ 'dirty' ]] + +git -C $repo add text +git -C $repo -c "user.signingkey=$key2File" commit -S -m 'second commit' + +[[ $(nix eval --impure --raw --expr "builtins.readFile (builtins.fetchGit { url = \"file://$repo\"; publicKeys = [{key = \"$publicKey1\";} {type = \"ssh-rsa\"; key = \"$publicKey2\";}]; } + \"/text\")") = 'hello world' ]] + +# Flake input test +flakeDir="$TEST_ROOT/flake" +mkdir -p "$flakeDir" +cat > "$flakeDir/flake.nix" < "$flakeDir/flake.nix" <&1) || status=$? +[[ $status == 1 ]] +[[ $out =~ 'No principal matched.' ]] diff --git a/tests/functional/flakes/common.sh b/tests/functional/flakes/common.sh index 8aed296e6..fc45cf7bf 100644 --- a/tests/functional/flakes/common.sh +++ b/tests/functional/flakes/common.sh @@ -11,6 +11,7 @@ writeSimpleFlake() { outputs = inputs: rec { packages.$system = rec { foo = import ./simple.nix; + fooScript = (import ./shell.nix {}).foo; default = foo; }; packages.someOtherSystem = rec { @@ -24,13 +25,13 @@ writeSimpleFlake() { } EOF - cp ../simple.nix ../simple.builder.sh ../config.nix $flakeDir/ + cp ../simple.nix ../shell.nix ../simple.builder.sh ../config.nix $flakeDir/ } createSimpleGitFlake() { local flakeDir="$1" writeSimpleFlake $flakeDir - git -C $flakeDir add flake.nix simple.nix simple.builder.sh config.nix + git -C $flakeDir add flake.nix simple.nix shell.nix simple.builder.sh config.nix git -C $flakeDir commit -m 'Initial' } diff --git a/tests/functional/flakes/flakes.sh b/tests/functional/flakes/flakes.sh index b0038935c..ccf1699f9 100644 --- a/tests/functional/flakes/flakes.sh +++ b/tests/functional/flakes/flakes.sh @@ -66,9 +66,82 @@ cat > "$nonFlakeDir/README.md" < "$nonFlakeDir/shebang.sh" < $nonFlakeDir/shebang-comments.sh < $nonFlakeDir/shebang-reject.sh < $nonFlakeDir/shebang-inline-expr.sh <> $nonFlakeDir/shebang-inline-expr.sh <<"EOF" +#! nix --offline shell +#! nix --impure --expr `` +#! nix let flake = (builtins.getFlake (toString ../flake1)).packages; +#! 
nix fooScript = flake.${builtins.currentSystem}.fooScript; +#! nix /* just a comment !@#$%^&*()__+ # */ +#! nix in fooScript +#! nix `` +#! nix --no-write-lock-file --command bash +set -ex +foo +echo "$@" +EOF +chmod +x $nonFlakeDir/shebang-inline-expr.sh + +cat > $nonFlakeDir/fooScript.nix <<"EOF" +let flake = (builtins.getFlake (toString ../flake1)).packages; + fooScript = flake.${builtins.currentSystem}.fooScript; + in fooScript +EOF + +cat > $nonFlakeDir/shebang-file.sh <> $nonFlakeDir/shebang-file.sh <<"EOF" +#! nix --offline shell +#! nix --impure --file ./fooScript.nix +#! nix --no-write-lock-file --command bash +set -ex +foo +echo "$@" +EOF +chmod +x $nonFlakeDir/shebang-file.sh + # Construct a custom registry, additionally test the --registry flag nix registry add --registry "$registry" flake1 "git+file://$flake1Dir" nix registry add --registry "$registry" flake2 "git+file://$percentEncodedFlake2Dir" @@ -511,3 +584,11 @@ nix flake metadata "$flake2Dir" --reference-lock-file $TEST_ROOT/flake2-overridd # reference-lock-file can only be used if allow-dirty is set. expectStderr 1 nix flake metadata "$flake2Dir" --no-allow-dirty --reference-lock-file $TEST_ROOT/flake2-overridden.lock + +# Test shebang +[[ $($nonFlakeDir/shebang.sh) = "foo" ]] +[[ $($nonFlakeDir/shebang.sh "bar") = "foo"$'\n'"bar" ]] +[[ $($nonFlakeDir/shebang-comments.sh ) = "foo" ]] +[[ $($nonFlakeDir/shebang-inline-expr.sh baz) = "foo"$'\n'"baz" ]] +[[ $($nonFlakeDir/shebang-file.sh baz) = "foo"$'\n'"baz" ]] +expect 1 $nonFlakeDir/shebang-reject.sh 2>&1 | grepQuiet -F 'error: unsupported unquoted character in nix shebang: *. Use double backticks to escape?' diff --git a/tests/functional/lang.sh b/tests/functional/lang.sh index c3acef5ee..12df32c87 100755 --- a/tests/functional/lang.sh +++ b/tests/functional/lang.sh @@ -23,6 +23,7 @@ nix-instantiate --trace-verbose --eval -E 'builtins.traceVerbose "Hello" 123' 2> nix-instantiate --eval -E 'builtins.traceVerbose "Hello" 123' 2>&1 | grepQuietInverse Hello nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" 123' 2>&1 | grepQuietInverse Hello expectStderr 1 nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" (throw "Foo")' | grepQuiet Hello +expectStderr 1 nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello %" (throw "Foo")' | grepQuiet 'Hello %' nix-instantiate --eval -E 'let x = builtins.trace { x = x; } true; in x' \ 2>&1 | grepQuiet -E 'trace: { x = «potential infinite recursion»; }' diff --git a/tests/functional/local.mk b/tests/functional/local.mk index 3679349f8..21dabca88 100644 --- a/tests/functional/local.mk +++ b/tests/functional/local.mk @@ -55,6 +55,7 @@ nix_tests = \ secure-drv-outputs.sh \ restricted.sh \ fetchGitSubmodules.sh \ + fetchGitVerification.sh \ flakes/search-root.sh \ readfile-context.sh \ nix-channel.sh \ @@ -119,6 +120,7 @@ nix_tests = \ flakes/show.sh \ impure-derivations.sh \ path-from-hash-part.sh \ + path-info.sh \ toString-path.sh \ read-only-store.sh \ nested-sandboxing.sh \ diff --git a/tests/functional/nar-access.sh b/tests/functional/nar-access.sh index 218b521fb..87981e7d9 100644 --- a/tests/functional/nar-access.sh +++ b/tests/functional/nar-access.sh @@ -27,9 +27,8 @@ diff -u baz.cat-nar $storePath/foo/baz # Check that 'nix store cat' fails on invalid store paths. 
invalidPath="$(dirname $storePath)/99999999999999999999999999999999-foo" -mv $storePath $invalidPath +cp -r $storePath $invalidPath expect 1 nix store cat $invalidPath/foo/baz -mv $invalidPath $storePath # Test --json. diff -u \ diff --git a/tests/functional/path-info.sh b/tests/functional/path-info.sh new file mode 100644 index 000000000..763935eb7 --- /dev/null +++ b/tests/functional/path-info.sh @@ -0,0 +1,23 @@ +source common.sh + +echo foo > $TEST_ROOT/foo +foo=$(nix store add-file $TEST_ROOT/foo) + +echo bar > $TEST_ROOT/bar +bar=$(nix store add-file $TEST_ROOT/bar) + +echo baz > $TEST_ROOT/baz +baz=$(nix store add-file $TEST_ROOT/baz) +nix-store --delete "$baz" + +diff --unified --color=always \ + <(nix path-info --json "$foo" "$bar" "$baz" | + jq --sort-keys 'map_values(.narHash)') \ + <(jq --sort-keys <<-EOF + { + "$foo": "sha256-QvtAMbUl/uvi+LCObmqOhvNOapHdA2raiI4xG5zI5pA=", + "$bar": "sha256-9fhYGu9fqxcQC2Kc81qh2RMo1QcLBUBo8U+pPn+jthQ=", + "$baz": null + } +EOF + ) diff --git a/tests/functional/tarball.sh b/tests/functional/tarball.sh index 6e621a28c..e59ee400e 100644 --- a/tests/functional/tarball.sh +++ b/tests/functional/tarball.sh @@ -18,7 +18,7 @@ test_tarball() { local compressor="$2" tarball=$TEST_ROOT/tarball.tar$ext - (cd $TEST_ROOT && tar cf - tarball) | $compressor > $tarball + (cd $TEST_ROOT && GNUTAR_REPRODUCIBLE= tar --mtime=$tarroot/default.nix --owner=0 --group=0 --numeric-owner --sort=name -c -f - tarball) | $compressor > $tarball nix-env -f file://$tarball -qa --out-path | grepQuiet dependencies diff --git a/unit-test-data/libstore/nar-info/impure.json b/unit-test-data/libstore/nar-info/impure.json new file mode 100644 index 000000000..bb9791a6a --- /dev/null +++ b/unit-test-data/libstore/nar-info/impure.json @@ -0,0 +1,20 @@ +{ + "ca": "fixed:r:sha256:1lr187v6dck1rjh2j6svpikcfz53wyl3qrlcbb405zlh13x0khhh", + "compression": "xz", + "deriver": "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar.drv", + "downloadHash": "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", + "downloadSize": 4029176, + "narHash": "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", + "narSize": 34878, + "references": [ + "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar", + "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo" + ], + "registrationTime": 23423, + "signatures": [ + "asdf", + "qwer" + ], + "ultimate": true, + "url": "nar/1w1fff338fvdw53sqgamddn1b2xgds473pv6y13gizdbqjv4i5p3.nar.xz" +} diff --git a/unit-test-data/libstore/nar-info/pure.json b/unit-test-data/libstore/nar-info/pure.json new file mode 100644 index 000000000..955baec31 --- /dev/null +++ b/unit-test-data/libstore/nar-info/pure.json @@ -0,0 +1,9 @@ +{ + "ca": "fixed:r:sha256:1lr187v6dck1rjh2j6svpikcfz53wyl3qrlcbb405zlh13x0khhh", + "narHash": "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", + "narSize": 34878, + "references": [ + "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar", + "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo" + ] +} diff --git a/unit-test-data/libstore/path-info/impure.json b/unit-test-data/libstore/path-info/impure.json new file mode 100644 index 000000000..0c452cc49 --- /dev/null +++ b/unit-test-data/libstore/path-info/impure.json @@ -0,0 +1,16 @@ +{ + "ca": "fixed:r:sha256:1lr187v6dck1rjh2j6svpikcfz53wyl3qrlcbb405zlh13x0khhh", + "deriver": "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar.drv", + "narHash": "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", + "narSize": 34878, + "references": [ + "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar", + 
"/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo" + ], + "registrationTime": 23423, + "signatures": [ + "asdf", + "qwer" + ], + "ultimate": true +} diff --git a/unit-test-data/libstore/path-info/pure.json b/unit-test-data/libstore/path-info/pure.json new file mode 100644 index 000000000..955baec31 --- /dev/null +++ b/unit-test-data/libstore/path-info/pure.json @@ -0,0 +1,9 @@ +{ + "ca": "fixed:r:sha256:1lr187v6dck1rjh2j6svpikcfz53wyl3qrlcbb405zlh13x0khhh", + "narHash": "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", + "narSize": 34878, + "references": [ + "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar", + "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo" + ] +}