From 7f71fc7540502295202b075f82e89fcf993e7e3a Mon Sep 17 00:00:00 2001 From: Pol Dellaiera Date: Mon, 23 Oct 2023 23:46:06 +0200 Subject: [PATCH 01/55] fix: make sure `tar` reproducibility flags are set --- tests/functional/tarball.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functional/tarball.sh b/tests/functional/tarball.sh index 6e621a28c..e59ee400e 100644 --- a/tests/functional/tarball.sh +++ b/tests/functional/tarball.sh @@ -18,7 +18,7 @@ test_tarball() { local compressor="$2" tarball=$TEST_ROOT/tarball.tar$ext - (cd $TEST_ROOT && tar cf - tarball) | $compressor > $tarball + (cd $TEST_ROOT && GNUTAR_REPRODUCIBLE= tar --mtime=$tarroot/default.nix --owner=0 --group=0 --numeric-owner --sort=name -c -f - tarball) | $compressor > $tarball nix-env -f file://$tarball -qa --out-path | grepQuiet dependencies From b107431816fcbf364aeae6942cc9d1e709635a44 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 1 Nov 2023 11:15:21 -0400 Subject: [PATCH 02/55] Systematize characterization tests a bit more Deduplicating code moreover enforcing the pattern means: - It is easier to write new characterization tests because less boilerplate - It is harder to mess up new tests because there are fewer places to make mistakes. Co-authored-by: Jacek Galowicz --- src/libstore/tests/characterization.hh | 28 ------ src/libstore/tests/common-protocol.cc | 48 ++++------ src/libstore/tests/derivation.cc | 116 ++++++++----------------- src/libstore/tests/libstore.hh | 2 +- src/libstore/tests/protocol.hh | 61 +++++-------- src/libutil/tests/characterization.hh | 111 +++++++++++++++++++++++ 6 files changed, 183 insertions(+), 183 deletions(-) delete mode 100644 src/libstore/tests/characterization.hh create mode 100644 src/libutil/tests/characterization.hh diff --git a/src/libstore/tests/characterization.hh b/src/libstore/tests/characterization.hh deleted file mode 100644 index 46bf4b2e5..000000000 --- a/src/libstore/tests/characterization.hh +++ /dev/null @@ -1,28 +0,0 @@ -#pragma once -///@file - -namespace nix { - -/** - * The path to the `unit-test-data` directory. See the contributing - * guide in the manual for further details. - */ -static Path getUnitTestData() { - return getEnv("_NIX_TEST_UNIT_DATA").value(); -} - -/** - * Whether we should update "golden masters" instead of running tests - * against them. See the contributing guide in the manual for further - * details. 
- */ -static bool testAccept() { - return getEnv("_NIX_TEST_ACCEPT") == "1"; -} - -constexpr std::string_view cannotReadGoldenMaster = - "Cannot read golden master because another test is also updating it"; - -constexpr std::string_view updatingGoldenMaster = - "Updating golden master"; -} diff --git a/src/libstore/tests/common-protocol.cc b/src/libstore/tests/common-protocol.cc index b3f4977d2..c09ac6a3e 100644 --- a/src/libstore/tests/common-protocol.cc +++ b/src/libstore/tests/common-protocol.cc @@ -20,16 +20,9 @@ public: * Golden test for `T` reading */ template - void readTest(PathView testStem, T value) + void readProtoTest(PathView testStem, const T & expected) { - if (testAccept()) - { - GTEST_SKIP() << cannotReadGoldenMaster; - } - else - { - auto encoded = readFile(goldenMaster(testStem)); - + CharacterizationTest::readTest(testStem, [&](const auto & encoded) { T got = ({ StringSource from { encoded }; CommonProto::Serialise::read( @@ -37,44 +30,33 @@ public: CommonProto::ReadConn { .from = from }); }); - ASSERT_EQ(got, value); - } + ASSERT_EQ(got, expected); + }); } /** * Golden test for `T` write */ template - void writeTest(PathView testStem, const T & value) + void writeProtoTest(PathView testStem, const T & decoded) { - auto file = goldenMaster(testStem); - - StringSink to; - CommonProto::write( - *store, - CommonProto::WriteConn { .to = to }, - value); - - if (testAccept()) - { - createDirs(dirOf(file)); - writeFile(file, to.s); - GTEST_SKIP() << updatingGoldenMaster; - } - else - { - auto expected = readFile(file); - ASSERT_EQ(to.s, expected); - } + CharacterizationTest::writeTest(testStem, [&]() -> std::string { + StringSink to; + CommonProto::Serialise::write( + *store, + CommonProto::WriteConn { .to = to }, + decoded); + return to.s; + }); } }; #define CHARACTERIZATION_TEST(NAME, STEM, VALUE) \ TEST_F(CommonProtoTest, NAME ## _read) { \ - readTest(STEM, VALUE); \ + readProtoTest(STEM, VALUE); \ } \ TEST_F(CommonProtoTest, NAME ## _write) { \ - writeTest(STEM, VALUE); \ + writeProtoTest(STEM, VALUE); \ } CHARACTERIZATION_TEST( diff --git a/src/libstore/tests/derivation.cc b/src/libstore/tests/derivation.cc index ca0cdff71..29d5693db 100644 --- a/src/libstore/tests/derivation.cc +++ b/src/libstore/tests/derivation.cc @@ -11,20 +11,20 @@ namespace nix { using nlohmann::json; -class DerivationTest : public LibStoreTest +class DerivationTest : public CharacterizationTest, public LibStoreTest { + Path unitTestData = getUnitTestData() + "/libstore/derivation"; + public: + Path goldenMaster(std::string_view testStem) const override { + return unitTestData + "/" + testStem; + } + /** * We set these in tests rather than the regular globals so we don't have * to worry about race conditions if the tests run concurrently. 
*/ ExperimentalFeatureSettings mockXpSettings; - - Path unitTestData = getUnitTestData() + "/libstore/derivation"; - - Path goldenMaster(std::string_view testStem) { - return unitTestData + "/" + testStem; - } }; class CaDerivationTest : public DerivationTest @@ -73,14 +73,8 @@ TEST_F(DynDerivationTest, BadATerm_oldVersionDynDeps) { #define TEST_JSON(FIXTURE, NAME, VAL, DRV_NAME, OUTPUT_NAME) \ TEST_F(FIXTURE, DerivationOutput_ ## NAME ## _from_json) { \ - if (testAccept()) \ - { \ - GTEST_SKIP() << cannotReadGoldenMaster; \ - } \ - else \ - { \ - auto encoded = json::parse( \ - readFile(goldenMaster("output-" #NAME ".json"))); \ + readTest("output-" #NAME ".json", [&](const auto & encoded_) { \ + auto encoded = json::parse(encoded_); \ DerivationOutput got = DerivationOutput::fromJSON( \ *store, \ DRV_NAME, \ @@ -89,28 +83,20 @@ TEST_F(DynDerivationTest, BadATerm_oldVersionDynDeps) { mockXpSettings); \ DerivationOutput expected { VAL }; \ ASSERT_EQ(got, expected); \ - } \ + }); \ } \ \ TEST_F(FIXTURE, DerivationOutput_ ## NAME ## _to_json) { \ - auto file = goldenMaster("output-" #NAME ".json"); \ - \ - json got = DerivationOutput { VAL }.toJSON( \ - *store, \ - DRV_NAME, \ - OUTPUT_NAME); \ - \ - if (testAccept()) \ - { \ - createDirs(dirOf(file)); \ - writeFile(file, got.dump(2) + "\n"); \ - GTEST_SKIP() << updatingGoldenMaster; \ - } \ - else \ - { \ - auto expected = json::parse(readFile(file)); \ - ASSERT_EQ(got, expected); \ - } \ + writeTest("output-" #NAME ".json", [&]() -> json { \ + return DerivationOutput { (VAL) }.toJSON( \ + *store, \ + (DRV_NAME), \ + (OUTPUT_NAME)); \ + }, [](const auto & file) { \ + return json::parse(readFile(file)); \ + }, [](const auto & file, const auto & got) { \ + return writeFile(file, got.dump(2) + "\n"); \ + }); \ } TEST_JSON(DerivationTest, inputAddressed, @@ -167,50 +153,30 @@ TEST_JSON(ImpureDerivationTest, impure, #define TEST_JSON(FIXTURE, NAME, VAL) \ TEST_F(FIXTURE, Derivation_ ## NAME ## _from_json) { \ - if (testAccept()) \ - { \ - GTEST_SKIP() << cannotReadGoldenMaster; \ - } \ - else \ - { \ - auto encoded = json::parse( \ - readFile(goldenMaster( #NAME ".json"))); \ + readTest(#NAME ".json", [&](const auto & encoded_) { \ + auto encoded = json::parse(encoded_); \ Derivation expected { VAL }; \ Derivation got = Derivation::fromJSON( \ *store, \ encoded, \ mockXpSettings); \ ASSERT_EQ(got, expected); \ - } \ + }); \ } \ \ TEST_F(FIXTURE, Derivation_ ## NAME ## _to_json) { \ - auto file = goldenMaster( #NAME ".json"); \ - \ - json got = Derivation { VAL }.toJSON(*store); \ - \ - if (testAccept()) \ - { \ - createDirs(dirOf(file)); \ - writeFile(file, got.dump(2) + "\n"); \ - GTEST_SKIP() << updatingGoldenMaster; \ - } \ - else \ - { \ - auto expected = json::parse(readFile(file)); \ - ASSERT_EQ(got, expected); \ - } \ + writeTest(#NAME ".json", [&]() -> json { \ + return Derivation { VAL }.toJSON(*store); \ + }, [](const auto & file) { \ + return json::parse(readFile(file)); \ + }, [](const auto & file, const auto & got) { \ + return writeFile(file, got.dump(2) + "\n"); \ + }); \ } #define TEST_ATERM(FIXTURE, NAME, VAL, DRV_NAME) \ TEST_F(FIXTURE, Derivation_ ## NAME ## _from_aterm) { \ - if (testAccept()) \ - { \ - GTEST_SKIP() << cannotReadGoldenMaster; \ - } \ - else \ - { \ - auto encoded = readFile(goldenMaster( #NAME ".drv")); \ + readTest(#NAME ".drv", [&](auto encoded) { \ Derivation expected { VAL }; \ auto got = parseDerivation( \ *store, \ @@ -219,25 +185,13 @@ TEST_JSON(ImpureDerivationTest, impure, mockXpSettings); \ 
ASSERT_EQ(got.toJSON(*store), expected.toJSON(*store)) ; \ ASSERT_EQ(got, expected); \ - } \ + }); \ } \ \ TEST_F(FIXTURE, Derivation_ ## NAME ## _to_aterm) { \ - auto file = goldenMaster( #NAME ".drv"); \ - \ - auto got = (VAL).unparse(*store, false); \ - \ - if (testAccept()) \ - { \ - createDirs(dirOf(file)); \ - writeFile(file, got); \ - GTEST_SKIP() << updatingGoldenMaster; \ - } \ - else \ - { \ - auto expected = readFile(file); \ - ASSERT_EQ(got, expected); \ - } \ + writeTest(#NAME ".drv", [&]() -> std::string { \ + return (VAL).unparse(*store, false); \ + }); \ } Derivation makeSimpleDrv(const Store & store) { diff --git a/src/libstore/tests/libstore.hh b/src/libstore/tests/libstore.hh index ef93457b5..78b162b95 100644 --- a/src/libstore/tests/libstore.hh +++ b/src/libstore/tests/libstore.hh @@ -8,7 +8,7 @@ namespace nix { -class LibStoreTest : public ::testing::Test { +class LibStoreTest : public virtual ::testing::Test { public: static void SetUpTestSuite() { initLibStore(); diff --git a/src/libstore/tests/protocol.hh b/src/libstore/tests/protocol.hh index 7fdd3e11c..0378b3e1f 100644 --- a/src/libstore/tests/protocol.hh +++ b/src/libstore/tests/protocol.hh @@ -7,12 +7,11 @@ namespace nix { template -class ProtoTest : public LibStoreTest +class ProtoTest : public CharacterizationTest, public LibStoreTest { -protected: Path unitTestData = getUnitTestData() + "/libstore/" + protocolDir; - Path goldenMaster(std::string_view testStem) { + Path goldenMaster(std::string_view testStem) const override { return unitTestData + "/" + testStem + ".bin"; } }; @@ -25,18 +24,11 @@ public: * Golden test for `T` reading */ template - void readTest(PathView testStem, typename Proto::Version version, T value) + void readProtoTest(PathView testStem, typename Proto::Version version, T expected) { - if (testAccept()) - { - GTEST_SKIP() << cannotReadGoldenMaster; - } - else - { - auto expected = readFile(ProtoTest::goldenMaster(testStem)); - + CharacterizationTest::readTest(testStem, [&](const auto & encoded) { T got = ({ - StringSource from { expected }; + StringSource from { encoded }; Proto::template Serialise::read( *LibStoreTest::store, typename Proto::ReadConn { @@ -45,47 +37,36 @@ public: }); }); - ASSERT_EQ(got, value); - } + ASSERT_EQ(got, expected); + }); } /** * Golden test for `T` write */ template - void writeTest(PathView testStem, typename Proto::Version version, const T & value) + void writeProtoTest(PathView testStem, typename Proto::Version version, const T & decoded) { - auto file = ProtoTest::goldenMaster(testStem); - - StringSink to; - Proto::write( - *LibStoreTest::store, - typename Proto::WriteConn { - .to = to, - .version = version, - }, - value); - - if (testAccept()) - { - createDirs(dirOf(file)); - writeFile(file, to.s); - GTEST_SKIP() << updatingGoldenMaster; - } - else - { - auto expected = readFile(file); - ASSERT_EQ(to.s, expected); - } + CharacterizationTest::writeTest(testStem, [&]() { + StringSink to; + Proto::template Serialise::write( + *LibStoreTest::store, + typename Proto::WriteConn { + .to = to, + .version = version, + }, + decoded); + return std::move(to.s); + }); } }; #define VERSIONED_CHARACTERIZATION_TEST(FIXTURE, NAME, STEM, VERSION, VALUE) \ TEST_F(FIXTURE, NAME ## _read) { \ - readTest(STEM, VERSION, VALUE); \ + readProtoTest(STEM, VERSION, VALUE); \ } \ TEST_F(FIXTURE, NAME ## _write) { \ - writeTest(STEM, VERSION, VALUE); \ + writeProtoTest(STEM, VERSION, VALUE); \ } } diff --git a/src/libutil/tests/characterization.hh 
b/src/libutil/tests/characterization.hh new file mode 100644 index 000000000..10c8b4f7e --- /dev/null +++ b/src/libutil/tests/characterization.hh @@ -0,0 +1,111 @@ +#pragma once +///@file + +#include + +#include "types.hh" + +namespace nix { + +/** + * The path to the `unit-test-data` directory. See the contributing + * guide in the manual for further details. + */ +static Path getUnitTestData() { + return getEnv("_NIX_TEST_UNIT_DATA").value(); +} + +/** + * Whether we should update "golden masters" instead of running tests + * against them. See the contributing guide in the manual for further + * details. + */ +static bool testAccept() { + return getEnv("_NIX_TEST_ACCEPT") == "1"; +} + +/** + * Mixin class for writing characterization tests + */ +class CharacterizationTest : public virtual ::testing::Test +{ +protected: + /** + * While the "golden master" for this characterization test is + * located. It should not be shared with any other test. + */ + virtual Path goldenMaster(PathView testStem) const = 0; + +public: + /** + * Golden test for reading + * + * @param test hook that takes the contents of the file and does the + * actual work + */ + void readTest(PathView testStem, auto && test) + { + auto file = goldenMaster(testStem); + + if (testAccept()) + { + GTEST_SKIP() + << "Cannot read golden master " + << file + << "because another test is also updating it"; + } + else + { + test(readFile(file)); + } + } + + /** + * Golden test for writing + * + * @param test hook that produces contents of the file and does the + * actual work + */ + template + void writeTest( + PathView testStem, + std::invocable<> auto && test, + std::invocable auto && readFile2, + std::invocable auto && writeFile2) + { + auto file = goldenMaster(testStem); + + T got = test(); + + if (testAccept()) + { + createDirs(dirOf(file)); + writeFile2(file, got); + GTEST_SKIP() + << "Updating golden master " + << file; + } + else + { + T expected = readFile2(file); + ASSERT_EQ(got, expected); + } + } + + /** + * Specialize to `std::string` + */ + void writeTest(PathView testStem, auto && test) + { + writeTest( + testStem, test, + [](const Path & f) -> std::string { + return readFile(f); + }, + [](const Path & f, const std::string & c) { + return writeFile(f, c); + }); + } +}; + +} From d15c3a33e680228c9deaa6d0898d4680cdc8dbc3 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 1 Nov 2023 16:11:20 -0400 Subject: [PATCH 03/55] Don't use `std::invocable` C++ concept yet It s not supported on all platforms yet. Can revert this once it is. 
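For reference, the pattern the helper falls back to can be sketched in isolation as follows. This is an illustrative, self-contained example (assuming a C++20 compiler), not the patched Nix sources: the golden-master type is recovered from the producing callable with `decltype(got)` instead of being pinned down through `std::invocable`-constrained template parameters.

```c++
// Stand-alone sketch; the names here are invented for illustration.
#include <cassert>
#include <string>

// Accept arbitrary callables via plain `auto &&` (no concepts needed) and
// deduce the golden-master type from what the test hook returns.
void writeTestSketch(auto && produce, auto && readGolden)
{
    auto got = produce();                  // value produced by the test hook
    decltype(got) expected = readGolden(); // same type; the real helper
                                           // compares with ASSERT_EQ
    assert(got == expected);
}

int main()
{
    writeTestSketch(
        [] { return std::string("golden"); },
        [] { return std::string("golden"); });
    return 0;
}
```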
--- src/libstore/tests/derivation.cc | 4 ++-- src/libutil/tests/characterization.hh | 12 ++++-------- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/src/libstore/tests/derivation.cc b/src/libstore/tests/derivation.cc index 29d5693db..7becfa5ab 100644 --- a/src/libstore/tests/derivation.cc +++ b/src/libstore/tests/derivation.cc @@ -87,7 +87,7 @@ TEST_F(DynDerivationTest, BadATerm_oldVersionDynDeps) { } \ \ TEST_F(FIXTURE, DerivationOutput_ ## NAME ## _to_json) { \ - writeTest("output-" #NAME ".json", [&]() -> json { \ + writeTest("output-" #NAME ".json", [&]() -> json { \ return DerivationOutput { (VAL) }.toJSON( \ *store, \ (DRV_NAME), \ @@ -165,7 +165,7 @@ TEST_JSON(ImpureDerivationTest, impure, } \ \ TEST_F(FIXTURE, Derivation_ ## NAME ## _to_json) { \ - writeTest(#NAME ".json", [&]() -> json { \ + writeTest(#NAME ".json", [&]() -> json { \ return Derivation { VAL }.toJSON(*store); \ }, [](const auto & file) { \ return json::parse(readFile(file)); \ diff --git a/src/libutil/tests/characterization.hh b/src/libutil/tests/characterization.hh index 10c8b4f7e..6698c5239 100644 --- a/src/libutil/tests/characterization.hh +++ b/src/libutil/tests/characterization.hh @@ -66,16 +66,12 @@ public: * @param test hook that produces contents of the file and does the * actual work */ - template void writeTest( - PathView testStem, - std::invocable<> auto && test, - std::invocable auto && readFile2, - std::invocable auto && writeFile2) + PathView testStem, auto && test, auto && readFile2, auto && writeFile2) { auto file = goldenMaster(testStem); - T got = test(); + auto got = test(); if (testAccept()) { @@ -87,7 +83,7 @@ public: } else { - T expected = readFile2(file); + decltype(got) expected = readFile2(file); ASSERT_EQ(got, expected); } } @@ -97,7 +93,7 @@ public: */ void writeTest(PathView testStem, auto && test) { - writeTest( + writeTest( testStem, test, [](const Path & f) -> std::string { return readFile(f); From e5908212e25f2cb7a36ec176a1c7fcb2d522088b Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 3 Nov 2023 11:03:58 +0100 Subject: [PATCH 04/55] Fix nar-access test on macOS --- tests/functional/nar-access.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/functional/nar-access.sh b/tests/functional/nar-access.sh index 218b521fb..87981e7d9 100644 --- a/tests/functional/nar-access.sh +++ b/tests/functional/nar-access.sh @@ -27,9 +27,8 @@ diff -u baz.cat-nar $storePath/foo/baz # Check that 'nix store cat' fails on invalid store paths. invalidPath="$(dirname $storePath)/99999999999999999999999999999999-foo" -mv $storePath $invalidPath +cp -r $storePath $invalidPath expect 1 nix store cat $invalidPath/foo/baz -mv $invalidPath $storePath # Test --json. 
diff -u \ From 55dd1244d280d768bfebb8ca2ec93e061d7aa4eb Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 3 Nov 2023 11:39:50 +0100 Subject: [PATCH 05/55] parseDerivation(): Fix warning about uninitialized 'version' variable --- src/libstore/derivations.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/libstore/derivations.cc b/src/libstore/derivations.cc index efdad18e1..1fecd1c97 100644 --- a/src/libstore/derivations.cc +++ b/src/libstore/derivations.cc @@ -352,7 +352,7 @@ Derivation parseDerivation( expect(str, "erive("); version = DerivationATermVersion::Traditional; break; - case 'r': + case 'r': { expect(str, "rvWithVersion("); auto versionS = parseString(str); if (versionS == "xp-dyn-drv") { @@ -365,6 +365,9 @@ Derivation parseDerivation( expect(str, ","); break; } + default: + throw Error("derivation does not start with 'Derive' or 'DrvWithVersion'"); + } /* Parse the list of outputs. */ expect(str, "["); From b0455e9931fbcd996b1b240a4513132c36cf852c Mon Sep 17 00:00:00 2001 From: Eelco Dolstra Date: Fri, 3 Nov 2023 11:58:47 +0100 Subject: [PATCH 06/55] Fix uninitialized variable warnings on i686-linux https://hydra.nixos.org/build/239849607 --- src/libcmd/command.cc | 4 ++-- src/libcmd/installables.cc | 2 +- src/libstore/store-api.cc | 2 +- src/nix-build/nix-build.cc | 2 +- src/nix-env/nix-env.cc | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/libcmd/command.cc b/src/libcmd/command.cc index a88ba8134..de9f546fc 100644 --- a/src/libcmd/command.cc +++ b/src/libcmd/command.cc @@ -175,7 +175,7 @@ void BuiltPathsCommand::run(ref store, Installables && installables) throw UsageError("'--all' does not expect arguments"); // XXX: Only uses opaque paths, ignores all the realisations for (auto & p : store->queryAllValidPaths()) - paths.push_back(BuiltPath::Opaque{p}); + paths.emplace_back(BuiltPath::Opaque{p}); } else { paths = Installable::toBuiltPaths(getEvalStore(), store, realiseMode, operateOn, installables); if (recursive) { @@ -188,7 +188,7 @@ void BuiltPathsCommand::run(ref store, Installables && installables) } store->computeFSClosure(pathsRoots, pathsClosure); for (auto & path : pathsClosure) - paths.push_back(BuiltPath::Opaque{path}); + paths.emplace_back(BuiltPath::Opaque{path}); } } diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 3aff601e0..bc0b8a988 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -663,7 +663,7 @@ BuiltPaths Installable::toBuiltPaths( BuiltPaths res; for (auto & drvPath : Installable::toDerivations(store, installables, true)) - res.push_back(BuiltPath::Opaque{drvPath}); + res.emplace_back(BuiltPath::Opaque{drvPath}); return res; } } diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index ac96e8bb1..646b0ec7d 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -819,7 +819,7 @@ void Store::substitutePaths(const StorePathSet & paths) std::vector paths2; for (auto & path : paths) if (!path.isDerivation()) - paths2.push_back(DerivedPath::Opaque{path}); + paths2.emplace_back(DerivedPath::Opaque{path}); uint64_t downloadSize, narSize; StorePathSet willBuild, willSubstitute, unknown; queryMissing(paths2, diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index e62c4f6b1..60bc08146 100644 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -449,7 +449,7 @@ static void main_nix_build(int argc, char * * argv) } } for (const auto & src : drv.inputSrcs) { - 
pathsToBuild.push_back(DerivedPath::Opaque{src}); + pathsToBuild.emplace_back(DerivedPath::Opaque{src}); pathsToCopy.insert(src); } diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 01742daa8..25068f801 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -481,12 +481,12 @@ static void printMissing(EvalState & state, DrvInfos & elems) std::vector targets; for (auto & i : elems) if (auto drvPath = i.queryDrvPath()) - targets.push_back(DerivedPath::Built{ + targets.emplace_back(DerivedPath::Built{ .drvPath = makeConstantStorePathRef(*drvPath), .outputs = OutputsSpec::All { }, }); else - targets.push_back(DerivedPath::Opaque{ + targets.emplace_back(DerivedPath::Opaque{ .path = i.queryOutPath(), }); From 6df32889a51510dff44c776fa312b7ba61ab8edf Mon Sep 17 00:00:00 2001 From: BootRhetoric <110117466+BootRhetoric@users.noreply.github.com> Date: Fri, 20 Oct 2023 21:16:56 +0200 Subject: [PATCH 07/55] Add git commit verification input attributes This implements the git input attributes `verifyCommit`, `keytype`, `publicKey` and `publicKeys` as experimental feature `verified-fetches`. `publicKeys` should be a json string. This representation was chosen because all attributes must be of type bool, int or string so they can be included in flake uris (see definition of fetchers::Attr). --- src/libfetchers/fetchers.cc | 5 ++ src/libfetchers/fetchers.hh | 9 +++ src/libfetchers/git.cc | 104 +++++++++++++++++++++++++-- src/libutil/experimental-features.cc | 11 ++- src/libutil/experimental-features.hh | 1 + 5 files changed, 124 insertions(+), 6 deletions(-) diff --git a/src/libfetchers/fetchers.cc b/src/libfetchers/fetchers.cc index 92692d23a..895515327 100644 --- a/src/libfetchers/fetchers.cc +++ b/src/libfetchers/fetchers.cc @@ -360,4 +360,9 @@ std::optional InputScheme::experimentalFeature() const return {}; } +std::string publicKeys_to_string(const std::vector& publicKeys) +{ + return ((nlohmann::json) publicKeys).dump(); +} + } diff --git a/src/libfetchers/fetchers.hh b/src/libfetchers/fetchers.hh index 7d768bac1..a056c8939 100644 --- a/src/libfetchers/fetchers.hh +++ b/src/libfetchers/fetchers.hh @@ -182,4 +182,13 @@ void registerInputScheme(std::shared_ptr && fetcher); nlohmann::json dumpRegisterInputSchemeInfo(); +struct PublicKey +{ + std::string type = "ssh-ed25519"; + std::string key; +}; +NLOHMANN_DEFINE_TYPE_NON_INTRUSIVE_WITH_DEFAULT(PublicKey, type, key) + +std::string publicKeys_to_string(const std::vector&); + } diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index d625fe01e..51e551879 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -143,6 +143,69 @@ struct WorkdirInfo bool hasHead = false; }; +std::vector getPublicKeys(const Attrs & attrs) { + std::vector publicKeys; + if (attrs.contains("publicKeys")) { + nlohmann::json publicKeysJson = nlohmann::json::parse(getStrAttr(attrs, "publicKeys")); + ensureType(publicKeysJson, nlohmann::json::value_t::array); + publicKeys = publicKeysJson.get>(); + } + else { + publicKeys = {}; + } + if (attrs.contains("publicKey")) + publicKeys.push_back(PublicKey{maybeGetStrAttr(attrs, "keytype").value_or("ssh-ed25519"),getStrAttr(attrs, "publicKey")}); + return publicKeys; +} + +void doCommitVerification(const Path repoDir, const Path gitDir, const std::string rev, const std::vector& publicKeys) { + // Create ad-hoc allowedSignersFile and populate it with publicKeys + auto allowedSignersFile = createTempFile().second; + std::string allowedSigners; + for (const PublicKey& k : publicKeys) { + if 
(k.type != "ssh-dsa" + && k.type != "ssh-ecdsa" + && k.type != "ssh-ecdsa-sk" + && k.type != "ssh-ed25519" + && k.type != "ssh-ed25519-sk" + && k.type != "ssh-rsa") + warn("Unknow keytype: %s\n" + "Please use one of\n" + "- ssh-dsa\n" + "- ssh-ecdsa\n" + "- ssh-ecdsa-sk\n" + "- ssh-ed25519\n" + "- ssh-ed25519-sk\n" + "- ssh-rsa", k.type); + allowedSigners += "* " + k.type + " " + k.key + "\n"; + } + writeFile(allowedSignersFile, allowedSigners); + + // Run verification command + auto [status, output] = runProgram(RunOptions { + .program = "git", + .args = {"-c", "gpg.ssh.allowedSignersFile=" + allowedSignersFile, "-C", repoDir, + "--git-dir", gitDir, "verify-commit", rev}, + .mergeStderrToStdout = true, + }); + + /* Evaluate result through status code and checking if public key fingerprints appear on stderr + * This is neccessary because the git command might also succeed due to the commit being signed by gpg keys + * that are present in the users key agent. */ + std::string re = R"(Good "git" signature for \* with .* key SHA256:[)"; + for (const PublicKey& k : publicKeys){ + // Calculate sha256 fingerprint from public key and escape the regex symbol '+' to match the key literally + auto fingerprint = trim(hashString(htSHA256, base64Decode(k.key)).to_string(nix::HashFormat::Base64, false), "="); + auto escaped_fingerprint = std::regex_replace(fingerprint, std::regex("\\+"), "\\+" ); + re += "(" + escaped_fingerprint + ")"; + } + re += "]"; + if (status == 0 && std::regex_search(output, std::regex(re))) + printTalkative("Commit signature verification on commit %s succeeded", rev); + else + throw Error("Commit signature verification on commit %s failed: \n%s", rev, output); +} + // Returns whether a git workdir is clean and has commits. WorkdirInfo getWorkdirInfo(const Input & input, const Path & workdir) { @@ -272,9 +335,9 @@ struct GitInputScheme : InputScheme attrs.emplace("type", "git"); for (auto & [name, value] : url.query) { - if (name == "rev" || name == "ref") + if (name == "rev" || name == "ref" || name == "keytype" || name == "publicKey" || name == "publicKeys") attrs.emplace(name, value); - else if (name == "shallow" || name == "submodules" || name == "allRefs") + else if (name == "shallow" || name == "submodules" || name == "allRefs" || name == "verifyCommit") attrs.emplace(name, Explicit { value == "1" }); else url2.query.emplace(name, value); @@ -306,14 +369,26 @@ struct GitInputScheme : InputScheme "name", "dirtyRev", "dirtyShortRev", + "verifyCommit", + "keytype", + "publicKey", + "publicKeys", }; } std::optional inputFromAttrs(const Attrs & attrs) const override { + for (auto & [name, _] : attrs) + if (name == "verifyCommit" + || name == "keytype" + || name == "publicKey" + || name == "publicKeys") + experimentalFeatureSettings.require(Xp::VerifiedFetches); + maybeGetBoolAttr(attrs, "shallow"); maybeGetBoolAttr(attrs, "submodules"); maybeGetBoolAttr(attrs, "allRefs"); + maybeGetBoolAttr(attrs, "verifyCommit"); if (auto ref = maybeGetStrAttr(attrs, "ref")) { if (std::regex_search(*ref, badGitRefRegex)) @@ -336,6 +411,15 @@ struct GitInputScheme : InputScheme if (auto ref = input.getRef()) url.query.insert_or_assign("ref", *ref); if (maybeGetBoolAttr(input.attrs, "shallow").value_or(false)) url.query.insert_or_assign("shallow", "1"); + if (maybeGetBoolAttr(input.attrs, "verifyCommit").value_or(false)) + url.query.insert_or_assign("verifyCommit", "1"); + auto publicKeys = getPublicKeys(input.attrs); + if (publicKeys.size() == 1) { + url.query.insert_or_assign("keytype", 
publicKeys.at(0).type); + url.query.insert_or_assign("publicKey", publicKeys.at(0).key); + } + else if (publicKeys.size() > 1) + url.query.insert_or_assign("publicKeys", publicKeys_to_string(publicKeys)); return url; } @@ -425,6 +509,8 @@ struct GitInputScheme : InputScheme bool shallow = maybeGetBoolAttr(input.attrs, "shallow").value_or(false); bool submodules = maybeGetBoolAttr(input.attrs, "submodules").value_or(false); bool allRefs = maybeGetBoolAttr(input.attrs, "allRefs").value_or(false); + std::vector publicKeys = getPublicKeys(input.attrs); + bool verifyCommit = maybeGetBoolAttr(input.attrs, "verifyCommit").value_or(!publicKeys.empty()); std::string cacheType = "git"; if (shallow) cacheType += "-shallow"; @@ -445,6 +531,8 @@ struct GitInputScheme : InputScheme {"type", cacheType}, {"name", name}, {"rev", input.getRev()->gitRev()}, + {"verifyCommit", verifyCommit}, + {"publicKeys", publicKeys_to_string(publicKeys)}, }); }; @@ -467,12 +555,15 @@ struct GitInputScheme : InputScheme auto [isLocal, actualUrl_] = getActualUrl(input); auto actualUrl = actualUrl_; // work around clang bug - /* If this is a local directory and no ref or revision is given, + /* If this is a local directory, no ref or revision is given and no signature verification is needed, allow fetching directly from a dirty workdir. */ if (!input.getRef() && !input.getRev() && isLocal) { auto workdirInfo = getWorkdirInfo(input, actualUrl); if (!workdirInfo.clean) { - return fetchFromWorkdir(store, input, actualUrl, workdirInfo); + if (verifyCommit) + throw Error("Can't fetch from a dirty workdir with commit signature verification enabled."); + else + return fetchFromWorkdir(store, input, actualUrl, workdirInfo); } } @@ -480,6 +571,8 @@ struct GitInputScheme : InputScheme {"type", cacheType}, {"name", name}, {"url", actualUrl}, + {"verifyCommit", verifyCommit}, + {"publicKeys", publicKeys_to_string(publicKeys)}, }); Path repoDir; @@ -637,6 +730,9 @@ struct GitInputScheme : InputScheme ); } + if (verifyCommit) + doCommitVerification(repoDir, gitDir, input.getRev()->gitRev(), publicKeys); + if (submodules) { Path tmpGitDir = createTempDir(); AutoDelete delTmpGitDir(tmpGitDir, true); diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index 74af9aae0..47edca3a5 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -12,7 +12,7 @@ struct ExperimentalFeatureDetails std::string_view description; }; -constexpr std::array xpFeatureDetails = {{ +constexpr std::array xpFeatureDetails = {{ { .tag = Xp::CaDerivations, .name = "ca-derivations", @@ -227,7 +227,14 @@ constexpr std::array xpFeatureDetails = {{ .description = R"( Allow the use of the [impure-env](@docroot@/command-ref/conf-file.md#conf-impure-env) setting. )", - } + }, + { + .tag = Xp::VerifiedFetches, + .name = "verified-fetches", + .description = R"( + Enables verification of git commit signatures through the [`fetchGit`](@docroot@/language/builtins.md#builtins-fetchGit) built-in. 
+ )", + }, }}; static_assert( diff --git a/src/libutil/experimental-features.hh b/src/libutil/experimental-features.hh index e02f8353e..f005cc9ee 100644 --- a/src/libutil/experimental-features.hh +++ b/src/libutil/experimental-features.hh @@ -32,6 +32,7 @@ enum struct ExperimentalFeature ParseTomlTimestamps, ReadOnlyLocalStore, ConfigurableImpureEnv, + VerifiedFetches, }; /** From 098f0615c9401414a76e66653fbf4c9dd30d55a7 Mon Sep 17 00:00:00 2001 From: BootRhetoric <110117466+BootRhetoric@users.noreply.github.com> Date: Fri, 20 Oct 2023 21:17:14 +0200 Subject: [PATCH 08/55] fetchGit and flake: add publicKeys list input This adds publicKeys as an optional fetcher input attribute to flakes and builtins.fetchGit to provide a nix interface for the json-encoded `publicKeys` attribute of the git fetcher. Co-authored-by: Valentin Gagarin --- doc/manual/src/release-notes/rl-next.md | 3 +- src/libexpr/flake/flake.cc | 10 ++++- src/libexpr/primops/fetchTree.cc | 56 +++++++++++++++++++++++++ src/libfetchers/git.cc | 14 +++---- 4 files changed, 73 insertions(+), 10 deletions(-) diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md index 3cfb53998..8cd69f8fd 100644 --- a/doc/manual/src/release-notes/rl-next.md +++ b/doc/manual/src/release-notes/rl-next.md @@ -16,7 +16,6 @@ - `builtins.fetchTree` is now marked as stable. - - The interface for creating and updating lock files has been overhauled: - [`nix flake lock`](@docroot@/command-ref/new-cli/nix3-flake-lock.md) only creates lock files and adds missing inputs now. @@ -29,3 +28,5 @@ - The flake-specific flags `--recreate-lock-file` and `--update-input` have been removed from all commands operating on installables. They are superceded by `nix flake update`. + +- Commit signature verification for the [`builtins.fetchGit`](@docroot@/language/builtins.md#builtins-fetchGit) is added as the new [`verified-fetches` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-verified-fetches). 
diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index 70ae7b584..ded132695 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -8,6 +8,7 @@ #include "fetchers.hh" #include "finally.hh" #include "fetch-settings.hh" +#include "value-to-json.hh" namespace nix { @@ -140,8 +141,13 @@ static FlakeInput parseFlakeInput(EvalState & state, attrs.emplace(state.symbols[attr.name], (long unsigned int)attr.value->integer); break; default: - throw TypeError("flake input attribute '%s' is %s while a string, Boolean, or integer is expected", - state.symbols[attr.name], showType(*attr.value)); + if (attr.name == state.symbols.create("publicKeys")) { + experimentalFeatureSettings.require(Xp::VerifiedFetches); + NixStringContext emptyContext = {}; + attrs.emplace(state.symbols[attr.name], printValueAsJSON(state, true, *attr.value, pos, emptyContext).dump()); + } else + throw TypeError("flake input attribute '%s' is %s while a string, Boolean, or integer is expected", + state.symbols[attr.name], showType(*attr.value)); } #pragma GCC diagnostic pop } diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index 767f559be..3717b9022 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -7,6 +7,7 @@ #include "registry.hh" #include "tarball.hh" #include "url.hh" +#include "value-to-json.hh" #include #include @@ -125,6 +126,10 @@ static void fetchTree( attrs.emplace(state.symbols[attr.name], Explicit{attr.value->boolean}); else if (attr.value->type() == nInt) attrs.emplace(state.symbols[attr.name], uint64_t(attr.value->integer)); + else if (state.symbols[attr.name] == "publicKeys") { + experimentalFeatureSettings.require(Xp::VerifiedFetches); + attrs.emplace(state.symbols[attr.name], printValueAsJSON(state, true, *attr.value, pos, context).dump()); + } else state.debugThrowLastTrace(TypeError("fetchTree argument '%s' is %s while a string, Boolean or integer is expected", state.symbols[attr.name], showType(*attr.value))); @@ -427,6 +432,42 @@ static RegisterPrimOp primop_fetchGit({ With this argument being true, it's possible to load a `rev` from *any* `ref` (by default only `rev`s from the specified `ref` are supported). + - `verifyCommit` (default: `true` if `publicKey` or `publicKeys` are provided, otherwise `false`) + + Whether to check `rev` for a signature matching `publicKey` or `publicKeys`. + If `verifyCommit` is enabled, then `fetchGit` cannot use a local repository with uncommitted changes. + Requires the [`verified-fetches` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-verified-fetches). + + - `publicKey` + + The public key against which `rev` is verified if `verifyCommit` is enabled. + Requires the [`verified-fetches` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-verified-fetches). + + - `keytype` (default: `"ssh-ed25519"`) + + The key type of `publicKey`. + Possible values: + - `"ssh-dsa"` + - `"ssh-ecdsa"` + - `"ssh-ecdsa-sk"` + - `"ssh-ed25519"` + - `"ssh-ed25519-sk"` + - `"ssh-rsa"` + Requires the [`verified-fetches` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-verified-fetches). + + - `publicKeys` + + The public keys against which `rev` is verified if `verifyCommit` is enabled. 
+ Must be given as a list of attribute sets with the following form: + ```nix + { + key = ""; + type = ""; # optional, default: "ssh-ed25519" + } + ``` + Requires the [`verified-fetches` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-verified-fetches). + + Here are some examples of how to use `fetchGit`. - To fetch a private repository over SSH: @@ -501,6 +542,21 @@ static RegisterPrimOp primop_fetchGit({ } ``` + - To verify the commit signature: + + ```nix + builtins.fetchGit { + url = "ssh://git@github.com/nixos/nix.git"; + verifyCommit = true; + publicKeys = [ + { + type = "ssh-ed25519"; + key = "AAAAC3NzaC1lZDI1NTE5AAAAIArPKULJOid8eS6XETwUjO48/HKBWl7FTCK0Z//fplDi"; + } + ]; + } + ``` + Nix will refetch the branch according to the [`tarball-ttl`](@docroot@/command-ref/conf-file.md#conf-tarball-ttl) setting. This behavior is disabled in [pure evaluation mode](@docroot@/command-ref/conf-file.md#conf-pure-eval). diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index 51e551879..72fba0582 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -169,14 +169,14 @@ void doCommitVerification(const Path repoDir, const Path gitDir, const std::stri && k.type != "ssh-ed25519" && k.type != "ssh-ed25519-sk" && k.type != "ssh-rsa") - warn("Unknow keytype: %s\n" + warn("Unknown keytype: %s\n" "Please use one of\n" "- ssh-dsa\n" - "- ssh-ecdsa\n" - "- ssh-ecdsa-sk\n" - "- ssh-ed25519\n" - "- ssh-ed25519-sk\n" - "- ssh-rsa", k.type); + " ssh-ecdsa\n" + " ssh-ecdsa-sk\n" + " ssh-ed25519\n" + " ssh-ed25519-sk\n" + " ssh-rsa", k.type); allowedSigners += "* " + k.type + " " + k.key + "\n"; } writeFile(allowedSignersFile, allowedSigners); @@ -201,7 +201,7 @@ void doCommitVerification(const Path repoDir, const Path gitDir, const std::stri } re += "]"; if (status == 0 && std::regex_search(output, std::regex(re))) - printTalkative("Commit signature verification on commit %s succeeded", rev); + printTalkative("Signature verification on commit %s succeeded", rev); else throw Error("Commit signature verification on commit %s failed: \n%s", rev, output); } From 271932782dd3d44e0e238bd3234ca1e97996cfea Mon Sep 17 00:00:00 2001 From: BootRhetoric <110117466+BootRhetoric@users.noreply.github.com> Date: Fri, 20 Oct 2023 21:18:01 +0200 Subject: [PATCH 09/55] fetchGit and flake: add commit signature verification tests This adds simple tests of the commit signature verification mechanism of fetchGit and its flake input wrapper. OpenSSH is added to the build dependencies since it's needed to create a key when testing the functionality. It is neither a built- nor a runtime dependency. --- flake.nix | 1 + tests/functional/fetchGitVerification.sh | 76 ++++++++++++++++++++++++ tests/functional/local.mk | 1 + 3 files changed, 78 insertions(+) create mode 100644 tests/functional/fetchGitVerification.sh diff --git a/flake.nix b/flake.nix index 7cc4ed7fe..51d818423 100644 --- a/flake.nix +++ b/flake.nix @@ -185,6 +185,7 @@ buildPackages.git buildPackages.mercurial # FIXME: remove? only needed for tests buildPackages.jq # Also for custom mdBook preprocessor. 
+ buildPackages.openssh # only needed for tests (ssh-keygen) ] ++ lib.optionals stdenv.hostPlatform.isLinux [(buildPackages.util-linuxMinimal or buildPackages.utillinuxMinimal)]; diff --git a/tests/functional/fetchGitVerification.sh b/tests/functional/fetchGitVerification.sh new file mode 100644 index 000000000..4d9209498 --- /dev/null +++ b/tests/functional/fetchGitVerification.sh @@ -0,0 +1,76 @@ +source common.sh + +requireGit +[[ $(type -p ssh-keygen) ]] || skipTest "ssh-keygen not installed" # require ssh-keygen + +enableFeatures "verified-fetches" + +clearStore + +repo="$TEST_ROOT/git" + +# generate signing keys +keysDir=$TEST_ROOT/.ssh +mkdir -p "$keysDir" +ssh-keygen -f "$keysDir/testkey1" -t ed25519 -P "" -C "test key 1" +key1File="$keysDir/testkey1.pub" +publicKey1=$(awk '{print $2}' "$key1File") +ssh-keygen -f "$keysDir/testkey2" -t rsa -P "" -C "test key 2" +key2File="$keysDir/testkey2.pub" +publicKey2=$(awk '{print $2}' "$key2File") + +git init $repo +git -C $repo config user.email "foobar@example.com" +git -C $repo config user.name "Foobar" +git -C $repo config gpg.format ssh + +echo 'hello' > $repo/text +git -C $repo add text +git -C $repo -c "user.signingkey=$key1File" commit -S -m 'initial commit' + +out=$(nix eval --impure --raw --expr "builtins.fetchGit { url = \"file://$repo\"; keytype = \"ssh-rsa\"; publicKey = \"$publicKey2\"; }" 2>&1) || status=$? +[[ $status == 1 ]] +[[ $out =~ 'No principal matched.' ]] +[[ $(nix eval --impure --raw --expr "builtins.readFile (builtins.fetchGit { url = \"file://$repo\"; publicKey = \"$publicKey1\"; } + \"/text\")") = 'hello' ]] + +echo 'hello world' > $repo/text +git -C $repo add text +git -C $repo -c "user.signingkey=$key2File" commit -S -m 'second commit' + +[[ $(nix eval --impure --raw --expr "builtins.readFile (builtins.fetchGit { url = \"file://$repo\"; publicKeys = [{key = \"$publicKey1\";} {type = \"ssh-rsa\"; key = \"$publicKey2\";}]; } + \"/text\")") = 'hello world' ]] + +# Flake input test +flakeDir="$TEST_ROOT/flake" +mkdir -p "$flakeDir" +cat > "$flakeDir/flake.nix" < "$flakeDir/flake.nix" <&1) || status=$? +[[ $status == 1 ]] +[[ $out =~ 'No principal matched.' ]] \ No newline at end of file diff --git a/tests/functional/local.mk b/tests/functional/local.mk index 3679349f8..fe0d0c4ed 100644 --- a/tests/functional/local.mk +++ b/tests/functional/local.mk @@ -55,6 +55,7 @@ nix_tests = \ secure-drv-outputs.sh \ restricted.sh \ fetchGitSubmodules.sh \ + fetchGitVerification.sh \ flakes/search-root.sh \ readfile-context.sh \ nix-channel.sh \ From 9b880e3e29c7a485b0e21495f2d089c5151589cc Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 2 Nov 2023 19:39:09 -0400 Subject: [PATCH 10/55] Factor out `MemorySourceAccessor`, implement missing features The new `MemorySourceAccessor` rather than being a slightly lossy flat map is a complete in-memory model of file system objects. 
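The shape of that model can be pictured with the following stand-alone sketch (illustrative types only, assuming a C++17 compiler; it is not the new class itself): every node is a variant of regular file, directory, or symlink, and directories hold named child nodes, so lookups walk a real tree instead of consulting a flat path-to-contents map.

```c++
// Stand-alone sketch with invented names, not the MemorySourceAccessor API.
#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <variant>

struct FileObj;

struct Regular   { bool executable = false; std::string contents; };
struct Symlink   { std::string target; };
struct Directory { std::map<std::string, std::shared_ptr<FileObj>> entries; };

// A file system object is exactly one of the three kinds above.
struct FileObj   { std::variant<Regular, Directory, Symlink> raw; };

int main()
{
    Directory root;
    root.entries["hello.txt"] =
        std::make_shared<FileObj>(FileObj{Regular{false, "hi"}});
    assert(std::get<Regular>(root.entries.at("hello.txt")->raw).contents == "hi");
    return 0;
}
```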
Co-authored-by: Eelco Dolstra --- src/libfetchers/input-accessor.hh | 2 +- src/libfetchers/memory-input-accessor.cc | 44 ++------ src/libutil/memory-source-accessor.cc | 124 +++++++++++++++++++++++ src/libutil/memory-source-accessor.hh | 74 ++++++++++++++ 4 files changed, 205 insertions(+), 39 deletions(-) create mode 100644 src/libutil/memory-source-accessor.cc create mode 100644 src/libutil/memory-source-accessor.hh diff --git a/src/libfetchers/input-accessor.hh b/src/libfetchers/input-accessor.hh index 5dc05a363..68fdf07a7 100644 --- a/src/libfetchers/input-accessor.hh +++ b/src/libfetchers/input-accessor.hh @@ -14,7 +14,7 @@ struct SourcePath; class StorePath; class Store; -struct InputAccessor : SourceAccessor, std::enable_shared_from_this +struct InputAccessor : virtual SourceAccessor, std::enable_shared_from_this { /** * Return the maximum last-modified time of the files in this diff --git a/src/libfetchers/memory-input-accessor.cc b/src/libfetchers/memory-input-accessor.cc index 6468ece41..057f3e37f 100644 --- a/src/libfetchers/memory-input-accessor.cc +++ b/src/libfetchers/memory-input-accessor.cc @@ -1,48 +1,16 @@ #include "memory-input-accessor.hh" +#include "memory-source-accessor.hh" namespace nix { -struct MemoryInputAccessorImpl : MemoryInputAccessor +struct MemoryInputAccessorImpl : MemoryInputAccessor, MemorySourceAccessor { - std::map files; - - std::string readFile(const CanonPath & path) override - { - auto i = files.find(path); - if (i == files.end()) - throw Error("file '%s' does not exist", path); - return i->second; - } - - bool pathExists(const CanonPath & path) override - { - auto i = files.find(path); - return i != files.end(); - } - - std::optional maybeLstat(const CanonPath & path) override - { - auto i = files.find(path); - if (i != files.end()) - return Stat { .type = tRegular, .isExecutable = false }; - return std::nullopt; - } - - DirEntries readDirectory(const CanonPath & path) override - { - return {}; - } - - std::string readLink(const CanonPath & path) override - { - throw UnimplementedError("MemoryInputAccessor::readLink"); - } - SourcePath addFile(CanonPath path, std::string && contents) override { - files.emplace(path, std::move(contents)); - - return {ref(shared_from_this()), std::move(path)}; + return { + ref(shared_from_this()), + MemorySourceAccessor::addFile(path, std::move(contents)) + }; } }; diff --git a/src/libutil/memory-source-accessor.cc b/src/libutil/memory-source-accessor.cc new file mode 100644 index 000000000..f34f6c091 --- /dev/null +++ b/src/libutil/memory-source-accessor.cc @@ -0,0 +1,124 @@ +#include "memory-source-accessor.hh" + +namespace nix { + +MemorySourceAccessor::File * +MemorySourceAccessor::open(const CanonPath & path, std::optional create) +{ + File * cur = &root; + + bool newF = false; + + for (std::string_view name : path) + { + auto * curDirP = std::get_if(&cur->raw); + if (!curDirP) + return nullptr; + auto & curDir = *curDirP; + + auto i = curDir.contents.find(name); + if (i == curDir.contents.end()) { + if (!create) + return nullptr; + else { + newF = true; + i = curDir.contents.insert(i, { + std::string { name }, + File::Directory {}, + }); + } + } + cur = &i->second; + } + + if (newF && create) *cur = std::move(*create); + + return cur; +} + +std::string MemorySourceAccessor::readFile(const CanonPath & path) +{ + auto * f = open(path, std::nullopt); + if (!f) + throw Error("file '%s' does not exist", path); + if (auto * r = std::get_if(&f->raw)) + return r->contents; + else + throw Error("file '%s' is not a 
regular file", path); +} + +bool MemorySourceAccessor::pathExists(const CanonPath & path) +{ + return open(path, std::nullopt); +} + +MemorySourceAccessor::Stat MemorySourceAccessor::File::lstat() const +{ + return std::visit(overloaded { + [](const Regular & r) { + return Stat { + .type = tRegular, + .fileSize = r.contents.size(), + .isExecutable = r.executable, + }; + }, + [](const Directory &) { + return Stat { + .type = tDirectory, + }; + }, + [](const Symlink &) { + return Stat { + .type = tSymlink, + }; + }, + }, this->raw); +} + +std::optional +MemorySourceAccessor::maybeLstat(const CanonPath & path) +{ + const auto * f = open(path, std::nullopt); + return f ? std::optional { f->lstat() } : std::nullopt; +} + +MemorySourceAccessor::DirEntries MemorySourceAccessor::readDirectory(const CanonPath & path) +{ + auto * f = open(path, std::nullopt); + if (!f) + throw Error("file '%s' does not exist", path); + if (auto * d = std::get_if(&f->raw)) { + DirEntries res; + for (auto & [name, file] : d->contents) + res.insert_or_assign(name, file.lstat().type); + return res; + } else + throw Error("file '%s' is not a directory", path); + return {}; +} + +std::string MemorySourceAccessor::readLink(const CanonPath & path) +{ + auto * f = open(path, std::nullopt); + if (!f) + throw Error("file '%s' does not exist", path); + if (auto * s = std::get_if(&f->raw)) + return s->target; + else + throw Error("file '%s' is not a symbolic link", path); +} + +CanonPath MemorySourceAccessor::addFile(CanonPath path, std::string && contents) +{ + auto * f = open(path, File { File::Regular {} }); + if (!f) + throw Error("file '%s' cannot be made because some parent file is not a directory", path); + if (auto * r = std::get_if(&f->raw)) + r->contents = std::move(contents); + else + throw Error("file '%s' is not a regular file", path); + + return path; +} + +} diff --git a/src/libutil/memory-source-accessor.hh b/src/libutil/memory-source-accessor.hh new file mode 100644 index 000000000..014fa8098 --- /dev/null +++ b/src/libutil/memory-source-accessor.hh @@ -0,0 +1,74 @@ +#include "source-accessor.hh" +#include "variant-wrapper.hh" + +namespace nix { + +/** + * An source accessor for an in-memory file system. + */ +struct MemorySourceAccessor : virtual SourceAccessor +{ + /** + * In addition to being part of the implementation of + * `MemorySourceAccessor`, this has a side benefit of nicely + * defining what a "file system object" is in Nix. + */ + struct File { + struct Regular { + bool executable = false; + std::string contents; + + GENERATE_CMP(Regular, me->executable, me->contents); + }; + + struct Directory { + using Name = std::string; + + std::map> contents; + + GENERATE_CMP(Directory, me->contents); + }; + + struct Symlink { + std::string target; + + GENERATE_CMP(Symlink, me->target); + }; + + using Raw = std::variant; + Raw raw; + + MAKE_WRAPPER_CONSTRUCTOR(File); + + GENERATE_CMP(File, me->raw); + + Stat lstat() const; + }; + + File root { File::Directory {} }; + + GENERATE_CMP(MemorySourceAccessor, me->root); + + std::string readFile(const CanonPath & path) override; + bool pathExists(const CanonPath & path) override; + std::optional maybeLstat(const CanonPath & path) override; + DirEntries readDirectory(const CanonPath & path) override; + std::string readLink(const CanonPath & path) override; + + /** + * @param create If present, create this file and any parent directories + * that are needed. + * + * Return null if + * + * - `create = false`: File does not exist. 
+ * + * - `create = true`: some parent file was not a dir, so couldn't + * look/create inside. + */ + File * open(const CanonPath & path, std::optional create); + + CanonPath addFile(CanonPath path, std::string && contents); +}; + +} From 2678b51b31febdc6464935e1680d2272a954c3b5 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sun, 5 Nov 2023 12:17:33 -0500 Subject: [PATCH 11/55] Narrower scope for `nativeSystem` I don't think we need a CPP defininition and a header entry, and this way allows constant expression elimination. --- src/libstore/build/local-derivation-goal.cc | 2 ++ src/libutil/error.cc | 2 -- src/libutil/util.hh | 6 ------ 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/src/libstore/build/local-derivation-goal.cc b/src/libstore/build/local-derivation-goal.cc index dcb7dc6bc..e1794139f 100644 --- a/src/libstore/build/local-derivation-goal.cc +++ b/src/libstore/build/local-derivation-goal.cc @@ -1620,6 +1620,8 @@ void setupSeccomp() seccomp_release(ctx); }); + constexpr std::string_view nativeSystem = SYSTEM; + if (nativeSystem == "x86_64-linux" && seccomp_arch_add(ctx, SCMP_ARCH_X86) != 0) throw SysError("unable to add 32-bit seccomp architecture"); diff --git a/src/libutil/error.cc b/src/libutil/error.cc index dd9612471..1badc1069 100644 --- a/src/libutil/error.cc +++ b/src/libutil/error.cc @@ -7,8 +7,6 @@ namespace nix { -const std::string nativeSystem = SYSTEM; - void BaseError::addTrace(std::shared_ptr && e, hintformat hint, bool frame) { err.traces.push_front(Trace { .pos = std::move(e), .hint = hint, .frame = frame }); diff --git a/src/libutil/util.hh b/src/libutil/util.hh index b302d6f45..75683f8fe 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -34,12 +34,6 @@ struct Source; void initLibUtil(); -/** - * The system for which Nix is compiled. - */ -extern const std::string nativeSystem; - - /** * @return an environment variable. */ From ac89bb064aeea85a62b82a6daf0ecca7190a28b7 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 25 Oct 2023 00:43:36 -0400 Subject: [PATCH 12/55] Split up `util.{hh,cc}` All OS and IO operations should be moved out, leaving only some misc portable pure functions. This is useful to avoid copious CPP when doing things like Windows and Emscripten ports. 
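As a rough illustration of the layering this enables (a single-file sketch in which namespaces stand in for the new headers, and every name is invented rather than taken from this tree), a consumer can depend on one small, focused unit instead of pulling in the whole of `util.hh`:

```c++
// Stand-alone sketch; namespaces model the finer-grained headers.
#include <csignal>
#include <iostream>

// Roughly what a small "signals" header provides: just the declarations a
// caller needs, with no OS-heavy or unrelated includes attached.
namespace signals_sketch {
    inline void restoreDefaultInterrupt()
    {
        std::signal(SIGINT, SIG_DFL); // stand-in action for illustration
    }
}

// A consumer (say, logging code) that previously had to include the
// monolithic util header now depends only on the unit it actually uses.
namespace logging_sketch {
    inline void onFatalError()
    {
        signals_sketch::restoreDefaultInterrupt();
        std::cerr << "fatal error, default signal handling restored\n";
    }
}

int main()
{
    logging_sketch::onFatalError();
    return 0;
}
```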
Newly exposed functions to break cycles: - `restoreSignals` - `updateWindowSize` --- perl/lib/Nix/Store.xs | 1 - src/libcmd/built-path.hh | 3 + src/libcmd/common-eval-args.cc | 1 - src/libcmd/editor-for.cc | 2 +- src/libcmd/installable-attr-path.hh | 1 - src/libcmd/installables.cc | 1 + src/libcmd/installables.hh | 1 - src/libcmd/markdown.cc | 1 + src/libcmd/repl.cc | 3 + src/libexpr/attr-path.cc | 1 - src/libexpr/eval-cache.cc | 1 + src/libexpr/eval-settings.cc | 1 + src/libexpr/eval-settings.hh | 2 + src/libexpr/eval.cc | 1 + src/libexpr/flake/config.cc | 3 +- src/libexpr/flake/flake.cc | 1 + src/libexpr/get-drvs.cc | 1 - src/libexpr/parser.y | 1 + src/libexpr/primops.cc | 1 + src/libexpr/search-path.cc | 1 - src/libexpr/value-to-json.cc | 2 +- src/libexpr/value-to-xml.cc | 2 +- src/libexpr/value/context.cc | 1 + src/libexpr/value/context.hh | 1 - src/libfetchers/cache.cc | 1 + src/libfetchers/fetch-settings.hh | 1 - src/libfetchers/git.cc | 3 +- src/libfetchers/input-accessor.hh | 2 + src/libfetchers/mercurial.cc | 2 + src/libfetchers/registry.cc | 2 +- src/libmain/common-args.cc | 2 + src/libmain/loggers.cc | 2 +- src/libmain/progress-bar.cc | 2 +- src/libmain/shared.cc | 3 +- src/libmain/shared.hh | 2 +- src/libstore/binary-cache-store.cc | 1 + src/libstore/build/child.cc | 37 + src/libstore/build/child.hh | 11 + src/libstore/build/hook-instance.cc | 2 + src/libstore/build/hook-instance.hh | 1 + src/libstore/build/local-derivation-goal.cc | 3 + src/libstore/build/local-derivation-goal.hh | 1 + src/libstore/build/worker.cc | 1 + src/libstore/common-protocol.cc | 1 - src/libstore/crypto.cc | 1 + src/libstore/derived-path-map.cc | 1 + src/libstore/derived-path-map.hh | 1 + src/libstore/derived-path.hh | 2 +- src/libstore/filetransfer.cc | 3 +- src/libstore/gc.cc | 7 + src/libstore/globals.cc | 13 +- src/libstore/globals.hh | 2 +- src/libstore/local-store.cc | 1 + src/libstore/local-store.hh | 1 - src/libstore/lock.cc | 1 + src/libstore/machines.cc | 1 - src/libstore/nar-info-disk-cache.cc | 1 + src/libstore/optimise-store.cc | 2 +- src/libstore/path-references.cc | 1 - src/libstore/path-references.hh | 1 + src/libstore/pathlocks.cc | 1 + src/libstore/pathlocks.hh | 2 +- src/libstore/profiles.cc | 2 +- src/libstore/remote-store-connection.hh | 3 + src/libstore/serve-protocol.cc | 1 - src/libstore/sqlite.cc | 1 + src/libstore/ssh.cc | 3 + src/libstore/ssh.hh | 3 +- src/libstore/store-api.cc | 2 + src/libstore/tests/machines.cc | 2 + src/libstore/tests/protocol.hh | 3 + src/libstore/uds-remote-store.cc | 1 + src/libstore/worker-protocol.cc | 1 - src/libutil/archive.cc | 3 +- src/libutil/args.cc | 3 + src/libutil/args.hh | 5 +- src/libutil/canon-path.cc | 2 +- src/libutil/cgroup.cc | 1 + src/libutil/compression.cc | 2 +- src/libutil/config.cc | 2 + src/libutil/current-process.cc | 110 + src/libutil/current-process.hh | 34 + src/libutil/environment-variables.cc | 49 + src/libutil/environment-variables.hh | 41 + src/libutil/error.cc | 3 + src/libutil/file-descriptor.cc | 254 +++ src/libutil/file-descriptor.hh | 84 + src/libutil/file-system.cc | 647 ++++++ src/libutil/file-system.hh | 238 +++ src/libutil/filesystem.cc | 162 -- src/libutil/fs-sink.hh | 1 + src/libutil/hash.cc | 1 - src/libutil/hash.hh | 1 + src/libutil/logging.cc | 3 + src/libutil/monitor-fd.hh | 2 + src/libutil/namespaces.cc | 69 +- src/libutil/namespaces.hh | 23 + src/libutil/posix-source-accessor.cc | 1 + src/libutil/processes.cc | 421 ++++ src/libutil/processes.hh | 123 ++ src/libutil/references.cc | 1 - 
src/libutil/serialise.cc | 2 +- src/libutil/serialise.hh | 1 + src/libutil/signals.cc | 188 ++ src/libutil/signals.hh | 104 + src/libutil/suggestions.cc | 4 +- src/libutil/tarfile.cc | 1 + src/libutil/terminal.cc | 108 + src/libutil/terminal.hh | 38 + src/libutil/tests/logging.cc | 1 - src/libutil/tests/tests.cc | 3 + src/libutil/thread-pool.cc | 2 + src/libutil/thread-pool.hh | 2 +- src/libutil/unix-domain-socket.cc | 100 + src/libutil/unix-domain-socket.hh | 31 + src/libutil/users.cc | 116 ++ src/libutil/users.hh | 58 + src/libutil/util.cc | 1816 +---------------- src/libutil/util.hh | 617 ------ src/nix-build/nix-build.cc | 2 +- src/nix-channel/nix-channel.cc | 2 +- .../nix-collect-garbage.cc | 2 + src/nix-env/nix-env.cc | 2 +- src/nix-env/user-env.cc | 1 - src/nix-instantiate/nix-instantiate.cc | 1 - src/nix-store/dotgraph.cc | 1 - src/nix-store/graphml.cc | 1 - src/nix-store/nix-store.cc | 1 - src/nix/daemon.cc | 3 +- src/nix/develop.cc | 1 - src/nix/doctor.cc | 1 - src/nix/edit.cc | 1 + src/nix/flake.cc | 1 + src/nix/main.cc | 2 + src/nix/run.cc | 1 + src/nix/sigs.cc | 1 + src/nix/upgrade-nix.cc | 1 + src/nix/verify.cc | 1 + 138 files changed, 3028 insertions(+), 2654 deletions(-) create mode 100644 src/libstore/build/child.cc create mode 100644 src/libstore/build/child.hh create mode 100644 src/libutil/current-process.cc create mode 100644 src/libutil/current-process.hh create mode 100644 src/libutil/environment-variables.cc create mode 100644 src/libutil/environment-variables.hh create mode 100644 src/libutil/file-descriptor.cc create mode 100644 src/libutil/file-descriptor.hh create mode 100644 src/libutil/file-system.cc create mode 100644 src/libutil/file-system.hh delete mode 100644 src/libutil/filesystem.cc create mode 100644 src/libutil/processes.cc create mode 100644 src/libutil/processes.hh create mode 100644 src/libutil/signals.cc create mode 100644 src/libutil/signals.hh create mode 100644 src/libutil/terminal.cc create mode 100644 src/libutil/terminal.hh create mode 100644 src/libutil/unix-domain-socket.cc create mode 100644 src/libutil/unix-domain-socket.hh create mode 100644 src/libutil/users.cc create mode 100644 src/libutil/users.hh diff --git a/perl/lib/Nix/Store.xs b/perl/lib/Nix/Store.xs index 08f812b31..f89ac4077 100644 --- a/perl/lib/Nix/Store.xs +++ b/perl/lib/Nix/Store.xs @@ -11,7 +11,6 @@ #include "derivations.hh" #include "globals.hh" #include "store-api.hh" -#include "util.hh" #include "crypto.hh" #include diff --git a/src/libcmd/built-path.hh b/src/libcmd/built-path.hh index e677bc810..7154cc504 100644 --- a/src/libcmd/built-path.hh +++ b/src/libcmd/built-path.hh @@ -1,3 +1,6 @@ +#pragma once +///@file + #include "derived-path.hh" #include "realisation.hh" diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc index e53bc4c01..91fa881b1 100644 --- a/src/libcmd/common-eval-args.cc +++ b/src/libcmd/common-eval-args.cc @@ -2,7 +2,6 @@ #include "common-eval-args.hh" #include "shared.hh" #include "filetransfer.hh" -#include "util.hh" #include "eval.hh" #include "fetchers.hh" #include "registry.hh" diff --git a/src/libcmd/editor-for.cc b/src/libcmd/editor-for.cc index a17c6f12a..619d3673f 100644 --- a/src/libcmd/editor-for.cc +++ b/src/libcmd/editor-for.cc @@ -1,5 +1,5 @@ -#include "util.hh" #include "editor-for.hh" +#include "environment-variables.hh" namespace nix { diff --git a/src/libcmd/installable-attr-path.hh b/src/libcmd/installable-attr-path.hh index e9f0c33da..86c2f8219 100644 --- a/src/libcmd/installable-attr-path.hh +++ 
b/src/libcmd/installable-attr-path.hh @@ -4,7 +4,6 @@ #include "globals.hh" #include "installable-value.hh" #include "outputs-spec.hh" -#include "util.hh" #include "command.hh" #include "attr-path.hh" #include "common-eval-args.hh" diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index bc0b8a988..e7f58556f 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -4,6 +4,7 @@ #include "installable-attr-path.hh" #include "installable-flake.hh" #include "outputs-spec.hh" +#include "users.hh" #include "util.hh" #include "command.hh" #include "attr-path.hh" diff --git a/src/libcmd/installables.hh b/src/libcmd/installables.hh index b0dc0dc02..e087f935c 100644 --- a/src/libcmd/installables.hh +++ b/src/libcmd/installables.hh @@ -1,7 +1,6 @@ #pragma once ///@file -#include "util.hh" #include "path.hh" #include "outputs-spec.hh" #include "derived-path.hh" diff --git a/src/libcmd/markdown.cc b/src/libcmd/markdown.cc index 668a07763..8b3bbc1b5 100644 --- a/src/libcmd/markdown.cc +++ b/src/libcmd/markdown.cc @@ -1,6 +1,7 @@ #include "markdown.hh" #include "util.hh" #include "finally.hh" +#include "terminal.hh" #include #include diff --git a/src/libcmd/repl.cc b/src/libcmd/repl.cc index 2e17a29a7..bf5643a5c 100644 --- a/src/libcmd/repl.cc +++ b/src/libcmd/repl.cc @@ -22,6 +22,7 @@ extern "C" { #include "repl.hh" #include "ansicolor.hh" +#include "signals.hh" #include "shared.hh" #include "eval.hh" #include "eval-cache.hh" @@ -36,6 +37,8 @@ extern "C" { #include "globals.hh" #include "flake/flake.hh" #include "flake/lockfile.hh" +#include "users.hh" +#include "terminal.hh" #include "editor-for.hh" #include "finally.hh" #include "markdown.hh" diff --git a/src/libexpr/attr-path.cc b/src/libexpr/attr-path.cc index d12345710..7481a2232 100644 --- a/src/libexpr/attr-path.cc +++ b/src/libexpr/attr-path.cc @@ -1,6 +1,5 @@ #include "attr-path.hh" #include "eval-inline.hh" -#include "util.hh" namespace nix { diff --git a/src/libexpr/eval-cache.cc b/src/libexpr/eval-cache.cc index 10fc799a9..6c0e33709 100644 --- a/src/libexpr/eval-cache.cc +++ b/src/libexpr/eval-cache.cc @@ -1,3 +1,4 @@ +#include "users.hh" #include "eval-cache.hh" #include "sqlite.hh" #include "eval.hh" diff --git a/src/libexpr/eval-settings.cc b/src/libexpr/eval-settings.cc index 93b4a5289..444a7d7d6 100644 --- a/src/libexpr/eval-settings.cc +++ b/src/libexpr/eval-settings.cc @@ -1,3 +1,4 @@ +#include "users.hh" #include "globals.hh" #include "profiles.hh" #include "eval.hh" diff --git a/src/libexpr/eval-settings.hh b/src/libexpr/eval-settings.hh index 5473d688e..db2971acb 100644 --- a/src/libexpr/eval-settings.hh +++ b/src/libexpr/eval-settings.hh @@ -1,4 +1,6 @@ #pragma once +///@file + #include "config.hh" namespace nix { diff --git a/src/libexpr/eval.cc b/src/libexpr/eval.cc index d26cde423..dfe81cbf7 100644 --- a/src/libexpr/eval.cc +++ b/src/libexpr/eval.cc @@ -14,6 +14,7 @@ #include "print.hh" #include "fs-input-accessor.hh" #include "memory-input-accessor.hh" +#include "signals.hh" #include #include diff --git a/src/libexpr/flake/config.cc b/src/libexpr/flake/config.cc index e89014862..3c7ed5d8a 100644 --- a/src/libexpr/flake/config.cc +++ b/src/libexpr/flake/config.cc @@ -1,6 +1,7 @@ -#include "flake.hh" +#include "users.hh" #include "globals.hh" #include "fetch-settings.hh" +#include "flake.hh" #include diff --git a/src/libexpr/flake/flake.cc b/src/libexpr/flake/flake.cc index ded132695..54de53e0b 100644 --- a/src/libexpr/flake/flake.cc +++ b/src/libexpr/flake/flake.cc @@ -1,3 +1,4 @@ +#include 
"terminal.hh" #include "flake.hh" #include "eval.hh" #include "eval-settings.hh" diff --git a/src/libexpr/get-drvs.cc b/src/libexpr/get-drvs.cc index fe3e6f7ee..d4e946d81 100644 --- a/src/libexpr/get-drvs.cc +++ b/src/libexpr/get-drvs.cc @@ -1,5 +1,4 @@ #include "get-drvs.hh" -#include "util.hh" #include "eval-inline.hh" #include "derivations.hh" #include "store-api.hh" diff --git a/src/libexpr/parser.y b/src/libexpr/parser.y index 607795937..b86cef217 100644 --- a/src/libexpr/parser.y +++ b/src/libexpr/parser.y @@ -19,6 +19,7 @@ #include #include "util.hh" +#include "users.hh" #include "nixexpr.hh" #include "eval.hh" diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index e3c775d90..36340d0f9 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -10,6 +10,7 @@ #include "path-references.hh" #include "store-api.hh" #include "util.hh" +#include "processes.hh" #include "value-to-json.hh" #include "value-to-xml.hh" #include "primops.hh" diff --git a/src/libexpr/search-path.cc b/src/libexpr/search-path.cc index 180d5f8b1..a25767496 100644 --- a/src/libexpr/search-path.cc +++ b/src/libexpr/search-path.cc @@ -1,5 +1,4 @@ #include "search-path.hh" -#include "util.hh" namespace nix { diff --git a/src/libexpr/value-to-json.cc b/src/libexpr/value-to-json.cc index cbc91f509..74b3ebf13 100644 --- a/src/libexpr/value-to-json.cc +++ b/src/libexpr/value-to-json.cc @@ -1,7 +1,7 @@ #include "value-to-json.hh" #include "eval-inline.hh" -#include "util.hh" #include "store-api.hh" +#include "signals.hh" #include #include diff --git a/src/libexpr/value-to-xml.cc b/src/libexpr/value-to-xml.cc index bd7a4ae30..5032115bb 100644 --- a/src/libexpr/value-to-xml.cc +++ b/src/libexpr/value-to-xml.cc @@ -1,7 +1,7 @@ #include "value-to-xml.hh" #include "xml-writer.hh" #include "eval-inline.hh" -#include "util.hh" +#include "signals.hh" #include diff --git a/src/libexpr/value/context.cc b/src/libexpr/value/context.cc index 22361d8fa..6d9633268 100644 --- a/src/libexpr/value/context.cc +++ b/src/libexpr/value/context.cc @@ -1,3 +1,4 @@ +#include "util.hh" #include "value/context.hh" #include diff --git a/src/libexpr/value/context.hh b/src/libexpr/value/context.hh index 9f1d59317..51fd30a44 100644 --- a/src/libexpr/value/context.hh +++ b/src/libexpr/value/context.hh @@ -1,7 +1,6 @@ #pragma once ///@file -#include "util.hh" #include "comparator.hh" #include "derived-path.hh" #include "variant-wrapper.hh" diff --git a/src/libfetchers/cache.cc b/src/libfetchers/cache.cc index 0c8ecac9d..b72a464e8 100644 --- a/src/libfetchers/cache.cc +++ b/src/libfetchers/cache.cc @@ -1,4 +1,5 @@ #include "cache.hh" +#include "users.hh" #include "sqlite.hh" #include "sync.hh" #include "store-api.hh" diff --git a/src/libfetchers/fetch-settings.hh b/src/libfetchers/fetch-settings.hh index 6108a179c..f095963a8 100644 --- a/src/libfetchers/fetch-settings.hh +++ b/src/libfetchers/fetch-settings.hh @@ -3,7 +3,6 @@ #include "types.hh" #include "config.hh" -#include "util.hh" #include #include diff --git a/src/libfetchers/git.cc b/src/libfetchers/git.cc index 72fba0582..cc735996b 100644 --- a/src/libfetchers/git.cc +++ b/src/libfetchers/git.cc @@ -1,11 +1,12 @@ #include "fetchers.hh" +#include "users.hh" #include "cache.hh" #include "globals.hh" #include "tarfile.hh" #include "store-api.hh" #include "url-parts.hh" #include "pathlocks.hh" -#include "util.hh" +#include "processes.hh" #include "git.hh" #include "fetch-settings.hh" diff --git a/src/libfetchers/input-accessor.hh b/src/libfetchers/input-accessor.hh index 
5dc05a363..6857ce156 100644 --- a/src/libfetchers/input-accessor.hh +++ b/src/libfetchers/input-accessor.hh @@ -1,8 +1,10 @@ #pragma once +///@file #include "source-accessor.hh" #include "ref.hh" #include "types.hh" +#include "file-system.hh" #include "repair-flag.hh" #include "content-address.hh" diff --git a/src/libfetchers/mercurial.cc b/src/libfetchers/mercurial.cc index eda33dfe7..9244acf39 100644 --- a/src/libfetchers/mercurial.cc +++ b/src/libfetchers/mercurial.cc @@ -1,4 +1,6 @@ #include "fetchers.hh" +#include "processes.hh" +#include "users.hh" #include "cache.hh" #include "globals.hh" #include "tarfile.hh" diff --git a/src/libfetchers/registry.cc b/src/libfetchers/registry.cc index a0fff9ceb..9c7bc0cfe 100644 --- a/src/libfetchers/registry.cc +++ b/src/libfetchers/registry.cc @@ -1,6 +1,6 @@ #include "registry.hh" #include "tarball.hh" -#include "util.hh" +#include "users.hh" #include "globals.hh" #include "store-api.hh" #include "local-fs-store.hh" diff --git a/src/libmain/common-args.cc b/src/libmain/common-args.cc index 205b77808..5b49aaabc 100644 --- a/src/libmain/common-args.cc +++ b/src/libmain/common-args.cc @@ -1,7 +1,9 @@ #include "common-args.hh" #include "args/root.hh" #include "globals.hh" +#include "logging.hh" #include "loggers.hh" +#include "util.hh" namespace nix { diff --git a/src/libmain/loggers.cc b/src/libmain/loggers.cc index cda5cb939..9829859de 100644 --- a/src/libmain/loggers.cc +++ b/src/libmain/loggers.cc @@ -1,6 +1,6 @@ #include "loggers.hh" +#include "environment-variables.hh" #include "progress-bar.hh" -#include "util.hh" namespace nix { diff --git a/src/libmain/progress-bar.cc b/src/libmain/progress-bar.cc index 45b1fdfd1..a7aee47c3 100644 --- a/src/libmain/progress-bar.cc +++ b/src/libmain/progress-bar.cc @@ -1,5 +1,5 @@ #include "progress-bar.hh" -#include "util.hh" +#include "terminal.hh" #include "sync.hh" #include "store-api.hh" #include "names.hh" diff --git a/src/libmain/shared.cc b/src/libmain/shared.cc index 9c2ad039a..862ef355b 100644 --- a/src/libmain/shared.cc +++ b/src/libmain/shared.cc @@ -1,10 +1,11 @@ #include "globals.hh" +#include "current-process.hh" #include "shared.hh" #include "store-api.hh" #include "gc-store.hh" -#include "util.hh" #include "loggers.hh" #include "progress-bar.hh" +#include "signals.hh" #include #include diff --git a/src/libmain/shared.hh b/src/libmain/shared.hh index 3159fe479..c68f6cd83 100644 --- a/src/libmain/shared.hh +++ b/src/libmain/shared.hh @@ -1,7 +1,7 @@ #pragma once ///@file -#include "util.hh" +#include "processes.hh" #include "args.hh" #include "args/root.hh" #include "common-args.hh" diff --git a/src/libstore/binary-cache-store.cc b/src/libstore/binary-cache-store.cc index 6a52c4c51..ae483c95e 100644 --- a/src/libstore/binary-cache-store.cc +++ b/src/libstore/binary-cache-store.cc @@ -11,6 +11,7 @@ #include "nar-accessor.hh" #include "thread-pool.hh" #include "callback.hh" +#include "signals.hh" #include #include diff --git a/src/libstore/build/child.cc b/src/libstore/build/child.cc new file mode 100644 index 000000000..aa31c3caf --- /dev/null +++ b/src/libstore/build/child.cc @@ -0,0 +1,37 @@ +#include "child.hh" +#include "current-process.hh" +#include "logging.hh" + +#include +#include + +namespace nix { + +void commonChildInit() +{ + logger = makeSimpleLogger(); + + const static std::string pathNullDevice = "/dev/null"; + restoreProcessContext(false); + + /* Put the child in a separate session (and thus a separate + process group) so that it has no controlling terminal (meaning + that e.g. 
ssh cannot open /dev/tty) and it doesn't receive + terminal signals. */ + if (setsid() == -1) + throw SysError("creating a new session"); + + /* Dup stderr to stdout. */ + if (dup2(STDERR_FILENO, STDOUT_FILENO) == -1) + throw SysError("cannot dup stderr into stdout"); + + /* Reroute stdin to /dev/null. */ + int fdDevNull = open(pathNullDevice.c_str(), O_RDWR); + if (fdDevNull == -1) + throw SysError("cannot open '%1%'", pathNullDevice); + if (dup2(fdDevNull, STDIN_FILENO) == -1) + throw SysError("cannot dup null device into stdin"); + close(fdDevNull); +} + +} diff --git a/src/libstore/build/child.hh b/src/libstore/build/child.hh new file mode 100644 index 000000000..3dfc552b9 --- /dev/null +++ b/src/libstore/build/child.hh @@ -0,0 +1,11 @@ +#pragma once +///@file + +namespace nix { + +/** + * Common initialisation performed in child processes. + */ +void commonChildInit(); + +} diff --git a/src/libstore/build/hook-instance.cc b/src/libstore/build/hook-instance.cc index 337c60bd4..5d045ec3d 100644 --- a/src/libstore/build/hook-instance.cc +++ b/src/libstore/build/hook-instance.cc @@ -1,5 +1,7 @@ #include "globals.hh" #include "hook-instance.hh" +#include "file-system.hh" +#include "child.hh" namespace nix { diff --git a/src/libstore/build/hook-instance.hh b/src/libstore/build/hook-instance.hh index d84f62877..61cf534f4 100644 --- a/src/libstore/build/hook-instance.hh +++ b/src/libstore/build/hook-instance.hh @@ -3,6 +3,7 @@ #include "logging.hh" #include "serialise.hh" +#include "processes.hh" namespace nix { diff --git a/src/libstore/build/local-derivation-goal.cc b/src/libstore/build/local-derivation-goal.cc index e1794139f..adb011e30 100644 --- a/src/libstore/build/local-derivation-goal.cc +++ b/src/libstore/build/local-derivation-goal.cc @@ -15,7 +15,10 @@ #include "json-utils.hh" #include "cgroup.hh" #include "personality.hh" +#include "current-process.hh" #include "namespaces.hh" +#include "child.hh" +#include "unix-domain-socket.hh" #include #include diff --git a/src/libstore/build/local-derivation-goal.hh b/src/libstore/build/local-derivation-goal.hh index 1cb68a869..88152a645 100644 --- a/src/libstore/build/local-derivation-goal.hh +++ b/src/libstore/build/local-derivation-goal.hh @@ -3,6 +3,7 @@ #include "derivation-goal.hh" #include "local-store.hh" +#include "processes.hh" namespace nix { diff --git a/src/libstore/build/worker.cc b/src/libstore/build/worker.cc index 37cb86b91..01914e2d6 100644 --- a/src/libstore/build/worker.cc +++ b/src/libstore/build/worker.cc @@ -4,6 +4,7 @@ #include "drv-output-substitution-goal.hh" #include "local-derivation-goal.hh" #include "hook-instance.hh" +#include "signals.hh" #include diff --git a/src/libstore/common-protocol.cc b/src/libstore/common-protocol.cc index f906814bc..68445258f 100644 --- a/src/libstore/common-protocol.cc +++ b/src/libstore/common-protocol.cc @@ -1,5 +1,4 @@ #include "serialise.hh" -#include "util.hh" #include "path-with-outputs.hh" #include "store-api.hh" #include "build-result.hh" diff --git a/src/libstore/crypto.cc b/src/libstore/crypto.cc index 1027469c9..1b705733c 100644 --- a/src/libstore/crypto.cc +++ b/src/libstore/crypto.cc @@ -1,4 +1,5 @@ #include "crypto.hh" +#include "file-system.hh" #include "util.hh" #include "globals.hh" diff --git a/src/libstore/derived-path-map.cc b/src/libstore/derived-path-map.cc index 5982c04b3..4c1ea417a 100644 --- a/src/libstore/derived-path-map.cc +++ b/src/libstore/derived-path-map.cc @@ -1,4 +1,5 @@ #include "derived-path-map.hh" +#include "util.hh" namespace nix { diff --git 
a/src/libstore/derived-path-map.hh b/src/libstore/derived-path-map.hh index 4d72b301e..393cdedf7 100644 --- a/src/libstore/derived-path-map.hh +++ b/src/libstore/derived-path-map.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "types.hh" #include "derived-path.hh" diff --git a/src/libstore/derived-path.hh b/src/libstore/derived-path.hh index 4d7033df2..6c5dfeed9 100644 --- a/src/libstore/derived-path.hh +++ b/src/libstore/derived-path.hh @@ -1,10 +1,10 @@ #pragma once ///@file -#include "util.hh" #include "path.hh" #include "outputs-spec.hh" #include "comparator.hh" +#include "config.hh" #include diff --git a/src/libstore/filetransfer.cc b/src/libstore/filetransfer.cc index a283af5a2..dcbec4acd 100644 --- a/src/libstore/filetransfer.cc +++ b/src/libstore/filetransfer.cc @@ -1,11 +1,12 @@ #include "filetransfer.hh" -#include "util.hh" +#include "namespaces.hh" #include "globals.hh" #include "store-api.hh" #include "s3.hh" #include "compression.hh" #include "finally.hh" #include "callback.hh" +#include "signals.hh" #if ENABLE_S3 #include diff --git a/src/libstore/gc.cc b/src/libstore/gc.cc index fb7895817..8d05ae4bd 100644 --- a/src/libstore/gc.cc +++ b/src/libstore/gc.cc @@ -2,6 +2,13 @@ #include "globals.hh" #include "local-store.hh" #include "finally.hh" +#include "unix-domain-socket.hh" +#include "signals.hh" + +#if !defined(__linux__) +// For shelling out to lsof +# include "processes.hh" +#endif #include #include diff --git a/src/libstore/globals.cc b/src/libstore/globals.cc index 9c25d9868..cc416a4d6 100644 --- a/src/libstore/globals.cc +++ b/src/libstore/globals.cc @@ -1,7 +1,8 @@ #include "globals.hh" -#include "util.hh" +#include "current-process.hh" #include "archive.hh" #include "args.hh" +#include "users.hh" #include "abstract-setting-to-json.hh" #include "compute-levels.hh" @@ -17,9 +18,13 @@ #include #ifdef __GLIBC__ -#include -#include -#include +# include +# include +# include +#endif + +#if __APPLE__ +# include "processes.hh" #endif #include "config-impl.hh" diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh index 12fb48d93..8e034f5a9 100644 --- a/src/libstore/globals.hh +++ b/src/libstore/globals.hh @@ -3,7 +3,7 @@ #include "types.hh" #include "config.hh" -#include "util.hh" +#include "environment-variables.hh" #include "experimental-features.hh" #include diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc index a5e9426f8..2a3582ad8 100644 --- a/src/libstore/local-store.cc +++ b/src/libstore/local-store.cc @@ -10,6 +10,7 @@ #include "topo-sort.hh" #include "finally.hh" #include "compression.hh" +#include "signals.hh" #include #include diff --git a/src/libstore/local-store.hh b/src/libstore/local-store.hh index fe26a0f27..6d589bee5 100644 --- a/src/libstore/local-store.hh +++ b/src/libstore/local-store.hh @@ -7,7 +7,6 @@ #include "store-api.hh" #include "indirect-root-store.hh" #include "sync.hh" -#include "util.hh" #include #include diff --git a/src/libstore/lock.cc b/src/libstore/lock.cc index 165e4969f..87f55ce49 100644 --- a/src/libstore/lock.cc +++ b/src/libstore/lock.cc @@ -1,4 +1,5 @@ #include "lock.hh" +#include "file-system.hh" #include "globals.hh" #include "pathlocks.hh" diff --git a/src/libstore/machines.cc b/src/libstore/machines.cc index e87f46980..512115893 100644 --- a/src/libstore/machines.cc +++ b/src/libstore/machines.cc @@ -1,5 +1,4 @@ #include "machines.hh" -#include "util.hh" #include "globals.hh" #include "store-api.hh" diff --git a/src/libstore/nar-info-disk-cache.cc b/src/libstore/nar-info-disk-cache.cc index 
cdbcf7e74..e50c15939 100644 --- a/src/libstore/nar-info-disk-cache.cc +++ b/src/libstore/nar-info-disk-cache.cc @@ -1,4 +1,5 @@ #include "nar-info-disk-cache.hh" +#include "users.hh" #include "sync.hh" #include "sqlite.hh" #include "globals.hh" diff --git a/src/libstore/optimise-store.cc b/src/libstore/optimise-store.cc index 23c6a41e4..a4ac413b3 100644 --- a/src/libstore/optimise-store.cc +++ b/src/libstore/optimise-store.cc @@ -1,6 +1,6 @@ -#include "util.hh" #include "local-store.hh" #include "globals.hh" +#include "signals.hh" #include #include diff --git a/src/libstore/path-references.cc b/src/libstore/path-references.cc index 33cf66ce3..274b596c0 100644 --- a/src/libstore/path-references.cc +++ b/src/libstore/path-references.cc @@ -1,6 +1,5 @@ #include "path-references.hh" #include "hash.hh" -#include "util.hh" #include "archive.hh" #include diff --git a/src/libstore/path-references.hh b/src/libstore/path-references.hh index 7b44e3261..0553003f8 100644 --- a/src/libstore/path-references.hh +++ b/src/libstore/path-references.hh @@ -1,4 +1,5 @@ #pragma once +///@file #include "references.hh" #include "path.hh" diff --git a/src/libstore/pathlocks.cc b/src/libstore/pathlocks.cc index adc763e6a..2b5b8dfe7 100644 --- a/src/libstore/pathlocks.cc +++ b/src/libstore/pathlocks.cc @@ -1,6 +1,7 @@ #include "pathlocks.hh" #include "util.hh" #include "sync.hh" +#include "signals.hh" #include #include diff --git a/src/libstore/pathlocks.hh b/src/libstore/pathlocks.hh index 4921df352..7fcfa2e40 100644 --- a/src/libstore/pathlocks.hh +++ b/src/libstore/pathlocks.hh @@ -1,7 +1,7 @@ #pragma once ///@file -#include "util.hh" +#include "file-descriptor.hh" namespace nix { diff --git a/src/libstore/profiles.cc b/src/libstore/profiles.cc index 239047dd6..e8b88693d 100644 --- a/src/libstore/profiles.cc +++ b/src/libstore/profiles.cc @@ -1,7 +1,7 @@ #include "profiles.hh" #include "store-api.hh" #include "local-fs-store.hh" -#include "util.hh" +#include "users.hh" #include #include diff --git a/src/libstore/remote-store-connection.hh b/src/libstore/remote-store-connection.hh index e4a9cacb9..44328b06b 100644 --- a/src/libstore/remote-store-connection.hh +++ b/src/libstore/remote-store-connection.hh @@ -1,3 +1,6 @@ +#pragma once +///@file + #include "remote-store.hh" #include "worker-protocol.hh" #include "pool.hh" diff --git a/src/libstore/serve-protocol.cc b/src/libstore/serve-protocol.cc index 97a0ddf0e..9bfcc279c 100644 --- a/src/libstore/serve-protocol.cc +++ b/src/libstore/serve-protocol.cc @@ -1,5 +1,4 @@ #include "serialise.hh" -#include "util.hh" #include "path-with-outputs.hh" #include "store-api.hh" #include "build-result.hh" diff --git a/src/libstore/sqlite.cc b/src/libstore/sqlite.cc index 7c8decb74..d7432a305 100644 --- a/src/libstore/sqlite.cc +++ b/src/libstore/sqlite.cc @@ -2,6 +2,7 @@ #include "globals.hh" #include "util.hh" #include "url.hh" +#include "signals.hh" #include diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc index da32f1b79..03b2f0be9 100644 --- a/src/libstore/ssh.cc +++ b/src/libstore/ssh.cc @@ -1,5 +1,8 @@ #include "ssh.hh" #include "finally.hh" +#include "current-process.hh" +#include "environment-variables.hh" +#include "util.hh" namespace nix { diff --git a/src/libstore/ssh.hh b/src/libstore/ssh.hh index 94b952af9..bfcd6f21c 100644 --- a/src/libstore/ssh.hh +++ b/src/libstore/ssh.hh @@ -1,8 +1,9 @@ #pragma once ///@file -#include "util.hh" #include "sync.hh" +#include "processes.hh" +#include "file-system.hh" namespace nix { diff --git a/src/libstore/store-api.cc 
b/src/libstore/store-api.cc index 646b0ec7d..c9ebb6c14 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -14,6 +14,8 @@ // FIXME this should not be here, see TODO below on // `addMultipleToStore`. #include "worker-protocol.hh" +#include "signals.hh" +#include "users.hh" #include #include diff --git a/src/libstore/tests/machines.cc b/src/libstore/tests/machines.cc index f51052b14..fede328ea 100644 --- a/src/libstore/tests/machines.cc +++ b/src/libstore/tests/machines.cc @@ -1,5 +1,7 @@ #include "machines.hh" #include "globals.hh" +#include "file-system.hh" +#include "util.hh" #include diff --git a/src/libstore/tests/protocol.hh b/src/libstore/tests/protocol.hh index 0378b3e1f..466032a79 100644 --- a/src/libstore/tests/protocol.hh +++ b/src/libstore/tests/protocol.hh @@ -1,3 +1,6 @@ +#pragma once +///@file + #include #include diff --git a/src/libstore/uds-remote-store.cc b/src/libstore/uds-remote-store.cc index 99589f8b2..226cdf717 100644 --- a/src/libstore/uds-remote-store.cc +++ b/src/libstore/uds-remote-store.cc @@ -1,4 +1,5 @@ #include "uds-remote-store.hh" +#include "unix-domain-socket.hh" #include "worker-protocol.hh" #include diff --git a/src/libstore/worker-protocol.cc b/src/libstore/worker-protocol.cc index d618b9bd8..1d202f8d1 100644 --- a/src/libstore/worker-protocol.cc +++ b/src/libstore/worker-protocol.cc @@ -1,5 +1,4 @@ #include "serialise.hh" -#include "util.hh" #include "path-with-outputs.hh" #include "store-api.hh" #include "build-result.hh" diff --git a/src/libutil/archive.cc b/src/libutil/archive.cc index 4ca84d357..465df2073 100644 --- a/src/libutil/archive.cc +++ b/src/libutil/archive.cc @@ -6,9 +6,10 @@ #include // for strcasecmp #include "archive.hh" -#include "util.hh" #include "config.hh" #include "posix-source-accessor.hh" +#include "file-system.hh" +#include "signals.hh" namespace nix { diff --git a/src/libutil/args.cc b/src/libutil/args.cc index 811353c18..0b65519a3 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -1,6 +1,9 @@ #include "args.hh" #include "args/root.hh" #include "hash.hh" +#include "environment-variables.hh" +#include "signals.hh" +#include "users.hh" #include "json-utils.hh" #include diff --git a/src/libutil/args.hh b/src/libutil/args.hh index e3b41313f..45fd678e7 100644 --- a/src/libutil/args.hh +++ b/src/libutil/args.hh @@ -2,12 +2,15 @@ ///@file #include +#include #include #include +#include #include -#include "util.hh" +#include "types.hh" +#include "experimental-features.hh" namespace nix { diff --git a/src/libutil/canon-path.cc b/src/libutil/canon-path.cc index 040464532..f678fae94 100644 --- a/src/libutil/canon-path.cc +++ b/src/libutil/canon-path.cc @@ -1,5 +1,5 @@ #include "canon-path.hh" -#include "util.hh" +#include "file-system.hh" namespace nix { diff --git a/src/libutil/cgroup.cc b/src/libutil/cgroup.cc index a008481ca..4c2bf31ff 100644 --- a/src/libutil/cgroup.cc +++ b/src/libutil/cgroup.cc @@ -2,6 +2,7 @@ #include "cgroup.hh" #include "util.hh" +#include "file-system.hh" #include "finally.hh" #include diff --git a/src/libutil/compression.cc b/src/libutil/compression.cc index ba0847cde..d06f1f87b 100644 --- a/src/libutil/compression.cc +++ b/src/libutil/compression.cc @@ -1,6 +1,6 @@ #include "compression.hh" +#include "signals.hh" #include "tarfile.hh" -#include "util.hh" #include "finally.hh" #include "logging.hh" diff --git a/src/libutil/config.cc b/src/libutil/config.cc index 8e06273ee..0bf36c987 100644 --- a/src/libutil/config.cc +++ b/src/libutil/config.cc @@ -2,6 +2,8 @@ #include "args.hh" 
#include "abstract-setting-to-json.hh" #include "experimental-features.hh" +#include "util.hh" +#include "file-system.hh" #include "config-impl.hh" diff --git a/src/libutil/current-process.cc b/src/libutil/current-process.cc new file mode 100644 index 000000000..352a6a0fb --- /dev/null +++ b/src/libutil/current-process.cc @@ -0,0 +1,110 @@ +#include "current-process.hh" +#include "namespaces.hh" +#include "util.hh" +#include "finally.hh" +#include "file-system.hh" +#include "processes.hh" +#include "signals.hh" + +#ifdef __APPLE__ +# include +#endif + +#if __linux__ +# include +# include +# include "cgroup.hh" +#endif + +#include + +namespace nix { + +unsigned int getMaxCPU() +{ + #if __linux__ + try { + auto cgroupFS = getCgroupFS(); + if (!cgroupFS) return 0; + + auto cgroups = getCgroups("/proc/self/cgroup"); + auto cgroup = cgroups[""]; + if (cgroup == "") return 0; + + auto cpuFile = *cgroupFS + "/" + cgroup + "/cpu.max"; + + auto cpuMax = readFile(cpuFile); + auto cpuMaxParts = tokenizeString>(cpuMax, " \n"); + auto quota = cpuMaxParts[0]; + auto period = cpuMaxParts[1]; + if (quota != "max") + return std::ceil(std::stoi(quota) / std::stof(period)); + } catch (Error &) { ignoreException(lvlDebug); } + #endif + + return 0; +} + + +////////////////////////////////////////////////////////////////////// + + +#if __linux__ +rlim_t savedStackSize = 0; +#endif + +void setStackSize(size_t stackSize) +{ + #if __linux__ + struct rlimit limit; + if (getrlimit(RLIMIT_STACK, &limit) == 0 && limit.rlim_cur < stackSize) { + savedStackSize = limit.rlim_cur; + limit.rlim_cur = stackSize; + setrlimit(RLIMIT_STACK, &limit); + } + #endif +} + +void restoreProcessContext(bool restoreMounts) +{ + restoreSignals(); + if (restoreMounts) { + restoreMountNamespace(); + } + + #if __linux__ + if (savedStackSize) { + struct rlimit limit; + if (getrlimit(RLIMIT_STACK, &limit) == 0) { + limit.rlim_cur = savedStackSize; + setrlimit(RLIMIT_STACK, &limit); + } + } + #endif +} + + +////////////////////////////////////////////////////////////////////// + + +std::optional getSelfExe() +{ + static auto cached = []() -> std::optional + { + #if __linux__ + return readLink("/proc/self/exe"); + #elif __APPLE__ + char buf[1024]; + uint32_t size = sizeof(buf); + if (_NSGetExecutablePath(buf, &size) == 0) + return buf; + else + return std::nullopt; + #else + return std::nullopt; + #endif + }(); + return cached; +} + +} diff --git a/src/libutil/current-process.hh b/src/libutil/current-process.hh new file mode 100644 index 000000000..826d6fe20 --- /dev/null +++ b/src/libutil/current-process.hh @@ -0,0 +1,34 @@ +#pragma once +///@file + +#include + +#include "types.hh" + +namespace nix { + +/** + * If cgroups are active, attempt to calculate the number of CPUs available. + * If cgroups are unavailable or if cpu.max is set to "max", return 0. + */ +unsigned int getMaxCPU(); + +/** + * Change the stack size. + */ +void setStackSize(size_t stackSize); + +/** + * Restore the original inherited Unix process context (such as signal + * masks, stack size). + + * See startSignalHandlerThread(), saveSignalMask(). + */ +void restoreProcessContext(bool restoreMounts = true); + +/** + * @return the path of the current executable. 
+ */ +std::optional getSelfExe(); + +} diff --git a/src/libutil/environment-variables.cc b/src/libutil/environment-variables.cc new file mode 100644 index 000000000..6618d7872 --- /dev/null +++ b/src/libutil/environment-variables.cc @@ -0,0 +1,49 @@ +#include "util.hh" +#include "environment-variables.hh" + +extern char * * environ __attribute__((weak)); + +namespace nix { + +std::optional getEnv(const std::string & key) +{ + char * value = getenv(key.c_str()); + if (!value) return {}; + return std::string(value); +} + +std::optional getEnvNonEmpty(const std::string & key) { + auto value = getEnv(key); + if (value == "") return {}; + return value; +} + +std::map getEnv() +{ + std::map env; + for (size_t i = 0; environ[i]; ++i) { + auto s = environ[i]; + auto eq = strchr(s, '='); + if (!eq) + // invalid env, just keep going + continue; + env.emplace(std::string(s, eq), std::string(eq + 1)); + } + return env; +} + + +void clearEnv() +{ + for (auto & name : getEnv()) + unsetenv(name.first.c_str()); +} + +void replaceEnv(const std::map & newEnv) +{ + clearEnv(); + for (auto & newEnvVar : newEnv) + setenv(newEnvVar.first.c_str(), newEnvVar.second.c_str(), 1); +} + +} diff --git a/src/libutil/environment-variables.hh b/src/libutil/environment-variables.hh new file mode 100644 index 000000000..21eb4619b --- /dev/null +++ b/src/libutil/environment-variables.hh @@ -0,0 +1,41 @@ +#pragma once +/** + * @file + * + * Utilities for working with the current process's environment + * variables. + */ + +#include + +#include "types.hh" + +namespace nix { + +/** + * @return an environment variable. + */ +std::optional getEnv(const std::string & key); + +/** + * @return a non empty environment variable. Returns nullopt if the env + * variable is set to "" + */ +std::optional getEnvNonEmpty(const std::string & key); + +/** + * Get the entire environment. + */ +std::map getEnv(); + +/** + * Clear the environment. + */ +void clearEnv(); + +/** + * Replace the entire environment with the given one. 
+ */ +void replaceEnv(const std::map & newEnv); + +} diff --git a/src/libutil/error.cc b/src/libutil/error.cc index 1badc1069..8488e7e21 100644 --- a/src/libutil/error.cc +++ b/src/libutil/error.cc @@ -1,4 +1,7 @@ #include "error.hh" +#include "environment-variables.hh" +#include "signals.hh" +#include "terminal.hh" #include #include diff --git a/src/libutil/file-descriptor.cc b/src/libutil/file-descriptor.cc new file mode 100644 index 000000000..38dd70c8e --- /dev/null +++ b/src/libutil/file-descriptor.cc @@ -0,0 +1,254 @@ +#include "file-system.hh" +#include "signals.hh" +#include "finally.hh" +#include "serialise.hh" + +#include +#include + +namespace nix { + +std::string readFile(int fd) +{ + struct stat st; + if (fstat(fd, &st) == -1) + throw SysError("statting file"); + + return drainFD(fd, true, st.st_size); +} + + +void readFull(int fd, char * buf, size_t count) +{ + while (count) { + checkInterrupt(); + ssize_t res = read(fd, buf, count); + if (res == -1) { + if (errno == EINTR) continue; + throw SysError("reading from file"); + } + if (res == 0) throw EndOfFile("unexpected end-of-file"); + count -= res; + buf += res; + } +} + + +void writeFull(int fd, std::string_view s, bool allowInterrupts) +{ + while (!s.empty()) { + if (allowInterrupts) checkInterrupt(); + ssize_t res = write(fd, s.data(), s.size()); + if (res == -1 && errno != EINTR) + throw SysError("writing to file"); + if (res > 0) + s.remove_prefix(res); + } +} + + +std::string readLine(int fd) +{ + std::string s; + while (1) { + checkInterrupt(); + char ch; + // FIXME: inefficient + ssize_t rd = read(fd, &ch, 1); + if (rd == -1) { + if (errno != EINTR) + throw SysError("reading a line"); + } else if (rd == 0) + throw EndOfFile("unexpected EOF reading a line"); + else { + if (ch == '\n') return s; + s += ch; + } + } +} + + +void writeLine(int fd, std::string s) +{ + s += '\n'; + writeFull(fd, s); +} + + +std::string drainFD(int fd, bool block, const size_t reserveSize) +{ + // the parser needs two extra bytes to append terminating characters, other users will + // not care very much about the extra memory. + StringSink sink(reserveSize + 2); + drainFD(fd, sink, block); + return std::move(sink.s); +} + + +void drainFD(int fd, Sink & sink, bool block) +{ + // silence GCC maybe-uninitialized warning in finally + int saved = 0; + + if (!block) { + saved = fcntl(fd, F_GETFL); + if (fcntl(fd, F_SETFL, saved | O_NONBLOCK) == -1) + throw SysError("making file descriptor non-blocking"); + } + + Finally finally([&]() { + if (!block) { + if (fcntl(fd, F_SETFL, saved) == -1) + throw SysError("making file descriptor blocking"); + } + }); + + std::vector buf(64 * 1024); + while (1) { + checkInterrupt(); + ssize_t rd = read(fd, buf.data(), buf.size()); + if (rd == -1) { + if (!block && (errno == EAGAIN || errno == EWOULDBLOCK)) + break; + if (errno != EINTR) + throw SysError("reading from file"); + } + else if (rd == 0) break; + else sink({(char *) buf.data(), (size_t) rd}); + } +} + +////////////////////////////////////////////////////////////////////// + +AutoCloseFD::AutoCloseFD() : fd{-1} {} + + +AutoCloseFD::AutoCloseFD(int fd) : fd{fd} {} + + +AutoCloseFD::AutoCloseFD(AutoCloseFD && that) : fd{that.fd} +{ + that.fd = -1; +} + + +AutoCloseFD & AutoCloseFD::operator =(AutoCloseFD && that) +{ + close(); + fd = that.fd; + that.fd = -1; + return *this; +} + + +AutoCloseFD::~AutoCloseFD() +{ + try { + close(); + } catch (...) 
{ + ignoreException(); + } +} + + +int AutoCloseFD::get() const +{ + return fd; +} + + +void AutoCloseFD::close() +{ + if (fd != -1) { + if (::close(fd) == -1) + /* This should never happen. */ + throw SysError("closing file descriptor %1%", fd); + fd = -1; + } +} + +void AutoCloseFD::fsync() +{ + if (fd != -1) { + int result; +#if __APPLE__ + result = ::fcntl(fd, F_FULLFSYNC); +#else + result = ::fsync(fd); +#endif + if (result == -1) + throw SysError("fsync file descriptor %1%", fd); + } +} + + +AutoCloseFD::operator bool() const +{ + return fd != -1; +} + + +int AutoCloseFD::release() +{ + int oldFD = fd; + fd = -1; + return oldFD; +} + + +void Pipe::create() +{ + int fds[2]; +#if HAVE_PIPE2 + if (pipe2(fds, O_CLOEXEC) != 0) throw SysError("creating pipe"); +#else + if (pipe(fds) != 0) throw SysError("creating pipe"); + closeOnExec(fds[0]); + closeOnExec(fds[1]); +#endif + readSide = fds[0]; + writeSide = fds[1]; +} + + +void Pipe::close() +{ + readSide.close(); + writeSide.close(); +} + +////////////////////////////////////////////////////////////////////// + +void closeMostFDs(const std::set & exceptions) +{ +#if __linux__ + try { + for (auto & s : readDirectory("/proc/self/fd")) { + auto fd = std::stoi(s.name); + if (!exceptions.count(fd)) { + debug("closing leaked FD %d", fd); + close(fd); + } + } + return; + } catch (SysError &) { + } +#endif + + int maxFD = 0; + maxFD = sysconf(_SC_OPEN_MAX); + for (int fd = 0; fd < maxFD; ++fd) + if (!exceptions.count(fd)) + close(fd); /* ignore result */ +} + + +void closeOnExec(int fd) +{ + int prev; + if ((prev = fcntl(fd, F_GETFD, 0)) == -1 || + fcntl(fd, F_SETFD, prev | FD_CLOEXEC) == -1) + throw SysError("setting close-on-exec flag"); +} + +} diff --git a/src/libutil/file-descriptor.hh b/src/libutil/file-descriptor.hh new file mode 100644 index 000000000..80ec86135 --- /dev/null +++ b/src/libutil/file-descriptor.hh @@ -0,0 +1,84 @@ +#pragma once +///@file + +#include "types.hh" +#include "error.hh" + +namespace nix { + +struct Sink; +struct Source; + +/** + * Read the contents of a resource into a string. + */ +std::string readFile(int fd); + +/** + * Wrappers arount read()/write() that read/write exactly the + * requested number of bytes. + */ +void readFull(int fd, char * buf, size_t count); + +void writeFull(int fd, std::string_view s, bool allowInterrupts = true); + +/** + * Read a line from a file descriptor. + */ +std::string readLine(int fd); + +/** + * Write a line to a file descriptor. + */ +void writeLine(int fd, std::string s); + +/** + * Read a file descriptor until EOF occurs. + */ +std::string drainFD(int fd, bool block = true, const size_t reserveSize=0); + +void drainFD(int fd, Sink & sink, bool block = true); + +/** + * Automatic cleanup of resources. + */ +class AutoCloseFD +{ + int fd; +public: + AutoCloseFD(); + AutoCloseFD(int fd); + AutoCloseFD(const AutoCloseFD & fd) = delete; + AutoCloseFD(AutoCloseFD&& fd); + ~AutoCloseFD(); + AutoCloseFD& operator =(const AutoCloseFD & fd) = delete; + AutoCloseFD& operator =(AutoCloseFD&& fd); + int get() const; + explicit operator bool() const; + int release(); + void close(); + void fsync(); +}; + +class Pipe +{ +public: + AutoCloseFD readSide, writeSide; + void create(); + void close(); +}; + +/** + * Close all file descriptors except those listed in the given set. + * Good practice in child processes. + */ +void closeMostFDs(const std::set & exceptions); + +/** + * Set the close-on-exec flag for the given file descriptor. 
+ */ +void closeOnExec(int fd); + +MakeError(EndOfFile, Error); + +} diff --git a/src/libutil/file-system.cc b/src/libutil/file-system.cc new file mode 100644 index 000000000..c96effff9 --- /dev/null +++ b/src/libutil/file-system.cc @@ -0,0 +1,647 @@ +#include "environment-variables.hh" +#include "file-system.hh" +#include "signals.hh" +#include "finally.hh" +#include "serialise.hh" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace fs = std::filesystem; + +namespace nix { + +Path absPath(Path path, std::optional dir, bool resolveSymlinks) +{ + if (path[0] != '/') { + if (!dir) { +#ifdef __GNU__ + /* GNU (aka. GNU/Hurd) doesn't have any limitation on path + lengths and doesn't define `PATH_MAX'. */ + char *buf = getcwd(NULL, 0); + if (buf == NULL) +#else + char buf[PATH_MAX]; + if (!getcwd(buf, sizeof(buf))) +#endif + throw SysError("cannot get cwd"); + path = concatStrings(buf, "/", path); +#ifdef __GNU__ + free(buf); +#endif + } else + path = concatStrings(*dir, "/", path); + } + return canonPath(path, resolveSymlinks); +} + + +Path canonPath(PathView path, bool resolveSymlinks) +{ + assert(path != ""); + + std::string s; + s.reserve(256); + + if (path[0] != '/') + throw Error("not an absolute path: '%1%'", path); + + std::string temp; + + /* Count the number of times we follow a symlink and stop at some + arbitrary (but high) limit to prevent infinite loops. */ + unsigned int followCount = 0, maxFollow = 1024; + + while (1) { + + /* Skip slashes. */ + while (!path.empty() && path[0] == '/') path.remove_prefix(1); + if (path.empty()) break; + + /* Ignore `.'. */ + if (path == "." || path.substr(0, 2) == "./") + path.remove_prefix(1); + + /* If `..', delete the last component. */ + else if (path == ".." || path.substr(0, 3) == "../") + { + if (!s.empty()) s.erase(s.rfind('/')); + path.remove_prefix(2); + } + + /* Normal component; copy it. */ + else { + s += '/'; + if (const auto slash = path.find('/'); slash == std::string::npos) { + s += path; + path = {}; + } else { + s += path.substr(0, slash); + path = path.substr(slash); + } + + /* If s points to a symlink, resolve it and continue from there */ + if (resolveSymlinks && isLink(s)) { + if (++followCount >= maxFollow) + throw Error("infinite symlink recursion in path '%1%'", path); + temp = concatStrings(readLink(s), path); + path = temp; + if (!temp.empty() && temp[0] == '/') { + s.clear(); /* restart for symlinks pointing to absolute path */ + } else { + s = dirOf(s); + if (s == "/") { // we don’t want trailing slashes here, which dirOf only produces if s = / + s.clear(); + } + } + } + } + } + + return s.empty() ? "/" : std::move(s); +} + + +Path dirOf(const PathView path) +{ + Path::size_type pos = path.rfind('/'); + if (pos == std::string::npos) + return "."; + return pos == 0 ? 
"/" : Path(path, 0, pos); +} + + +std::string_view baseNameOf(std::string_view path) +{ + if (path.empty()) + return ""; + + auto last = path.size() - 1; + if (path[last] == '/' && last > 0) + last -= 1; + + auto pos = path.rfind('/', last); + if (pos == std::string::npos) + pos = 0; + else + pos += 1; + + return path.substr(pos, last - pos + 1); +} + + +bool isInDir(std::string_view path, std::string_view dir) +{ + return path.substr(0, 1) == "/" + && path.substr(0, dir.size()) == dir + && path.size() >= dir.size() + 2 + && path[dir.size()] == '/'; +} + + +bool isDirOrInDir(std::string_view path, std::string_view dir) +{ + return path == dir || isInDir(path, dir); +} + + +struct stat stat(const Path & path) +{ + struct stat st; + if (stat(path.c_str(), &st)) + throw SysError("getting status of '%1%'", path); + return st; +} + + +struct stat lstat(const Path & path) +{ + struct stat st; + if (lstat(path.c_str(), &st)) + throw SysError("getting status of '%1%'", path); + return st; +} + + +bool pathExists(const Path & path) +{ + int res; + struct stat st; + res = lstat(path.c_str(), &st); + if (!res) return true; + if (errno != ENOENT && errno != ENOTDIR) + throw SysError("getting status of %1%", path); + return false; +} + +bool pathAccessible(const Path & path) +{ + try { + return pathExists(path); + } catch (SysError & e) { + // swallow EPERM + if (e.errNo == EPERM) return false; + throw; + } +} + + +Path readLink(const Path & path) +{ + checkInterrupt(); + std::vector buf; + for (ssize_t bufSize = PATH_MAX/4; true; bufSize += bufSize/2) { + buf.resize(bufSize); + ssize_t rlSize = readlink(path.c_str(), buf.data(), bufSize); + if (rlSize == -1) + if (errno == EINVAL) + throw Error("'%1%' is not a symlink", path); + else + throw SysError("reading symbolic link '%1%'", path); + else if (rlSize < bufSize) + return std::string(buf.data(), rlSize); + } +} + + +bool isLink(const Path & path) +{ + struct stat st = lstat(path); + return S_ISLNK(st.st_mode); +} + + +DirEntries readDirectory(DIR *dir, const Path & path) +{ + DirEntries entries; + entries.reserve(64); + + struct dirent * dirent; + while (errno = 0, dirent = readdir(dir)) { /* sic */ + checkInterrupt(); + std::string name = dirent->d_name; + if (name == "." 
|| name == "..") continue; + entries.emplace_back(name, dirent->d_ino, +#ifdef HAVE_STRUCT_DIRENT_D_TYPE + dirent->d_type +#else + DT_UNKNOWN +#endif + ); + } + if (errno) throw SysError("reading directory '%1%'", path); + + return entries; +} + +DirEntries readDirectory(const Path & path) +{ + AutoCloseDir dir(opendir(path.c_str())); + if (!dir) throw SysError("opening directory '%1%'", path); + + return readDirectory(dir.get(), path); +} + + +unsigned char getFileType(const Path & path) +{ + struct stat st = lstat(path); + if (S_ISDIR(st.st_mode)) return DT_DIR; + if (S_ISLNK(st.st_mode)) return DT_LNK; + if (S_ISREG(st.st_mode)) return DT_REG; + return DT_UNKNOWN; +} + + +std::string readFile(const Path & path) +{ + AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC); + if (!fd) + throw SysError("opening file '%1%'", path); + return readFile(fd.get()); +} + + +void readFile(const Path & path, Sink & sink) +{ + AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC); + if (!fd) + throw SysError("opening file '%s'", path); + drainFD(fd.get(), sink); +} + + +void writeFile(const Path & path, std::string_view s, mode_t mode, bool sync) +{ + AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode); + if (!fd) + throw SysError("opening file '%1%'", path); + try { + writeFull(fd.get(), s); + } catch (Error & e) { + e.addTrace({}, "writing file '%1%'", path); + throw; + } + if (sync) + fd.fsync(); + // Explicitly close to make sure exceptions are propagated. + fd.close(); + if (sync) + syncParent(path); +} + + +void writeFile(const Path & path, Source & source, mode_t mode, bool sync) +{ + AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode); + if (!fd) + throw SysError("opening file '%1%'", path); + + std::vector buf(64 * 1024); + + try { + while (true) { + try { + auto n = source.read(buf.data(), buf.size()); + writeFull(fd.get(), {buf.data(), n}); + } catch (EndOfFile &) { break; } + } + } catch (Error & e) { + e.addTrace({}, "writing file '%1%'", path); + throw; + } + if (sync) + fd.fsync(); + // Explicitly close to make sure exceptions are propagated. + fd.close(); + if (sync) + syncParent(path); +} + +void syncParent(const Path & path) +{ + AutoCloseFD fd = open(dirOf(path).c_str(), O_RDONLY, 0); + if (!fd) + throw SysError("opening file '%1%'", path); + fd.fsync(); +} + + +static void _deletePath(int parentfd, const Path & path, uint64_t & bytesFreed) +{ + checkInterrupt(); + + std::string name(baseNameOf(path)); + + struct stat st; + if (fstatat(parentfd, name.c_str(), &st, AT_SYMLINK_NOFOLLOW) == -1) { + if (errno == ENOENT) return; + throw SysError("getting status of '%1%'", path); + } + + if (!S_ISDIR(st.st_mode)) { + /* We are about to delete a file. Will it likely free space? */ + + switch (st.st_nlink) { + /* Yes: last link. */ + case 1: + bytesFreed += st.st_size; + break; + /* Maybe: yes, if 'auto-optimise-store' or manual optimisation + was performed. Instead of checking for real let's assume + it's an optimised file and space will be freed. + + In worst case we will double count on freed space for files + with exactly two hardlinks for unoptimised packages. + */ + case 2: + bytesFreed += st.st_size; + break; + /* No: 3+ links. */ + default: + break; + } + } + + if (S_ISDIR(st.st_mode)) { + /* Make the directory accessible. 
*/ + const auto PERM_MASK = S_IRUSR | S_IWUSR | S_IXUSR; + if ((st.st_mode & PERM_MASK) != PERM_MASK) { + if (fchmodat(parentfd, name.c_str(), st.st_mode | PERM_MASK, 0) == -1) + throw SysError("chmod '%1%'", path); + } + + int fd = openat(parentfd, path.c_str(), O_RDONLY); + if (fd == -1) + throw SysError("opening directory '%1%'", path); + AutoCloseDir dir(fdopendir(fd)); + if (!dir) + throw SysError("opening directory '%1%'", path); + for (auto & i : readDirectory(dir.get(), path)) + _deletePath(dirfd(dir.get()), path + "/" + i.name, bytesFreed); + } + + int flags = S_ISDIR(st.st_mode) ? AT_REMOVEDIR : 0; + if (unlinkat(parentfd, name.c_str(), flags) == -1) { + if (errno == ENOENT) return; + throw SysError("cannot unlink '%1%'", path); + } +} + +static void _deletePath(const Path & path, uint64_t & bytesFreed) +{ + Path dir = dirOf(path); + if (dir == "") + dir = "/"; + + AutoCloseFD dirfd{open(dir.c_str(), O_RDONLY)}; + if (!dirfd) { + if (errno == ENOENT) return; + throw SysError("opening directory '%1%'", path); + } + + _deletePath(dirfd.get(), path, bytesFreed); +} + + +void deletePath(const Path & path) +{ + uint64_t dummy; + deletePath(path, dummy); +} + + +Paths createDirs(const Path & path) +{ + Paths created; + if (path == "/") return created; + + struct stat st; + if (lstat(path.c_str(), &st) == -1) { + created = createDirs(dirOf(path)); + if (mkdir(path.c_str(), 0777) == -1 && errno != EEXIST) + throw SysError("creating directory '%1%'", path); + st = lstat(path); + created.push_back(path); + } + + if (S_ISLNK(st.st_mode) && stat(path.c_str(), &st) == -1) + throw SysError("statting symlink '%1%'", path); + + if (!S_ISDIR(st.st_mode)) throw Error("'%1%' is not a directory", path); + + return created; +} + + +void deletePath(const Path & path, uint64_t & bytesFreed) +{ + //Activity act(*logger, lvlDebug, "recursively deleting path '%1%'", path); + bytesFreed = 0; + _deletePath(path, bytesFreed); +} + + +////////////////////////////////////////////////////////////////////// + +AutoDelete::AutoDelete() : del{false} {} + +AutoDelete::AutoDelete(const std::string & p, bool recursive) : path(p) +{ + del = true; + this->recursive = recursive; +} + +AutoDelete::~AutoDelete() +{ + try { + if (del) { + if (recursive) + deletePath(path); + else { + if (remove(path.c_str()) == -1) + throw SysError("cannot unlink '%1%'", path); + } + } + } catch (...) { + ignoreException(); + } +} + +void AutoDelete::cancel() +{ + del = false; +} + +void AutoDelete::reset(const Path & p, bool recursive) { + path = p; + this->recursive = recursive; + del = true; +} + +////////////////////////////////////////////////////////////////////// + +////////////////////////////////////////////////////////////////////// + +static Path tempName(Path tmpRoot, const Path & prefix, bool includePid, + std::atomic & counter) +{ + tmpRoot = canonPath(tmpRoot.empty() ? getEnv("TMPDIR").value_or("/tmp") : tmpRoot, true); + if (includePid) + return fmt("%1%/%2%-%3%-%4%", tmpRoot, prefix, getpid(), counter++); + else + return fmt("%1%/%2%-%3%", tmpRoot, prefix, counter++); +} + +Path createTempDir(const Path & tmpRoot, const Path & prefix, + bool includePid, bool useGlobalCounter, mode_t mode) +{ + static std::atomic globalCounter = 0; + std::atomic localCounter = 0; + auto & counter(useGlobalCounter ? 
globalCounter : localCounter); + + while (1) { + checkInterrupt(); + Path tmpDir = tempName(tmpRoot, prefix, includePid, counter); + if (mkdir(tmpDir.c_str(), mode) == 0) { +#if __FreeBSD__ + /* Explicitly set the group of the directory. This is to + work around around problems caused by BSD's group + ownership semantics (directories inherit the group of + the parent). For instance, the group of /tmp on + FreeBSD is "wheel", so all directories created in /tmp + will be owned by "wheel"; but if the user is not in + "wheel", then "tar" will fail to unpack archives that + have the setgid bit set on directories. */ + if (chown(tmpDir.c_str(), (uid_t) -1, getegid()) != 0) + throw SysError("setting group of directory '%1%'", tmpDir); +#endif + return tmpDir; + } + if (errno != EEXIST) + throw SysError("creating directory '%1%'", tmpDir); + } +} + + +std::pair createTempFile(const Path & prefix) +{ + Path tmpl(getEnv("TMPDIR").value_or("/tmp") + "/" + prefix + ".XXXXXX"); + // Strictly speaking, this is UB, but who cares... + // FIXME: use O_TMPFILE. + AutoCloseFD fd(mkstemp((char *) tmpl.c_str())); + if (!fd) + throw SysError("creating temporary file '%s'", tmpl); + closeOnExec(fd.get()); + return {std::move(fd), tmpl}; +} + +void createSymlink(const Path & target, const Path & link) +{ + if (symlink(target.c_str(), link.c_str())) + throw SysError("creating symlink from '%1%' to '%2%'", link, target); +} + +void replaceSymlink(const Path & target, const Path & link) +{ + for (unsigned int n = 0; true; n++) { + Path tmp = canonPath(fmt("%s/.%d_%s", dirOf(link), n, baseNameOf(link))); + + try { + createSymlink(target, tmp); + } catch (SysError & e) { + if (e.errNo == EEXIST) continue; + throw; + } + + renameFile(tmp, link); + + break; + } +} + +void setWriteTime(const fs::path & p, const struct stat & st) +{ + struct timeval times[2]; + times[0] = { + .tv_sec = st.st_atime, + .tv_usec = 0, + }; + times[1] = { + .tv_sec = st.st_mtime, + .tv_usec = 0, + }; + if (lutimes(p.c_str(), times) != 0) + throw SysError("changing modification time of '%s'", p); +} + +void copy(const fs::directory_entry & from, const fs::path & to, bool andDelete) +{ + // TODO: Rewrite the `is_*` to use `symlink_status()` + auto statOfFrom = lstat(from.path().c_str()); + auto fromStatus = from.symlink_status(); + + // Mark the directory as writable so that we can delete its children + if (andDelete && fs::is_directory(fromStatus)) { + fs::permissions(from.path(), fs::perms::owner_write, fs::perm_options::add | fs::perm_options::nofollow); + } + + + if (fs::is_symlink(fromStatus) || fs::is_regular_file(fromStatus)) { + fs::copy(from.path(), to, fs::copy_options::copy_symlinks | fs::copy_options::overwrite_existing); + } else if (fs::is_directory(fromStatus)) { + fs::create_directory(to); + for (auto & entry : fs::directory_iterator(from.path())) { + copy(entry, to / entry.path().filename(), andDelete); + } + } else { + throw Error("file '%s' has an unsupported type", from.path()); + } + + setWriteTime(to, statOfFrom); + if (andDelete) { + if (!fs::is_symlink(fromStatus)) + fs::permissions(from.path(), fs::perms::owner_write, fs::perm_options::add | fs::perm_options::nofollow); + fs::remove(from.path()); + } +} + +void renameFile(const Path & oldName, const Path & newName) +{ + fs::rename(oldName, newName); +} + +void moveFile(const Path & oldName, const Path & newName) +{ + try { + renameFile(oldName, newName); + } catch (fs::filesystem_error & e) { + auto oldPath = fs::path(oldName); + auto newPath = fs::path(newName); + // 
For the move to be as atomic as possible, copy to a temporary + // directory + fs::path temp = createTempDir(newPath.parent_path(), "rename-tmp"); + Finally removeTemp = [&]() { fs::remove(temp); }; + auto tempCopyTarget = temp / "copy-target"; + if (e.code().value() == EXDEV) { + fs::remove(newPath); + warn("Can’t rename %s as %s, copying instead", oldName, newName); + copy(fs::directory_entry(oldPath), tempCopyTarget, true); + renameFile(tempCopyTarget, newPath); + } + } +} + +////////////////////////////////////////////////////////////////////// + +} diff --git a/src/libutil/file-system.hh b/src/libutil/file-system.hh new file mode 100644 index 000000000..4637507b3 --- /dev/null +++ b/src/libutil/file-system.hh @@ -0,0 +1,238 @@ +#pragma once +/** + * @file + * + * Utiltities for working with the file sytem and file paths. + */ + +#include "types.hh" +#include "error.hh" +#include "logging.hh" +#include "file-descriptor.hh" + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#ifndef HAVE_STRUCT_DIRENT_D_TYPE +#define DT_UNKNOWN 0 +#define DT_REG 1 +#define DT_LNK 2 +#define DT_DIR 3 +#endif + +namespace nix { + +struct Sink; +struct Source; + +/** + * @return An absolutized path, resolving paths relative to the + * specified directory, or the current directory otherwise. The path + * is also canonicalised. + */ +Path absPath(Path path, + std::optional dir = {}, + bool resolveSymlinks = false); + +/** + * Canonicalise a path by removing all `.` or `..` components and + * double or trailing slashes. Optionally resolves all symlink + * components such that each component of the resulting path is *not* + * a symbolic link. + */ +Path canonPath(PathView path, bool resolveSymlinks = false); + +/** + * @return The directory part of the given canonical path, i.e., + * everything before the final `/`. If the path is the root or an + * immediate child thereof (e.g., `/foo`), this means `/` + * is returned. + */ +Path dirOf(const PathView path); + +/** + * @return the base name of the given canonical path, i.e., everything + * following the final `/` (trailing slashes are removed). + */ +std::string_view baseNameOf(std::string_view path); + +/** + * Check whether 'path' is a descendant of 'dir'. Both paths must be + * canonicalized. + */ +bool isInDir(std::string_view path, std::string_view dir); + +/** + * Check whether 'path' is equal to 'dir' or a descendant of + * 'dir'. Both paths must be canonicalized. + */ +bool isDirOrInDir(std::string_view path, std::string_view dir); + +/** + * Get status of `path`. + */ +struct stat stat(const Path & path); +struct stat lstat(const Path & path); + +/** + * @return true iff the given path exists. + */ +bool pathExists(const Path & path); + +/** + * A version of pathExists that returns false on a permission error. + * Useful for inferring default paths across directories that might not + * be readable. + * @return true iff the given path can be accessed and exists + */ +bool pathAccessible(const Path & path); + +/** + * Read the contents (target) of a symbolic link. The result is not + * in any way canonicalised. + */ +Path readLink(const Path & path); + +bool isLink(const Path & path); + +/** + * Read the contents of a directory. The entries `.` and `..` are + * removed. 
+ */ +struct DirEntry +{ + std::string name; + ino_t ino; + /** + * one of DT_* + */ + unsigned char type; + DirEntry(std::string name, ino_t ino, unsigned char type) + : name(std::move(name)), ino(ino), type(type) { } +}; + +typedef std::vector DirEntries; + +DirEntries readDirectory(const Path & path); + +unsigned char getFileType(const Path & path); + +/** + * Read the contents of a file into a string. + */ +std::string readFile(const Path & path); +void readFile(const Path & path, Sink & sink); + +/** + * Write a string to a file. + */ +void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, bool sync = false); + +void writeFile(const Path & path, Source & source, mode_t mode = 0666, bool sync = false); + +/** + * Flush a file's parent directory to disk + */ +void syncParent(const Path & path); + +/** + * Delete a path; i.e., in the case of a directory, it is deleted + * recursively. It's not an error if the path does not exist. The + * second variant returns the number of bytes and blocks freed. + */ +void deletePath(const Path & path); + +void deletePath(const Path & path, uint64_t & bytesFreed); + +/** + * Create a directory and all its parents, if necessary. Returns the + * list of created directories, in order of creation. + */ +Paths createDirs(const Path & path); +inline Paths createDirs(PathView path) +{ + return createDirs(Path(path)); +} + +/** + * Create a symlink. + */ +void createSymlink(const Path & target, const Path & link); + +/** + * Atomically create or replace a symlink. + */ +void replaceSymlink(const Path & target, const Path & link); + +void renameFile(const Path & src, const Path & dst); + +/** + * Similar to 'renameFile', but fallback to a copy+remove if `src` and `dst` + * are on a different filesystem. + * + * Beware that this might not be atomic because of the copy that happens behind + * the scenes + */ +void moveFile(const Path & src, const Path & dst); + + +/** + * Automatic cleanup of resources. + */ +class AutoDelete +{ + Path path; + bool del; + bool recursive; +public: + AutoDelete(); + AutoDelete(const Path & p, bool recursive = true); + ~AutoDelete(); + void cancel(); + void reset(const Path & p, bool recursive = true); + operator Path() const { return path; } + operator PathView() const { return path; } +}; + + +struct DIRDeleter +{ + void operator()(DIR * dir) const { + closedir(dir); + } +}; + +typedef std::unique_ptr AutoCloseDir; + + +/** + * Create a temporary directory. + */ +Path createTempDir(const Path & tmpRoot = "", const Path & prefix = "nix", + bool includePid = true, bool useGlobalCounter = true, mode_t mode = 0755); + +/** + * Create a temporary file, returning a file handle and its path. + */ +std::pair createTempFile(const Path & prefix = "nix"); + + +/** + * Used in various places. + */ +typedef std::function PathFilter; + +extern PathFilter defaultPathFilter; + +} diff --git a/src/libutil/filesystem.cc b/src/libutil/filesystem.cc deleted file mode 100644 index 11cc0c0e7..000000000 --- a/src/libutil/filesystem.cc +++ /dev/null @@ -1,162 +0,0 @@ -#include -#include -#include - -#include "finally.hh" -#include "util.hh" -#include "types.hh" - -namespace fs = std::filesystem; - -namespace nix { - -static Path tempName(Path tmpRoot, const Path & prefix, bool includePid, - std::atomic & counter) -{ - tmpRoot = canonPath(tmpRoot.empty() ? 
getEnv("TMPDIR").value_or("/tmp") : tmpRoot, true); - if (includePid) - return fmt("%1%/%2%-%3%-%4%", tmpRoot, prefix, getpid(), counter++); - else - return fmt("%1%/%2%-%3%", tmpRoot, prefix, counter++); -} - -Path createTempDir(const Path & tmpRoot, const Path & prefix, - bool includePid, bool useGlobalCounter, mode_t mode) -{ - static std::atomic globalCounter = 0; - std::atomic localCounter = 0; - auto & counter(useGlobalCounter ? globalCounter : localCounter); - - while (1) { - checkInterrupt(); - Path tmpDir = tempName(tmpRoot, prefix, includePid, counter); - if (mkdir(tmpDir.c_str(), mode) == 0) { -#if __FreeBSD__ - /* Explicitly set the group of the directory. This is to - work around around problems caused by BSD's group - ownership semantics (directories inherit the group of - the parent). For instance, the group of /tmp on - FreeBSD is "wheel", so all directories created in /tmp - will be owned by "wheel"; but if the user is not in - "wheel", then "tar" will fail to unpack archives that - have the setgid bit set on directories. */ - if (chown(tmpDir.c_str(), (uid_t) -1, getegid()) != 0) - throw SysError("setting group of directory '%1%'", tmpDir); -#endif - return tmpDir; - } - if (errno != EEXIST) - throw SysError("creating directory '%1%'", tmpDir); - } -} - - -std::pair createTempFile(const Path & prefix) -{ - Path tmpl(getEnv("TMPDIR").value_or("/tmp") + "/" + prefix + ".XXXXXX"); - // Strictly speaking, this is UB, but who cares... - // FIXME: use O_TMPFILE. - AutoCloseFD fd(mkstemp((char *) tmpl.c_str())); - if (!fd) - throw SysError("creating temporary file '%s'", tmpl); - closeOnExec(fd.get()); - return {std::move(fd), tmpl}; -} - -void createSymlink(const Path & target, const Path & link) -{ - if (symlink(target.c_str(), link.c_str())) - throw SysError("creating symlink from '%1%' to '%2%'", link, target); -} - -void replaceSymlink(const Path & target, const Path & link) -{ - for (unsigned int n = 0; true; n++) { - Path tmp = canonPath(fmt("%s/.%d_%s", dirOf(link), n, baseNameOf(link))); - - try { - createSymlink(target, tmp); - } catch (SysError & e) { - if (e.errNo == EEXIST) continue; - throw; - } - - renameFile(tmp, link); - - break; - } -} - -void setWriteTime(const fs::path & p, const struct stat & st) -{ - struct timeval times[2]; - times[0] = { - .tv_sec = st.st_atime, - .tv_usec = 0, - }; - times[1] = { - .tv_sec = st.st_mtime, - .tv_usec = 0, - }; - if (lutimes(p.c_str(), times) != 0) - throw SysError("changing modification time of '%s'", p); -} - -void copy(const fs::directory_entry & from, const fs::path & to, bool andDelete) -{ - // TODO: Rewrite the `is_*` to use `symlink_status()` - auto statOfFrom = lstat(from.path().c_str()); - auto fromStatus = from.symlink_status(); - - // Mark the directory as writable so that we can delete its children - if (andDelete && fs::is_directory(fromStatus)) { - fs::permissions(from.path(), fs::perms::owner_write, fs::perm_options::add | fs::perm_options::nofollow); - } - - - if (fs::is_symlink(fromStatus) || fs::is_regular_file(fromStatus)) { - fs::copy(from.path(), to, fs::copy_options::copy_symlinks | fs::copy_options::overwrite_existing); - } else if (fs::is_directory(fromStatus)) { - fs::create_directory(to); - for (auto & entry : fs::directory_iterator(from.path())) { - copy(entry, to / entry.path().filename(), andDelete); - } - } else { - throw Error("file '%s' has an unsupported type", from.path()); - } - - setWriteTime(to, statOfFrom); - if (andDelete) { - if (!fs::is_symlink(fromStatus)) - 
fs::permissions(from.path(), fs::perms::owner_write, fs::perm_options::add | fs::perm_options::nofollow); - fs::remove(from.path()); - } -} - -void renameFile(const Path & oldName, const Path & newName) -{ - fs::rename(oldName, newName); -} - -void moveFile(const Path & oldName, const Path & newName) -{ - try { - renameFile(oldName, newName); - } catch (fs::filesystem_error & e) { - auto oldPath = fs::path(oldName); - auto newPath = fs::path(newName); - // For the move to be as atomic as possible, copy to a temporary - // directory - fs::path temp = createTempDir(newPath.parent_path(), "rename-tmp"); - Finally removeTemp = [&]() { fs::remove(temp); }; - auto tempCopyTarget = temp / "copy-target"; - if (e.code().value() == EXDEV) { - fs::remove(newPath); - warn("Can’t rename %s as %s, copying instead", oldName, newName); - copy(fs::directory_entry(oldPath), tempCopyTarget, true); - renameFile(tempCopyTarget, newPath); - } - } -} - -} diff --git a/src/libutil/fs-sink.hh b/src/libutil/fs-sink.hh index c22edd390..bf54b7301 100644 --- a/src/libutil/fs-sink.hh +++ b/src/libutil/fs-sink.hh @@ -4,6 +4,7 @@ #include "types.hh" #include "serialise.hh" #include "source-accessor.hh" +#include "file-system.hh" namespace nix { diff --git a/src/libutil/hash.cc b/src/libutil/hash.cc index e297c245b..144f7ae7e 100644 --- a/src/libutil/hash.cc +++ b/src/libutil/hash.cc @@ -9,7 +9,6 @@ #include "hash.hh" #include "archive.hh" #include "split.hh" -#include "util.hh" #include #include diff --git a/src/libutil/hash.hh b/src/libutil/hash.hh index cab3e6eca..6ade6555c 100644 --- a/src/libutil/hash.hh +++ b/src/libutil/hash.hh @@ -3,6 +3,7 @@ #include "types.hh" #include "serialise.hh" +#include "file-system.hh" namespace nix { diff --git a/src/libutil/logging.cc b/src/libutil/logging.cc index 9d7a141b3..60b0865bf 100644 --- a/src/libutil/logging.cc +++ b/src/libutil/logging.cc @@ -1,4 +1,7 @@ #include "logging.hh" +#include "file-descriptor.hh" +#include "environment-variables.hh" +#include "terminal.hh" #include "util.hh" #include "config.hh" diff --git a/src/libutil/monitor-fd.hh b/src/libutil/monitor-fd.hh index 86d0115fc..228fb13f8 100644 --- a/src/libutil/monitor-fd.hh +++ b/src/libutil/monitor-fd.hh @@ -10,6 +10,8 @@ #include #include +#include "signals.hh" + namespace nix { diff --git a/src/libutil/namespaces.cc b/src/libutil/namespaces.cc index f66accb10..a789b321e 100644 --- a/src/libutil/namespaces.cc +++ b/src/libutil/namespaces.cc @@ -1,13 +1,22 @@ -#if __linux__ - -#include "namespaces.hh" +#include "current-process.hh" #include "util.hh" #include "finally.hh" +#include "file-system.hh" +#include "processes.hh" +#include "signals.hh" + +#if __linux__ +# include +# include +# include "cgroup.hh" +#endif #include namespace nix { +#if __linux__ + bool userNamespacesSupported() { static auto res = [&]() -> bool @@ -92,6 +101,60 @@ bool mountAndPidNamespacesSupported() return res; } +#endif + + +////////////////////////////////////////////////////////////////////// + +#if __linux__ +static AutoCloseFD fdSavedMountNamespace; +static AutoCloseFD fdSavedRoot; +#endif + +void saveMountNamespace() +{ +#if __linux__ + static std::once_flag done; + std::call_once(done, []() { + fdSavedMountNamespace = open("/proc/self/ns/mnt", O_RDONLY); + if (!fdSavedMountNamespace) + throw SysError("saving parent mount namespace"); + + fdSavedRoot = open("/proc/self/root", O_RDONLY); + }); +#endif } +void restoreMountNamespace() +{ +#if __linux__ + try { + auto savedCwd = absPath("."); + + if (fdSavedMountNamespace && 
setns(fdSavedMountNamespace.get(), CLONE_NEWNS) == -1) + throw SysError("restoring parent mount namespace"); + + if (fdSavedRoot) { + if (fchdir(fdSavedRoot.get())) + throw SysError("chdir into saved root"); + if (chroot(".")) + throw SysError("chroot into saved root"); + } + + if (chdir(savedCwd.c_str()) == -1) + throw SysError("restoring cwd"); + } catch (Error & e) { + debug(e.msg()); + } #endif +} + +void unshareFilesystem() +{ +#ifdef __linux__ + if (unshare(CLONE_FS) != 0 && errno != EPERM) + throw SysError("unsharing filesystem state in download thread"); +#endif +} + +} diff --git a/src/libutil/namespaces.hh b/src/libutil/namespaces.hh index 0b7eeb66c..7e4e921a8 100644 --- a/src/libutil/namespaces.hh +++ b/src/libutil/namespaces.hh @@ -1,8 +1,31 @@ #pragma once ///@file +#include + +#include "types.hh" + namespace nix { +/** + * Save the current mount namespace. Ignored if called more than + * once. + */ +void saveMountNamespace(); + +/** + * Restore the mount namespace saved by saveMountNamespace(). Ignored + * if saveMountNamespace() was never called. + */ +void restoreMountNamespace(); + +/** + * Cause this thread to not share any FS attributes with the main + * thread, because this causes setns() in restoreMountNamespace() to + * fail. + */ +void unshareFilesystem(); + #if __linux__ bool userNamespacesSupported(); diff --git a/src/libutil/posix-source-accessor.cc b/src/libutil/posix-source-accessor.cc index d5e32d989..dc96f84e5 100644 --- a/src/libutil/posix-source-accessor.cc +++ b/src/libutil/posix-source-accessor.cc @@ -1,4 +1,5 @@ #include "posix-source-accessor.hh" +#include "signals.hh" namespace nix { diff --git a/src/libutil/processes.cc b/src/libutil/processes.cc new file mode 100644 index 000000000..91a0ea66f --- /dev/null +++ b/src/libutil/processes.cc @@ -0,0 +1,421 @@ +#include "current-process.hh" +#include "environment-variables.hh" +#include "signals.hh" +#include "processes.hh" +#include "finally.hh" +#include "serialise.hh" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef __APPLE__ +# include +#endif + +#ifdef __linux__ +# include +# include +#endif + + +namespace nix { + +Pid::Pid() +{ +} + + +Pid::Pid(pid_t pid) + : pid(pid) +{ +} + + +Pid::~Pid() +{ + if (pid != -1) kill(); +} + + +void Pid::operator =(pid_t pid) +{ + if (this->pid != -1 && this->pid != pid) kill(); + this->pid = pid; + killSignal = SIGKILL; // reset signal to default +} + + +Pid::operator pid_t() +{ + return pid; +} + + +int Pid::kill() +{ + assert(pid != -1); + + debug("killing process %1%", pid); + + /* Send the requested signal to the child. If it has its own + process group, send the signal to every process in the child + process group (which hopefully includes *all* its children). */ + if (::kill(separatePG ? -pid : pid, killSignal) != 0) { + /* On BSDs, killing a process group will return EPERM if all + processes in the group are zombies (or something like + that). So try to detect and ignore that situation. 
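The save/restore pair above is easiest to read as a bracket around code that may switch mount namespaces. A hedged usage sketch follows; the worker-thread scenario and the `main` wrapper are illustrative assumptions, not part of this change:

    #include "namespaces.hh"

    #include <thread>

    using namespace nix;

    int main()
    {
        // Capture the original mount namespace and root once, early on;
        // later calls are ignored (std::call_once in the implementation).
        saveMountNamespace();

        // ... code here may enter another mount namespace or chroot ...

        // Helper threads detach their filesystem state so that they do not
        // prevent the main thread's later setns() from succeeding.
        std::thread worker([]() {
            unshareFilesystem();
            // ... long-running work, e.g. downloads ...
        });
        worker.join();

        // Return to the namespace and root captured at startup; failures
        // are only logged at debug level, not thrown.
        restoreMountNamespace();
        return 0;
    }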
*/ +#if __FreeBSD__ || __APPLE__ + if (errno != EPERM || ::kill(pid, 0) != 0) +#endif + logError(SysError("killing process %d", pid).info()); + } + + return wait(); +} + + +int Pid::wait() +{ + assert(pid != -1); + while (1) { + int status; + int res = waitpid(pid, &status, 0); + if (res == pid) { + pid = -1; + return status; + } + if (errno != EINTR) + throw SysError("cannot get exit status of PID %d", pid); + checkInterrupt(); + } +} + + +void Pid::setSeparatePG(bool separatePG) +{ + this->separatePG = separatePG; +} + + +void Pid::setKillSignal(int signal) +{ + this->killSignal = signal; +} + + +pid_t Pid::release() +{ + pid_t p = pid; + pid = -1; + return p; +} + + +void killUser(uid_t uid) +{ + debug("killing all processes running under uid '%1%'", uid); + + assert(uid != 0); /* just to be safe... */ + + /* The system call kill(-1, sig) sends the signal `sig' to all + users to which the current process can send signals. So we + fork a process, switch to uid, and send a mass kill. */ + + Pid pid = startProcess([&]() { + + if (setuid(uid) == -1) + throw SysError("setting uid"); + + while (true) { +#ifdef __APPLE__ + /* OSX's kill syscall takes a third parameter that, among + other things, determines if kill(-1, signo) affects the + calling process. In the OSX libc, it's set to true, + which means "follow POSIX", which we don't want here + */ + if (syscall(SYS_kill, -1, SIGKILL, false) == 0) break; +#else + if (kill(-1, SIGKILL) == 0) break; +#endif + if (errno == ESRCH || errno == EPERM) break; /* no more processes */ + if (errno != EINTR) + throw SysError("cannot kill processes for uid '%1%'", uid); + } + + _exit(0); + }); + + int status = pid.wait(); + if (status != 0) + throw Error("cannot kill processes for uid '%1%': %2%", uid, statusToString(status)); + + /* !!! We should really do some check to make sure that there are + no processes left running under `uid', but there is no portable + way to do so (I think). The most reliable way may be `ps -eo + uid | grep -q $uid'. */ +} + + +////////////////////////////////////////////////////////////////////// + + +/* Wrapper around vfork to prevent the child process from clobbering + the caller's stack frame in the parent. */ +static pid_t doFork(bool allowVfork, std::function fun) __attribute__((noinline)); +static pid_t doFork(bool allowVfork, std::function fun) +{ +#ifdef __linux__ + pid_t pid = allowVfork ? vfork() : fork(); +#else + pid_t pid = fork(); +#endif + if (pid != 0) return pid; + fun(); + abort(); +} + + +#if __linux__ +static int childEntry(void * arg) +{ + auto main = (std::function *) arg; + (*main)(); + return 1; +} +#endif + + +pid_t startProcess(std::function fun, const ProcessOptions & options) +{ + std::function wrapper = [&]() { + if (!options.allowVfork) + logger = makeSimpleLogger(); + try { +#if __linux__ + if (options.dieWithParent && prctl(PR_SET_PDEATHSIG, SIGKILL) == -1) + throw SysError("setting death signal"); +#endif + fun(); + } catch (std::exception & e) { + try { + std::cerr << options.errorPrefix << e.what() << "\n"; + } catch (...) { } + } catch (...) { } + if (options.runExitHandlers) + exit(1); + else + _exit(1); + }; + + pid_t pid = -1; + + if (options.cloneFlags) { + #ifdef __linux__ + // Not supported, since then we don't know when to free the stack. 
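Below the clone/fork plumbing, the intended calling pattern for `startProcess` and `Pid` is roughly the following; this is an illustrative sketch (the child body and exit codes are assumptions), not code from this patch:

    #include "processes.hh"

    #include <iostream>
    #include <unistd.h>

    using namespace nix;

    int main()
    {
        // Fork a child running the given closure. The closure must end in
        // _exit() or an exec*(); if it merely returns, the wrapper set up
        // by startProcess() exits the child with status 1.
        Pid child = startProcess([]() {
            std::cerr << "hello from child " << getpid() << "\n";
            _exit(0);
        });

        // Pid::wait() retries waitpid() across EINTR and then forgets the
        // pid, so the Pid destructor will not try to kill the child again.
        int status = child.wait();
        std::cout << "child " << statusToString(status) << "\n";
        return statusOk(status) ? 0 : 1;
    }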
+ assert(!(options.cloneFlags & CLONE_VM)); + + size_t stackSize = 1 * 1024 * 1024; + auto stack = (char *) mmap(0, stackSize, + PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0); + if (stack == MAP_FAILED) throw SysError("allocating stack"); + + Finally freeStack([&]() { munmap(stack, stackSize); }); + + pid = clone(childEntry, stack + stackSize, options.cloneFlags | SIGCHLD, &wrapper); + #else + throw Error("clone flags are only supported on Linux"); + #endif + } else + pid = doFork(options.allowVfork, wrapper); + + if (pid == -1) throw SysError("unable to fork"); + + return pid; +} + + +std::string runProgram(Path program, bool searchPath, const Strings & args, + const std::optional & input, bool isInteractive) +{ + auto res = runProgram(RunOptions {.program = program, .searchPath = searchPath, .args = args, .input = input, .isInteractive = isInteractive}); + + if (!statusOk(res.first)) + throw ExecError(res.first, "program '%1%' %2%", program, statusToString(res.first)); + + return res.second; +} + +// Output = error code + "standard out" output stream +std::pair runProgram(RunOptions && options) +{ + StringSink sink; + options.standardOut = &sink; + + int status = 0; + + try { + runProgram2(options); + } catch (ExecError & e) { + status = e.status; + } + + return {status, std::move(sink.s)}; +} + +void runProgram2(const RunOptions & options) +{ + checkInterrupt(); + + assert(!(options.standardIn && options.input)); + + std::unique_ptr source_; + Source * source = options.standardIn; + + if (options.input) { + source_ = std::make_unique(*options.input); + source = source_.get(); + } + + /* Create a pipe. */ + Pipe out, in; + if (options.standardOut) out.create(); + if (source) in.create(); + + ProcessOptions processOptions; + // vfork implies that the environment of the main process and the fork will + // be shared (technically this is undefined, but in practice that's the + // case), so we can't use it if we alter the environment + processOptions.allowVfork = !options.environment; + + std::optional>> resumeLoggerDefer; + if (options.isInteractive) { + logger->pause(); + resumeLoggerDefer.emplace( + []() { + logger->resume(); + } + ); + } + + /* Fork. */ + Pid pid = startProcess([&]() { + if (options.environment) + replaceEnv(*options.environment); + if (options.standardOut && dup2(out.writeSide.get(), STDOUT_FILENO) == -1) + throw SysError("dupping stdout"); + if (options.mergeStderrToStdout) + if (dup2(STDOUT_FILENO, STDERR_FILENO) == -1) + throw SysError("cannot dup stdout into stderr"); + if (source && dup2(in.readSide.get(), STDIN_FILENO) == -1) + throw SysError("dupping stdin"); + + if (options.chdir && chdir((*options.chdir).c_str()) == -1) + throw SysError("chdir failed"); + if (options.gid && setgid(*options.gid) == -1) + throw SysError("setgid failed"); + /* Drop all other groups if we're setgid. */ + if (options.gid && setgroups(0, 0) == -1) + throw SysError("setgroups failed"); + if (options.uid && setuid(*options.uid) == -1) + throw SysError("setuid failed"); + + Strings args_(options.args); + args_.push_front(options.program); + + restoreProcessContext(); + + if (options.searchPath) + execvp(options.program.c_str(), stringsToCharPtrs(args_).data()); + // This allows you to refer to a program with a pathname relative + // to the PATH variable. 
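For the higher-level wrappers, a minimal sketch of both `runProgram` forms may be useful; the chosen programs and arguments are illustrative assumptions, not part of this change:

    #include "processes.hh"

    #include <iostream>

    using namespace nix;

    int main()
    {
        // Simple form: search $PATH, capture stdout, throw ExecError on a
        // non-zero exit status.
        std::string out = runProgram("uname", true, {"-s"});
        std::cout << out;

        // RunOptions form: the status is returned instead of thrown, and
        // stderr can be folded into the captured output.
        auto [status, log] = runProgram(RunOptions {
            .program = "git",
            .searchPath = true,
            .args = {"--version"},
            .mergeStderrToStdout = true,
        });
        if (!statusOk(status))
            std::cerr << "git failed: " << statusToString(status) << "\n";
        else
            std::cout << log;
        return 0;
    }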
+ else + execv(options.program.c_str(), stringsToCharPtrs(args_).data()); + + throw SysError("executing '%1%'", options.program); + }, processOptions); + + out.writeSide.close(); + + std::thread writerThread; + + std::promise promise; + + Finally doJoin([&]() { + if (writerThread.joinable()) + writerThread.join(); + }); + + + if (source) { + in.readSide.close(); + writerThread = std::thread([&]() { + try { + std::vector buf(8 * 1024); + while (true) { + size_t n; + try { + n = source->read(buf.data(), buf.size()); + } catch (EndOfFile &) { + break; + } + writeFull(in.writeSide.get(), {buf.data(), n}); + } + promise.set_value(); + } catch (...) { + promise.set_exception(std::current_exception()); + } + in.writeSide.close(); + }); + } + + if (options.standardOut) + drainFD(out.readSide.get(), *options.standardOut); + + /* Wait for the child to finish. */ + int status = pid.wait(); + + /* Wait for the writer thread to finish. */ + if (source) promise.get_future().get(); + + if (status) + throw ExecError(status, "program '%1%' %2%", options.program, statusToString(status)); +} + +////////////////////////////////////////////////////////////////////// + +std::string statusToString(int status) +{ + if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { + if (WIFEXITED(status)) + return fmt("failed with exit code %1%", WEXITSTATUS(status)); + else if (WIFSIGNALED(status)) { + int sig = WTERMSIG(status); +#if HAVE_STRSIGNAL + const char * description = strsignal(sig); + return fmt("failed due to signal %1% (%2%)", sig, description); +#else + return fmt("failed due to signal %1%", sig); +#endif + } + else + return "died abnormally"; + } else return "succeeded"; +} + + +bool statusOk(int status) +{ + return WIFEXITED(status) && WEXITSTATUS(status) == 0; +} + +} diff --git a/src/libutil/processes.hh b/src/libutil/processes.hh new file mode 100644 index 000000000..978c37105 --- /dev/null +++ b/src/libutil/processes.hh @@ -0,0 +1,123 @@ +#pragma once +///@file + +#include "types.hh" +#include "error.hh" +#include "logging.hh" +#include "ansicolor.hh" + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +namespace nix { + +struct Sink; +struct Source; + +class Pid +{ + pid_t pid = -1; + bool separatePG = false; + int killSignal = SIGKILL; +public: + Pid(); + Pid(pid_t pid); + ~Pid(); + void operator =(pid_t pid); + operator pid_t(); + int kill(); + int wait(); + + void setSeparatePG(bool separatePG); + void setKillSignal(int signal); + pid_t release(); +}; + + +/** + * Kill all processes running under the specified uid by sending them + * a SIGKILL. + */ +void killUser(uid_t uid); + + +/** + * Fork a process that runs the given function, and return the child + * pid to the caller. + */ +struct ProcessOptions +{ + std::string errorPrefix = ""; + bool dieWithParent = true; + bool runExitHandlers = false; + bool allowVfork = false; + /** + * use clone() with the specified flags (Linux only) + */ + int cloneFlags = 0; +}; + +pid_t startProcess(std::function fun, const ProcessOptions & options = ProcessOptions()); + + +/** + * Run a program and return its stdout in a string (i.e., like the + * shell backtick operator). 
+ */ +std::string runProgram(Path program, bool searchPath = false, + const Strings & args = Strings(), + const std::optional & input = {}, bool isInteractive = false); + +struct RunOptions +{ + Path program; + bool searchPath = true; + Strings args; + std::optional uid; + std::optional gid; + std::optional chdir; + std::optional> environment; + std::optional input; + Source * standardIn = nullptr; + Sink * standardOut = nullptr; + bool mergeStderrToStdout = false; + bool isInteractive = false; +}; + +std::pair runProgram(RunOptions && options); + +void runProgram2(const RunOptions & options); + + +class ExecError : public Error +{ +public: + int status; + + template + ExecError(int status, const Args & ... args) + : Error(args...), status(status) + { } +}; + + +/** + * Convert the exit status of a child as returned by wait() into an + * error string. + */ +std::string statusToString(int status); + +bool statusOk(int status); + +} diff --git a/src/libutil/references.cc b/src/libutil/references.cc index 7f59b4c09..9d75606ef 100644 --- a/src/libutil/references.cc +++ b/src/libutil/references.cc @@ -1,6 +1,5 @@ #include "references.hh" #include "hash.hh" -#include "util.hh" #include "archive.hh" #include diff --git a/src/libutil/serialise.cc b/src/libutil/serialise.cc index 3d5121a19..725ddbb8d 100644 --- a/src/libutil/serialise.cc +++ b/src/libutil/serialise.cc @@ -1,5 +1,5 @@ #include "serialise.hh" -#include "util.hh" +#include "signals.hh" #include #include diff --git a/src/libutil/serialise.hh b/src/libutil/serialise.hh index 333c254ea..9e07226bf 100644 --- a/src/libutil/serialise.hh +++ b/src/libutil/serialise.hh @@ -5,6 +5,7 @@ #include "types.hh" #include "util.hh" +#include "file-descriptor.hh" namespace boost::context { struct stack_context; } diff --git a/src/libutil/signals.cc b/src/libutil/signals.cc new file mode 100644 index 000000000..4632aa319 --- /dev/null +++ b/src/libutil/signals.cc @@ -0,0 +1,188 @@ +#include "signals.hh" +#include "util.hh" +#include "error.hh" +#include "sync.hh" +#include "terminal.hh" + +#include + +namespace nix { + +std::atomic _isInterrupted = false; + +static thread_local bool interruptThrown = false; +thread_local std::function interruptCheck; + +void setInterruptThrown() +{ + interruptThrown = true; +} + +void _interrupted() +{ + /* Block user interrupts while an exception is being handled. + Throwing an exception while another exception is being handled + kills the program! */ + if (!interruptThrown && !std::uncaught_exceptions()) { + interruptThrown = true; + throw Interrupted("interrupted by the user"); + } +} + + +////////////////////////////////////////////////////////////////////// + + +/* We keep track of interrupt callbacks using integer tokens, so we can iterate + safely without having to lock the data structure while executing arbitrary + functions. + */ +struct InterruptCallbacks { + typedef int64_t Token; + + /* We use unique tokens so that we can't accidentally delete the wrong + handler because of an erroneous double delete. */ + Token nextToken = 0; + + /* Used as a list, see InterruptCallbacks comment. 
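To see how the interrupt flag and callback registry above are meant to be consumed, here is a hedged sketch built around `checkInterrupt`; the `copyChunks` helper and the simulated Ctrl-C are assumptions for demonstration only:

    #include "signals.hh"

    #include <iostream>

    using namespace nix;

    static void copyChunks(size_t nChunks)
    {
        for (size_t i = 0; i < nChunks; ++i) {
            // checkInterrupt() is cheap: it tests the _isInterrupted flag
            // (and the optional per-thread interruptCheck hook) and throws
            // Interrupted via _interrupted() once the flag is set.
            checkInterrupt();
            // ... process one chunk ...
        }
    }

    int main()
    {
        try {
            triggerInterrupt();   // simulate the user pressing Ctrl-C
            copyChunks(1000);
        } catch (Interrupted &) {
            std::cerr << "aborted by the user\n";
            return 130;
        }
        return 0;
    }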
*/ + std::map> callbacks; +}; + +static Sync _interruptCallbacks; + +static void signalHandlerThread(sigset_t set) +{ + while (true) { + int signal = 0; + sigwait(&set, &signal); + + if (signal == SIGINT || signal == SIGTERM || signal == SIGHUP) + triggerInterrupt(); + + else if (signal == SIGWINCH) { + updateWindowSize(); + } + } +} + +void triggerInterrupt() +{ + _isInterrupted = true; + + { + InterruptCallbacks::Token i = 0; + while (true) { + std::function callback; + { + auto interruptCallbacks(_interruptCallbacks.lock()); + auto lb = interruptCallbacks->callbacks.lower_bound(i); + if (lb == interruptCallbacks->callbacks.end()) + break; + + callback = lb->second; + i = lb->first + 1; + } + + try { + callback(); + } catch (...) { + ignoreException(); + } + } + } +} + + +static sigset_t savedSignalMask; +static bool savedSignalMaskIsSet = false; + +void setChildSignalMask(sigset_t * sigs) +{ + assert(sigs); // C style function, but think of sigs as a reference + +#if _POSIX_C_SOURCE >= 1 || _XOPEN_SOURCE || _POSIX_SOURCE + sigemptyset(&savedSignalMask); + // There's no "assign" or "copy" function, so we rely on (math) idempotence + // of the or operator: a or a = a. + sigorset(&savedSignalMask, sigs, sigs); +#else + // Without sigorset, our best bet is to assume that sigset_t is a type that + // can be assigned directly, such as is the case for a sigset_t defined as + // an integer type. + savedSignalMask = *sigs; +#endif + + savedSignalMaskIsSet = true; +} + +void saveSignalMask() { + if (sigprocmask(SIG_BLOCK, nullptr, &savedSignalMask)) + throw SysError("querying signal mask"); + + savedSignalMaskIsSet = true; +} + +void startSignalHandlerThread() +{ + updateWindowSize(); + + saveSignalMask(); + + sigset_t set; + sigemptyset(&set); + sigaddset(&set, SIGINT); + sigaddset(&set, SIGTERM); + sigaddset(&set, SIGHUP); + sigaddset(&set, SIGPIPE); + sigaddset(&set, SIGWINCH); + if (pthread_sigmask(SIG_BLOCK, &set, nullptr)) + throw SysError("blocking signals"); + + std::thread(signalHandlerThread, set).detach(); +} + +void restoreSignals() +{ + // If startSignalHandlerThread wasn't called, that means we're not running + // in a proper libmain process, but a process that presumably manages its + // own signal handlers. Such a process should call either + // - initNix(), to be a proper libmain process + // - startSignalHandlerThread(), to resemble libmain regarding signal + // handling only + // - saveSignalMask(), for processes that define their own signal handling + // thread + // TODO: Warn about this? Have a default signal mask? The latter depends on + // whether we should generally inherit signal masks from the caller. + // I don't know what the larger unix ecosystem expects from us here. + if (!savedSignalMaskIsSet) + return; + + if (sigprocmask(SIG_SETMASK, &savedSignalMask, nullptr)) + throw SysError("restoring signals"); +} + + +/* RAII helper to automatically deregister a callback. 
*/ +struct InterruptCallbackImpl : InterruptCallback +{ + InterruptCallbacks::Token token; + ~InterruptCallbackImpl() override + { + auto interruptCallbacks(_interruptCallbacks.lock()); + interruptCallbacks->callbacks.erase(token); + } +}; + +std::unique_ptr createInterruptCallback(std::function callback) +{ + auto interruptCallbacks(_interruptCallbacks.lock()); + auto token = interruptCallbacks->nextToken++; + interruptCallbacks->callbacks.emplace(token, callback); + + auto res = std::make_unique(); + res->token = token; + + return std::unique_ptr(res.release()); +} + +} diff --git a/src/libutil/signals.hh b/src/libutil/signals.hh new file mode 100644 index 000000000..7e8beff33 --- /dev/null +++ b/src/libutil/signals.hh @@ -0,0 +1,104 @@ +#pragma once +///@file + +#include "types.hh" +#include "error.hh" +#include "logging.hh" +#include "ansicolor.hh" + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +namespace nix { + +/* User interruption. */ + +extern std::atomic _isInterrupted; + +extern thread_local std::function interruptCheck; + +void setInterruptThrown(); + +void _interrupted(); + +void inline checkInterrupt() +{ + if (_isInterrupted || (interruptCheck && interruptCheck())) + _interrupted(); +} + +MakeError(Interrupted, BaseError); + + +/** + * Start a thread that handles various signals. Also block those signals + * on the current thread (and thus any threads created by it). + * Saves the signal mask before changing the mask to block those signals. + * See saveSignalMask(). + */ +void startSignalHandlerThread(); + +/** + * Saves the signal mask, which is the signal mask that nix will restore + * before creating child processes. + * See setChildSignalMask() to set an arbitrary signal mask instead of the + * current mask. + */ +void saveSignalMask(); + +/** + * To use in a process that already called `startSignalHandlerThread()` + * or `saveSignalMask()` first. + */ +void restoreSignals(); + +/** + * Sets the signal mask. Like saveSignalMask() but for a signal set that doesn't + * necessarily match the current thread's mask. + * See saveSignalMask() to set the saved mask to the current mask. + */ +void setChildSignalMask(sigset_t *sigs); + +struct InterruptCallback +{ + virtual ~InterruptCallback() { }; +}; + +/** + * Register a function that gets called on SIGINT (in a non-signal + * context). + */ +std::unique_ptr createInterruptCallback( + std::function callback); + +void triggerInterrupt(); + +/** + * A RAII class that causes the current thread to receive SIGUSR1 when + * the signal handler thread receives SIGINT. That is, this allows + * SIGINT to be multiplexed to multiple threads. 
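A short, illustrative sketch of `createInterruptCallback` and its RAII deregistration; the `cancelled` flag and the `main` wrapper are assumptions, not part of this patch:

    #include "signals.hh"

    #include <iostream>

    using namespace nix;

    int main()
    {
        bool cancelled = false;

        // The returned token object deregisters the callback in its
        // destructor, so scoping it to a block bounds its lifetime.
        {
            auto cb = createInterruptCallback([&]() {
                // Runs from triggerInterrupt(), i.e. in a normal
                // (non-signal) context, so it may allocate, lock or log.
                cancelled = true;
            });

            triggerInterrupt();   // e.g. the signal handler thread saw SIGINT
        }

        std::cout << (cancelled ? "cancelled" : "still running") << "\n";
        return 0;
    }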
+ */ +struct ReceiveInterrupts +{ + pthread_t target; + std::unique_ptr callback; + + ReceiveInterrupts() + : target(pthread_self()) + , callback(createInterruptCallback([&]() { pthread_kill(target, SIGUSR1); })) + { } +}; + + +} diff --git a/src/libutil/suggestions.cc b/src/libutil/suggestions.cc index 9510a5f0c..e67e986fb 100644 --- a/src/libutil/suggestions.cc +++ b/src/libutil/suggestions.cc @@ -1,7 +1,9 @@ #include "suggestions.hh" #include "ansicolor.hh" -#include "util.hh" +#include "terminal.hh" + #include +#include namespace nix { diff --git a/src/libutil/tarfile.cc b/src/libutil/tarfile.cc index 5060a8f24..1733c791c 100644 --- a/src/libutil/tarfile.cc +++ b/src/libutil/tarfile.cc @@ -3,6 +3,7 @@ #include "serialise.hh" #include "tarfile.hh" +#include "file-system.hh" namespace nix { diff --git a/src/libutil/terminal.cc b/src/libutil/terminal.cc new file mode 100644 index 000000000..8febc8771 --- /dev/null +++ b/src/libutil/terminal.cc @@ -0,0 +1,108 @@ +#include "terminal.hh" +#include "environment-variables.hh" +#include "sync.hh" + +#include +#include + +namespace nix { + +bool shouldANSI() +{ + return isatty(STDERR_FILENO) + && getEnv("TERM").value_or("dumb") != "dumb" + && !(getEnv("NO_COLOR").has_value() || getEnv("NOCOLOR").has_value()); +} + +std::string filterANSIEscapes(std::string_view s, bool filterAll, unsigned int width) +{ + std::string t, e; + size_t w = 0; + auto i = s.begin(); + + while (w < (size_t) width && i != s.end()) { + + if (*i == '\e') { + std::string e; + e += *i++; + char last = 0; + + if (i != s.end() && *i == '[') { + e += *i++; + // eat parameter bytes + while (i != s.end() && *i >= 0x30 && *i <= 0x3f) e += *i++; + // eat intermediate bytes + while (i != s.end() && *i >= 0x20 && *i <= 0x2f) e += *i++; + // eat final byte + if (i != s.end() && *i >= 0x40 && *i <= 0x7e) e += last = *i++; + } else { + if (i != s.end() && *i >= 0x40 && *i <= 0x5f) e += *i++; + } + + if (!filterAll && last == 'm') + t += e; + } + + else if (*i == '\t') { + i++; t += ' '; w++; + while (w < (size_t) width && w % 8) { + t += ' '; w++; + } + } + + else if (*i == '\r' || *i == '\a') + // do nothing for now + i++; + + else { + w++; + // Copy one UTF-8 character. + if ((*i & 0xe0) == 0xc0) { + t += *i++; + if (i != s.end() && ((*i & 0xc0) == 0x80)) t += *i++; + } else if ((*i & 0xf0) == 0xe0) { + t += *i++; + if (i != s.end() && ((*i & 0xc0) == 0x80)) { + t += *i++; + if (i != s.end() && ((*i & 0xc0) == 0x80)) t += *i++; + } + } else if ((*i & 0xf8) == 0xf0) { + t += *i++; + if (i != s.end() && ((*i & 0xc0) == 0x80)) { + t += *i++; + if (i != s.end() && ((*i & 0xc0) == 0x80)) { + t += *i++; + if (i != s.end() && ((*i & 0xc0) == 0x80)) t += *i++; + } + } + } else + t += *i++; + } + } + + return t; +} + + +////////////////////////////////////////////////////////////////////// + +static Sync> windowSize{{0, 0}}; + + +void updateWindowSize() +{ + struct winsize ws; + if (ioctl(2, TIOCGWINSZ, &ws) == 0) { + auto windowSize_(windowSize.lock()); + windowSize_->first = ws.ws_row; + windowSize_->second = ws.ws_col; + } +} + + +std::pair getWindowSize() +{ + return *windowSize.lock(); +} + +} diff --git a/src/libutil/terminal.hh b/src/libutil/terminal.hh new file mode 100644 index 000000000..9cb191308 --- /dev/null +++ b/src/libutil/terminal.hh @@ -0,0 +1,38 @@ +#pragma once +///@file + +#include "types.hh" + +namespace nix { +/** + * Determine whether ANSI escape sequences are appropriate for the + * present output. 
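As an illustration of the two filtering modes, the following sketch runs one escaped log line through `filterANSIEscapes`; the sample string and width are assumptions for demonstration:

    #include "terminal.hh"

    #include <iostream>

    using namespace nix;

    int main()
    {
        // A log line with a colour escape, a tab and a carriage return.
        std::string raw = "\x1b[32mok\x1b[0m\tdone\r";

        // Keep colour sequences (they are copied but not counted towards
        // the width), expand the tab to spaces, drop the '\r'.
        std::cout << filterANSIEscapes(raw, /*filterAll=*/false, 20) << "\n";

        // Strip every escape sequence, e.g. when stderr is not a terminal
        // according to shouldANSI().
        if (!shouldANSI())
            std::cout << filterANSIEscapes(raw, /*filterAll=*/true) << "\n";
        return 0;
    }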
+ */ +bool shouldANSI(); + +/** + * Truncate a string to 'width' printable characters. If 'filterAll' + * is true, all ANSI escape sequences are filtered out. Otherwise, + * some escape sequences (such as colour setting) are copied but not + * included in the character count. Also, tabs are expanded to + * spaces. + */ +std::string filterANSIEscapes(std::string_view s, + bool filterAll = false, + unsigned int width = std::numeric_limits::max()); + +/** + * Recalculate the window size, updating a global variable. Used in the + * `SIGWINCH` signal handler. + */ +void updateWindowSize(); + +/** + * @return the number of rows and columns of the terminal. + * + * The value is cached so this is quick. The cached result is computed + * by `updateWindowSize()`. + */ +std::pair getWindowSize(); + +} diff --git a/src/libutil/tests/logging.cc b/src/libutil/tests/logging.cc index 2ffdc2e9b..c6dfe63d3 100644 --- a/src/libutil/tests/logging.cc +++ b/src/libutil/tests/logging.cc @@ -2,7 +2,6 @@ #include "logging.hh" #include "nixexpr.hh" -#include "util.hh" #include #include diff --git a/src/libutil/tests/tests.cc b/src/libutil/tests/tests.cc index f3c1e8248..568f03f70 100644 --- a/src/libutil/tests/tests.cc +++ b/src/libutil/tests/tests.cc @@ -1,5 +1,8 @@ #include "util.hh" #include "types.hh" +#include "file-system.hh" +#include "processes.hh" +#include "terminal.hh" #include #include diff --git a/src/libutil/thread-pool.cc b/src/libutil/thread-pool.cc index dc4067f1b..c5e735617 100644 --- a/src/libutil/thread-pool.cc +++ b/src/libutil/thread-pool.cc @@ -1,4 +1,6 @@ #include "thread-pool.hh" +#include "signals.hh" +#include "util.hh" namespace nix { diff --git a/src/libutil/thread-pool.hh b/src/libutil/thread-pool.hh index 0e09fae97..02765badc 100644 --- a/src/libutil/thread-pool.hh +++ b/src/libutil/thread-pool.hh @@ -1,8 +1,8 @@ #pragma once ///@file +#include "error.hh" #include "sync.hh" -#include "util.hh" #include #include diff --git a/src/libutil/unix-domain-socket.cc b/src/libutil/unix-domain-socket.cc new file mode 100644 index 000000000..8949461d2 --- /dev/null +++ b/src/libutil/unix-domain-socket.cc @@ -0,0 +1,100 @@ +#include "file-system.hh" +#include "processes.hh" +#include "unix-domain-socket.hh" + +#include +#include +#include + +namespace nix { + +AutoCloseFD createUnixDomainSocket() +{ + AutoCloseFD fdSocket = socket(PF_UNIX, SOCK_STREAM + #ifdef SOCK_CLOEXEC + | SOCK_CLOEXEC + #endif + , 0); + if (!fdSocket) + throw SysError("cannot create Unix domain socket"); + closeOnExec(fdSocket.get()); + return fdSocket; +} + + +AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode) +{ + auto fdSocket = nix::createUnixDomainSocket(); + + bind(fdSocket.get(), path); + + if (chmod(path.c_str(), mode) == -1) + throw SysError("changing permissions on '%1%'", path); + + if (listen(fdSocket.get(), 100) == -1) + throw SysError("cannot listen on socket '%1%'", path); + + return fdSocket; +} + + +void bind(int fd, const std::string & path) +{ + unlink(path.c_str()); + + struct sockaddr_un addr; + addr.sun_family = AF_UNIX; + + if (path.size() + 1 >= sizeof(addr.sun_path)) { + Pid pid = startProcess([&]() { + Path dir = dirOf(path); + if (chdir(dir.c_str()) == -1) + throw SysError("chdir to '%s' failed", dir); + std::string base(baseNameOf(path)); + if (base.size() + 1 >= sizeof(addr.sun_path)) + throw Error("socket path '%s' is too long", base); + memcpy(addr.sun_path, base.c_str(), base.size() + 1); + if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) + throw SysError("cannot 
bind to socket '%s'", path); + _exit(0); + }); + int status = pid.wait(); + if (status != 0) + throw Error("cannot bind to socket '%s'", path); + } else { + memcpy(addr.sun_path, path.c_str(), path.size() + 1); + if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) + throw SysError("cannot bind to socket '%s'", path); + } +} + + +void connect(int fd, const std::string & path) +{ + struct sockaddr_un addr; + addr.sun_family = AF_UNIX; + + if (path.size() + 1 >= sizeof(addr.sun_path)) { + Pid pid = startProcess([&]() { + Path dir = dirOf(path); + if (chdir(dir.c_str()) == -1) + throw SysError("chdir to '%s' failed", dir); + std::string base(baseNameOf(path)); + if (base.size() + 1 >= sizeof(addr.sun_path)) + throw Error("socket path '%s' is too long", base); + memcpy(addr.sun_path, base.c_str(), base.size() + 1); + if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) + throw SysError("cannot connect to socket at '%s'", path); + _exit(0); + }); + int status = pid.wait(); + if (status != 0) + throw Error("cannot connect to socket at '%s'", path); + } else { + memcpy(addr.sun_path, path.c_str(), path.size() + 1); + if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) + throw SysError("cannot connect to socket at '%s'", path); + } +} + +} diff --git a/src/libutil/unix-domain-socket.hh b/src/libutil/unix-domain-socket.hh new file mode 100644 index 000000000..b78feb454 --- /dev/null +++ b/src/libutil/unix-domain-socket.hh @@ -0,0 +1,31 @@ +#pragma once +///@file + +#include "types.hh" +#include "file-descriptor.hh" + +#include + +namespace nix { + +/** + * Create a Unix domain socket. + */ +AutoCloseFD createUnixDomainSocket(); + +/** + * Create a Unix domain socket in listen mode. + */ +AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode); + +/** + * Bind a Unix domain socket to a path. + */ +void bind(int fd, const std::string & path); + +/** + * Connect to a Unix domain socket. + */ +void connect(int fd, const std::string & path); + +} diff --git a/src/libutil/users.cc b/src/libutil/users.cc new file mode 100644 index 000000000..95a641322 --- /dev/null +++ b/src/libutil/users.cc @@ -0,0 +1,116 @@ +#include "util.hh" +#include "users.hh" +#include "environment-variables.hh" +#include "file-system.hh" + +#include +#include +#include + +namespace nix { + +std::string getUserName() +{ + auto pw = getpwuid(geteuid()); + std::string name = pw ? pw->pw_name : getEnv("USER").value_or(""); + if (name.empty()) + throw Error("cannot figure out user name"); + return name; +} + +Path getHomeOf(uid_t userId) +{ + std::vector buf(16384); + struct passwd pwbuf; + struct passwd * pw; + if (getpwuid_r(userId, &pwbuf, buf.data(), buf.size(), &pw) != 0 + || !pw || !pw->pw_dir || !pw->pw_dir[0]) + throw Error("cannot determine user's home directory"); + return pw->pw_dir; +} + +Path getHome() +{ + static Path homeDir = []() + { + std::optional unownedUserHomeDir = {}; + auto homeDir = getEnv("HOME"); + if (homeDir) { + // Only use $HOME if doesn't exist or is owned by the current user. 
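The helpers above combine into a tiny loopback exchange; the socket path, the single-byte payload and the `main` wrapper are illustrative assumptions, not part of this change:

    #include "unix-domain-socket.hh"
    #include "file-system.hh"

    #include <sys/socket.h>
    #include <unistd.h>
    #include <iostream>

    using namespace nix;

    int main()
    {
        // Create a listening socket: this unlinks any stale file, binds
        // (using the fork+chdir fallback when the path is too long for
        // sockaddr_un), chmods the socket file and calls listen().
        Path path = createTempDir() + "/demo.sock";
        AutoCloseFD server = createUnixDomainSocket(path, 0600);

        // Client side: a plain socket plus the connect() helper, which
        // uses the same long-path fallback as bind().
        AutoCloseFD client = createUnixDomainSocket();
        connect(client.get(), path);

        AutoCloseFD conn = accept(server.get(), nullptr, nullptr);
        if (!conn) throw SysError("accepting connection");

        char ping = '!';
        if (::write(client.get(), &ping, 1) != 1) throw SysError("writing ping");
        char buf;
        if (::read(conn.get(), &buf, 1) != 1) throw SysError("reading ping");
        std::cout << "received " << buf << "\n";

        deletePath(path);
        return 0;
    }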
+ struct stat st; + int result = stat(homeDir->c_str(), &st); + if (result != 0) { + if (errno != ENOENT) { + warn("couldn't stat $HOME ('%s') for reason other than not existing ('%d'), falling back to the one defined in the 'passwd' file", *homeDir, errno); + homeDir.reset(); + } + } else if (st.st_uid != geteuid()) { + unownedUserHomeDir.swap(homeDir); + } + } + if (!homeDir) { + homeDir = getHomeOf(geteuid()); + if (unownedUserHomeDir.has_value() && unownedUserHomeDir != homeDir) { + warn("$HOME ('%s') is not owned by you, falling back to the one defined in the 'passwd' file ('%s')", *unownedUserHomeDir, *homeDir); + } + } + return *homeDir; + }(); + return homeDir; +} + + +Path getCacheDir() +{ + auto cacheDir = getEnv("XDG_CACHE_HOME"); + return cacheDir ? *cacheDir : getHome() + "/.cache"; +} + + +Path getConfigDir() +{ + auto configDir = getEnv("XDG_CONFIG_HOME"); + return configDir ? *configDir : getHome() + "/.config"; +} + +std::vector getConfigDirs() +{ + Path configHome = getConfigDir(); + auto configDirs = getEnv("XDG_CONFIG_DIRS").value_or("/etc/xdg"); + std::vector result = tokenizeString>(configDirs, ":"); + result.insert(result.begin(), configHome); + return result; +} + + +Path getDataDir() +{ + auto dataDir = getEnv("XDG_DATA_HOME"); + return dataDir ? *dataDir : getHome() + "/.local/share"; +} + +Path getStateDir() +{ + auto stateDir = getEnv("XDG_STATE_HOME"); + return stateDir ? *stateDir : getHome() + "/.local/state"; +} + +Path createNixStateDir() +{ + Path dir = getStateDir() + "/nix"; + createDirs(dir); + return dir; +} + + +std::string expandTilde(std::string_view path) +{ + // TODO: expand ~user ? + auto tilde = path.substr(0, 2); + if (tilde == "~/" || tilde == "~") + return getHome() + std::string(path.substr(1)); + else + return std::string(path); +} + +} diff --git a/src/libutil/users.hh b/src/libutil/users.hh new file mode 100644 index 000000000..cecbb8bfb --- /dev/null +++ b/src/libutil/users.hh @@ -0,0 +1,58 @@ +#pragma once +///@file + +#include "types.hh" + +#include + +namespace nix { + +std::string getUserName(); + +/** + * @return the given user's home directory from /etc/passwd. + */ +Path getHomeOf(uid_t userId); + +/** + * @return $HOME or the user's home directory from /etc/passwd. + */ +Path getHome(); + +/** + * @return $XDG_CACHE_HOME or $HOME/.cache. + */ +Path getCacheDir(); + +/** + * @return $XDG_CONFIG_HOME or $HOME/.config. + */ +Path getConfigDir(); + +/** + * @return the directories to search for user configuration files + */ +std::vector getConfigDirs(); + +/** + * @return $XDG_DATA_HOME or $HOME/.local/share. + */ +Path getDataDir(); + +/** + * @return $XDG_STATE_HOME or $HOME/.local/state. + */ +Path getStateDir(); + +/** + * Create the Nix state directory and return the path to it. + */ +Path createNixStateDir(); + +/** + * Perform tilde expansion on a path, replacing tilde with the user's + * home directory. 
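A minimal sketch of the XDG lookup helpers and `expandTilde` implemented above; the printed labels are assumptions for demonstration:

    #include "users.hh"

    #include <iostream>

    using namespace nix;

    int main()
    {
        // Each helper prefers the corresponding XDG_* variable and falls
        // back to the conventional location under $HOME.
        std::cout << "cache:  " << getCacheDir()  << "\n";
        std::cout << "config: " << getConfigDir() << "\n";
        std::cout << "data:   " << getDataDir()   << "\n";
        std::cout << "state:  " << getStateDir()  << "\n";

        // getConfigDirs() puts XDG_CONFIG_HOME first, then XDG_CONFIG_DIRS
        // (default "/etc/xdg"), matching the XDG base-directory lookup order.
        for (auto & dir : getConfigDirs())
            std::cout << "search: " << dir << "\n";

        // Only a leading "~" or "~/" is expanded; "~user" is left alone.
        std::cout << expandTilde("~/.config/nix/nix.conf") << "\n";
        return 0;
    }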
+ */ +std::string expandTilde(std::string_view path); + +} diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 3b4c181e5..ee7a22849 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -1,49 +1,10 @@ #include "util.hh" -#include "sync.hh" -#include "finally.hh" -#include "serialise.hh" -#include "cgroup.hh" +#include "fmt.hh" #include #include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include - -#include #include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef __APPLE__ -#include -#include -#endif - -#ifdef __linux__ -#include -#include -#include - -#include -#endif - - -extern char * * environ __attribute__((weak)); - namespace nix { @@ -67,1099 +28,8 @@ void initLibUtil() { assert(caught); } -std::optional getEnv(const std::string & key) -{ - char * value = getenv(key.c_str()); - if (!value) return {}; - return std::string(value); -} - -std::optional getEnvNonEmpty(const std::string & key) { - auto value = getEnv(key); - if (value == "") return {}; - return value; -} - -std::map getEnv() -{ - std::map env; - for (size_t i = 0; environ[i]; ++i) { - auto s = environ[i]; - auto eq = strchr(s, '='); - if (!eq) - // invalid env, just keep going - continue; - env.emplace(std::string(s, eq), std::string(eq + 1)); - } - return env; -} - - -void clearEnv() -{ - for (auto & name : getEnv()) - unsetenv(name.first.c_str()); -} - -void replaceEnv(const std::map & newEnv) -{ - clearEnv(); - for (auto & newEnvVar : newEnv) - setenv(newEnvVar.first.c_str(), newEnvVar.second.c_str(), 1); -} - - -Path absPath(Path path, std::optional dir, bool resolveSymlinks) -{ - if (path[0] != '/') { - if (!dir) { -#ifdef __GNU__ - /* GNU (aka. GNU/Hurd) doesn't have any limitation on path - lengths and doesn't define `PATH_MAX'. */ - char *buf = getcwd(NULL, 0); - if (buf == NULL) -#else - char buf[PATH_MAX]; - if (!getcwd(buf, sizeof(buf))) -#endif - throw SysError("cannot get cwd"); - path = concatStrings(buf, "/", path); -#ifdef __GNU__ - free(buf); -#endif - } else - path = concatStrings(*dir, "/", path); - } - return canonPath(path, resolveSymlinks); -} - - -Path canonPath(PathView path, bool resolveSymlinks) -{ - assert(path != ""); - - std::string s; - s.reserve(256); - - if (path[0] != '/') - throw Error("not an absolute path: '%1%'", path); - - std::string temp; - - /* Count the number of times we follow a symlink and stop at some - arbitrary (but high) limit to prevent infinite loops. */ - unsigned int followCount = 0, maxFollow = 1024; - - while (1) { - - /* Skip slashes. */ - while (!path.empty() && path[0] == '/') path.remove_prefix(1); - if (path.empty()) break; - - /* Ignore `.'. */ - if (path == "." || path.substr(0, 2) == "./") - path.remove_prefix(1); - - /* If `..', delete the last component. */ - else if (path == ".." || path.substr(0, 3) == "../") - { - if (!s.empty()) s.erase(s.rfind('/')); - path.remove_prefix(2); - } - - /* Normal component; copy it. 
*/ - else { - s += '/'; - if (const auto slash = path.find('/'); slash == std::string::npos) { - s += path; - path = {}; - } else { - s += path.substr(0, slash); - path = path.substr(slash); - } - - /* If s points to a symlink, resolve it and continue from there */ - if (resolveSymlinks && isLink(s)) { - if (++followCount >= maxFollow) - throw Error("infinite symlink recursion in path '%1%'", path); - temp = concatStrings(readLink(s), path); - path = temp; - if (!temp.empty() && temp[0] == '/') { - s.clear(); /* restart for symlinks pointing to absolute path */ - } else { - s = dirOf(s); - if (s == "/") { // we don’t want trailing slashes here, which dirOf only produces if s = / - s.clear(); - } - } - } - } - } - - return s.empty() ? "/" : std::move(s); -} - - -Path dirOf(const PathView path) -{ - Path::size_type pos = path.rfind('/'); - if (pos == std::string::npos) - return "."; - return pos == 0 ? "/" : Path(path, 0, pos); -} - - -std::string_view baseNameOf(std::string_view path) -{ - if (path.empty()) - return ""; - - auto last = path.size() - 1; - if (path[last] == '/' && last > 0) - last -= 1; - - auto pos = path.rfind('/', last); - if (pos == std::string::npos) - pos = 0; - else - pos += 1; - - return path.substr(pos, last - pos + 1); -} - - -std::string expandTilde(std::string_view path) -{ - // TODO: expand ~user ? - auto tilde = path.substr(0, 2); - if (tilde == "~/" || tilde == "~") - return getHome() + std::string(path.substr(1)); - else - return std::string(path); -} - - -bool isInDir(std::string_view path, std::string_view dir) -{ - return path.substr(0, 1) == "/" - && path.substr(0, dir.size()) == dir - && path.size() >= dir.size() + 2 - && path[dir.size()] == '/'; -} - - -bool isDirOrInDir(std::string_view path, std::string_view dir) -{ - return path == dir || isInDir(path, dir); -} - - -struct stat stat(const Path & path) -{ - struct stat st; - if (stat(path.c_str(), &st)) - throw SysError("getting status of '%1%'", path); - return st; -} - - -struct stat lstat(const Path & path) -{ - struct stat st; - if (lstat(path.c_str(), &st)) - throw SysError("getting status of '%1%'", path); - return st; -} - - -bool pathExists(const Path & path) -{ - int res; - struct stat st; - res = lstat(path.c_str(), &st); - if (!res) return true; - if (errno != ENOENT && errno != ENOTDIR) - throw SysError("getting status of %1%", path); - return false; -} - -bool pathAccessible(const Path & path) -{ - try { - return pathExists(path); - } catch (SysError & e) { - // swallow EPERM - if (e.errNo == EPERM) return false; - throw; - } -} - - -Path readLink(const Path & path) -{ - checkInterrupt(); - std::vector buf; - for (ssize_t bufSize = PATH_MAX/4; true; bufSize += bufSize/2) { - buf.resize(bufSize); - ssize_t rlSize = readlink(path.c_str(), buf.data(), bufSize); - if (rlSize == -1) - if (errno == EINVAL) - throw Error("'%1%' is not a symlink", path); - else - throw SysError("reading symbolic link '%1%'", path); - else if (rlSize < bufSize) - return std::string(buf.data(), rlSize); - } -} - - -bool isLink(const Path & path) -{ - struct stat st = lstat(path); - return S_ISLNK(st.st_mode); -} - - -DirEntries readDirectory(DIR *dir, const Path & path) -{ - DirEntries entries; - entries.reserve(64); - - struct dirent * dirent; - while (errno = 0, dirent = readdir(dir)) { /* sic */ - checkInterrupt(); - std::string name = dirent->d_name; - if (name == "." 
|| name == "..") continue; - entries.emplace_back(name, dirent->d_ino, -#ifdef HAVE_STRUCT_DIRENT_D_TYPE - dirent->d_type -#else - DT_UNKNOWN -#endif - ); - } - if (errno) throw SysError("reading directory '%1%'", path); - - return entries; -} - -DirEntries readDirectory(const Path & path) -{ - AutoCloseDir dir(opendir(path.c_str())); - if (!dir) throw SysError("opening directory '%1%'", path); - - return readDirectory(dir.get(), path); -} - - -unsigned char getFileType(const Path & path) -{ - struct stat st = lstat(path); - if (S_ISDIR(st.st_mode)) return DT_DIR; - if (S_ISLNK(st.st_mode)) return DT_LNK; - if (S_ISREG(st.st_mode)) return DT_REG; - return DT_UNKNOWN; -} - - -std::string readFile(int fd) -{ - struct stat st; - if (fstat(fd, &st) == -1) - throw SysError("statting file"); - - return drainFD(fd, true, st.st_size); -} - - -std::string readFile(const Path & path) -{ - AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC); - if (!fd) - throw SysError("opening file '%1%'", path); - return readFile(fd.get()); -} - - -void readFile(const Path & path, Sink & sink) -{ - AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_CLOEXEC); - if (!fd) - throw SysError("opening file '%s'", path); - drainFD(fd.get(), sink); -} - - -void writeFile(const Path & path, std::string_view s, mode_t mode, bool sync) -{ - AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode); - if (!fd) - throw SysError("opening file '%1%'", path); - try { - writeFull(fd.get(), s); - } catch (Error & e) { - e.addTrace({}, "writing file '%1%'", path); - throw; - } - if (sync) - fd.fsync(); - // Explicitly close to make sure exceptions are propagated. - fd.close(); - if (sync) - syncParent(path); -} - - -void writeFile(const Path & path, Source & source, mode_t mode, bool sync) -{ - AutoCloseFD fd = open(path.c_str(), O_WRONLY | O_TRUNC | O_CREAT | O_CLOEXEC, mode); - if (!fd) - throw SysError("opening file '%1%'", path); - - std::vector buf(64 * 1024); - - try { - while (true) { - try { - auto n = source.read(buf.data(), buf.size()); - writeFull(fd.get(), {buf.data(), n}); - } catch (EndOfFile &) { break; } - } - } catch (Error & e) { - e.addTrace({}, "writing file '%1%'", path); - throw; - } - if (sync) - fd.fsync(); - // Explicitly close to make sure exceptions are propagated. - fd.close(); - if (sync) - syncParent(path); -} - -void syncParent(const Path & path) -{ - AutoCloseFD fd = open(dirOf(path).c_str(), O_RDONLY, 0); - if (!fd) - throw SysError("opening file '%1%'", path); - fd.fsync(); -} - -std::string readLine(int fd) -{ - std::string s; - while (1) { - checkInterrupt(); - char ch; - // FIXME: inefficient - ssize_t rd = read(fd, &ch, 1); - if (rd == -1) { - if (errno != EINTR) - throw SysError("reading a line"); - } else if (rd == 0) - throw EndOfFile("unexpected EOF reading a line"); - else { - if (ch == '\n') return s; - s += ch; - } - } -} - - -void writeLine(int fd, std::string s) -{ - s += '\n'; - writeFull(fd, s); -} - - -static void _deletePath(int parentfd, const Path & path, uint64_t & bytesFreed) -{ - checkInterrupt(); - - std::string name(baseNameOf(path)); - - struct stat st; - if (fstatat(parentfd, name.c_str(), &st, AT_SYMLINK_NOFOLLOW) == -1) { - if (errno == ENOENT) return; - throw SysError("getting status of '%1%'", path); - } - - if (!S_ISDIR(st.st_mode)) { - /* We are about to delete a file. Will it likely free space? */ - - switch (st.st_nlink) { - /* Yes: last link. 
*/ - case 1: - bytesFreed += st.st_size; - break; - /* Maybe: yes, if 'auto-optimise-store' or manual optimisation - was performed. Instead of checking for real let's assume - it's an optimised file and space will be freed. - - In worst case we will double count on freed space for files - with exactly two hardlinks for unoptimised packages. - */ - case 2: - bytesFreed += st.st_size; - break; - /* No: 3+ links. */ - default: - break; - } - } - - if (S_ISDIR(st.st_mode)) { - /* Make the directory accessible. */ - const auto PERM_MASK = S_IRUSR | S_IWUSR | S_IXUSR; - if ((st.st_mode & PERM_MASK) != PERM_MASK) { - if (fchmodat(parentfd, name.c_str(), st.st_mode | PERM_MASK, 0) == -1) - throw SysError("chmod '%1%'", path); - } - - int fd = openat(parentfd, path.c_str(), O_RDONLY); - if (fd == -1) - throw SysError("opening directory '%1%'", path); - AutoCloseDir dir(fdopendir(fd)); - if (!dir) - throw SysError("opening directory '%1%'", path); - for (auto & i : readDirectory(dir.get(), path)) - _deletePath(dirfd(dir.get()), path + "/" + i.name, bytesFreed); - } - - int flags = S_ISDIR(st.st_mode) ? AT_REMOVEDIR : 0; - if (unlinkat(parentfd, name.c_str(), flags) == -1) { - if (errno == ENOENT) return; - throw SysError("cannot unlink '%1%'", path); - } -} - -static void _deletePath(const Path & path, uint64_t & bytesFreed) -{ - Path dir = dirOf(path); - if (dir == "") - dir = "/"; - - AutoCloseFD dirfd{open(dir.c_str(), O_RDONLY)}; - if (!dirfd) { - if (errno == ENOENT) return; - throw SysError("opening directory '%1%'", path); - } - - _deletePath(dirfd.get(), path, bytesFreed); -} - - -void deletePath(const Path & path) -{ - uint64_t dummy; - deletePath(path, dummy); -} - - -void deletePath(const Path & path, uint64_t & bytesFreed) -{ - //Activity act(*logger, lvlDebug, "recursively deleting path '%1%'", path); - bytesFreed = 0; - _deletePath(path, bytesFreed); -} - - -std::string getUserName() -{ - auto pw = getpwuid(geteuid()); - std::string name = pw ? pw->pw_name : getEnv("USER").value_or(""); - if (name.empty()) - throw Error("cannot figure out user name"); - return name; -} - -Path getHomeOf(uid_t userId) -{ - std::vector buf(16384); - struct passwd pwbuf; - struct passwd * pw; - if (getpwuid_r(userId, &pwbuf, buf.data(), buf.size(), &pw) != 0 - || !pw || !pw->pw_dir || !pw->pw_dir[0]) - throw Error("cannot determine user's home directory"); - return pw->pw_dir; -} - -Path getHome() -{ - static Path homeDir = []() - { - std::optional unownedUserHomeDir = {}; - auto homeDir = getEnv("HOME"); - if (homeDir) { - // Only use $HOME if doesn't exist or is owned by the current user. - struct stat st; - int result = stat(homeDir->c_str(), &st); - if (result != 0) { - if (errno != ENOENT) { - warn("couldn't stat $HOME ('%s') for reason other than not existing ('%d'), falling back to the one defined in the 'passwd' file", *homeDir, errno); - homeDir.reset(); - } - } else if (st.st_uid != geteuid()) { - unownedUserHomeDir.swap(homeDir); - } - } - if (!homeDir) { - homeDir = getHomeOf(geteuid()); - if (unownedUserHomeDir.has_value() && unownedUserHomeDir != homeDir) { - warn("$HOME ('%s') is not owned by you, falling back to the one defined in the 'passwd' file ('%s')", *unownedUserHomeDir, *homeDir); - } - } - return *homeDir; - }(); - return homeDir; -} - - -Path getCacheDir() -{ - auto cacheDir = getEnv("XDG_CACHE_HOME"); - return cacheDir ? *cacheDir : getHome() + "/.cache"; -} - - -Path getConfigDir() -{ - auto configDir = getEnv("XDG_CONFIG_HOME"); - return configDir ? 
*configDir : getHome() + "/.config"; -} - -std::vector getConfigDirs() -{ - Path configHome = getConfigDir(); - auto configDirs = getEnv("XDG_CONFIG_DIRS").value_or("/etc/xdg"); - std::vector result = tokenizeString>(configDirs, ":"); - result.insert(result.begin(), configHome); - return result; -} - - -Path getDataDir() -{ - auto dataDir = getEnv("XDG_DATA_HOME"); - return dataDir ? *dataDir : getHome() + "/.local/share"; -} - -Path getStateDir() -{ - auto stateDir = getEnv("XDG_STATE_HOME"); - return stateDir ? *stateDir : getHome() + "/.local/state"; -} - -Path createNixStateDir() -{ - Path dir = getStateDir() + "/nix"; - createDirs(dir); - return dir; -} - - -std::optional getSelfExe() -{ - static auto cached = []() -> std::optional - { - #if __linux__ - return readLink("/proc/self/exe"); - #elif __APPLE__ - char buf[1024]; - uint32_t size = sizeof(buf); - if (_NSGetExecutablePath(buf, &size) == 0) - return buf; - else - return std::nullopt; - #else - return std::nullopt; - #endif - }(); - return cached; -} - - -Paths createDirs(const Path & path) -{ - Paths created; - if (path == "/") return created; - - struct stat st; - if (lstat(path.c_str(), &st) == -1) { - created = createDirs(dirOf(path)); - if (mkdir(path.c_str(), 0777) == -1 && errno != EEXIST) - throw SysError("creating directory '%1%'", path); - st = lstat(path); - created.push_back(path); - } - - if (S_ISLNK(st.st_mode) && stat(path.c_str(), &st) == -1) - throw SysError("statting symlink '%1%'", path); - - if (!S_ISDIR(st.st_mode)) throw Error("'%1%' is not a directory", path); - - return created; -} - - -void readFull(int fd, char * buf, size_t count) -{ - while (count) { - checkInterrupt(); - ssize_t res = read(fd, buf, count); - if (res == -1) { - if (errno == EINTR) continue; - throw SysError("reading from file"); - } - if (res == 0) throw EndOfFile("unexpected end-of-file"); - count -= res; - buf += res; - } -} - - -void writeFull(int fd, std::string_view s, bool allowInterrupts) -{ - while (!s.empty()) { - if (allowInterrupts) checkInterrupt(); - ssize_t res = write(fd, s.data(), s.size()); - if (res == -1 && errno != EINTR) - throw SysError("writing to file"); - if (res > 0) - s.remove_prefix(res); - } -} - - -std::string drainFD(int fd, bool block, const size_t reserveSize) -{ - // the parser needs two extra bytes to append terminating characters, other users will - // not care very much about the extra memory. 
- StringSink sink(reserveSize + 2); - drainFD(fd, sink, block); - return std::move(sink.s); -} - - -void drainFD(int fd, Sink & sink, bool block) -{ - // silence GCC maybe-uninitialized warning in finally - int saved = 0; - - if (!block) { - saved = fcntl(fd, F_GETFL); - if (fcntl(fd, F_SETFL, saved | O_NONBLOCK) == -1) - throw SysError("making file descriptor non-blocking"); - } - - Finally finally([&]() { - if (!block) { - if (fcntl(fd, F_SETFL, saved) == -1) - throw SysError("making file descriptor blocking"); - } - }); - - std::vector buf(64 * 1024); - while (1) { - checkInterrupt(); - ssize_t rd = read(fd, buf.data(), buf.size()); - if (rd == -1) { - if (!block && (errno == EAGAIN || errno == EWOULDBLOCK)) - break; - if (errno != EINTR) - throw SysError("reading from file"); - } - else if (rd == 0) break; - else sink({(char *) buf.data(), (size_t) rd}); - } -} - ////////////////////////////////////////////////////////////////////// -unsigned int getMaxCPU() -{ - #if __linux__ - try { - auto cgroupFS = getCgroupFS(); - if (!cgroupFS) return 0; - - auto cgroups = getCgroups("/proc/self/cgroup"); - auto cgroup = cgroups[""]; - if (cgroup == "") return 0; - - auto cpuFile = *cgroupFS + "/" + cgroup + "/cpu.max"; - - auto cpuMax = readFile(cpuFile); - auto cpuMaxParts = tokenizeString>(cpuMax, " \n"); - auto quota = cpuMaxParts[0]; - auto period = cpuMaxParts[1]; - if (quota != "max") - return std::ceil(std::stoi(quota) / std::stof(period)); - } catch (Error &) { ignoreException(lvlDebug); } - #endif - - return 0; -} - -////////////////////////////////////////////////////////////////////// - - -AutoDelete::AutoDelete() : del{false} {} - -AutoDelete::AutoDelete(const std::string & p, bool recursive) : path(p) -{ - del = true; - this->recursive = recursive; -} - -AutoDelete::~AutoDelete() -{ - try { - if (del) { - if (recursive) - deletePath(path); - else { - if (remove(path.c_str()) == -1) - throw SysError("cannot unlink '%1%'", path); - } - } - } catch (...) { - ignoreException(); - } -} - -void AutoDelete::cancel() -{ - del = false; -} - -void AutoDelete::reset(const Path & p, bool recursive) { - path = p; - this->recursive = recursive; - del = true; -} - - - -////////////////////////////////////////////////////////////////////// - - -AutoCloseFD::AutoCloseFD() : fd{-1} {} - - -AutoCloseFD::AutoCloseFD(int fd) : fd{fd} {} - - -AutoCloseFD::AutoCloseFD(AutoCloseFD && that) : fd{that.fd} -{ - that.fd = -1; -} - - -AutoCloseFD & AutoCloseFD::operator =(AutoCloseFD && that) -{ - close(); - fd = that.fd; - that.fd = -1; - return *this; -} - - -AutoCloseFD::~AutoCloseFD() -{ - try { - close(); - } catch (...) { - ignoreException(); - } -} - - -int AutoCloseFD::get() const -{ - return fd; -} - - -void AutoCloseFD::close() -{ - if (fd != -1) { - if (::close(fd) == -1) - /* This should never happen. 
*/ - throw SysError("closing file descriptor %1%", fd); - fd = -1; - } -} - -void AutoCloseFD::fsync() -{ - if (fd != -1) { - int result; -#if __APPLE__ - result = ::fcntl(fd, F_FULLFSYNC); -#else - result = ::fsync(fd); -#endif - if (result == -1) - throw SysError("fsync file descriptor %1%", fd); - } -} - - -AutoCloseFD::operator bool() const -{ - return fd != -1; -} - - -int AutoCloseFD::release() -{ - int oldFD = fd; - fd = -1; - return oldFD; -} - - -void Pipe::create() -{ - int fds[2]; -#if HAVE_PIPE2 - if (pipe2(fds, O_CLOEXEC) != 0) throw SysError("creating pipe"); -#else - if (pipe(fds) != 0) throw SysError("creating pipe"); - closeOnExec(fds[0]); - closeOnExec(fds[1]); -#endif - readSide = fds[0]; - writeSide = fds[1]; -} - - -void Pipe::close() -{ - readSide.close(); - writeSide.close(); -} - - -////////////////////////////////////////////////////////////////////// - - -Pid::Pid() -{ -} - - -Pid::Pid(pid_t pid) - : pid(pid) -{ -} - - -Pid::~Pid() -{ - if (pid != -1) kill(); -} - - -void Pid::operator =(pid_t pid) -{ - if (this->pid != -1 && this->pid != pid) kill(); - this->pid = pid; - killSignal = SIGKILL; // reset signal to default -} - - -Pid::operator pid_t() -{ - return pid; -} - - -int Pid::kill() -{ - assert(pid != -1); - - debug("killing process %1%", pid); - - /* Send the requested signal to the child. If it has its own - process group, send the signal to every process in the child - process group (which hopefully includes *all* its children). */ - if (::kill(separatePG ? -pid : pid, killSignal) != 0) { - /* On BSDs, killing a process group will return EPERM if all - processes in the group are zombies (or something like - that). So try to detect and ignore that situation. */ -#if __FreeBSD__ || __APPLE__ - if (errno != EPERM || ::kill(pid, 0) != 0) -#endif - logError(SysError("killing process %d", pid).info()); - } - - return wait(); -} - - -int Pid::wait() -{ - assert(pid != -1); - while (1) { - int status; - int res = waitpid(pid, &status, 0); - if (res == pid) { - pid = -1; - return status; - } - if (errno != EINTR) - throw SysError("cannot get exit status of PID %d", pid); - checkInterrupt(); - } -} - - -void Pid::setSeparatePG(bool separatePG) -{ - this->separatePG = separatePG; -} - - -void Pid::setKillSignal(int signal) -{ - this->killSignal = signal; -} - - -pid_t Pid::release() -{ - pid_t p = pid; - pid = -1; - return p; -} - - -void killUser(uid_t uid) -{ - debug("killing all processes running under uid '%1%'", uid); - - assert(uid != 0); /* just to be safe... */ - - /* The system call kill(-1, sig) sends the signal `sig' to all - users to which the current process can send signals. So we - fork a process, switch to uid, and send a mass kill. */ - - Pid pid = startProcess([&]() { - - if (setuid(uid) == -1) - throw SysError("setting uid"); - - while (true) { -#ifdef __APPLE__ - /* OSX's kill syscall takes a third parameter that, among - other things, determines if kill(-1, signo) affects the - calling process. In the OSX libc, it's set to true, - which means "follow POSIX", which we don't want here - */ - if (syscall(SYS_kill, -1, SIGKILL, false) == 0) break; -#else - if (kill(-1, SIGKILL) == 0) break; -#endif - if (errno == ESRCH || errno == EPERM) break; /* no more processes */ - if (errno != EINTR) - throw SysError("cannot kill processes for uid '%1%'", uid); - } - - _exit(0); - }); - - int status = pid.wait(); - if (status != 0) - throw Error("cannot kill processes for uid '%1%': %2%", uid, statusToString(status)); - - /* !!! 
We should really do some check to make sure that there are - no processes left running under `uid', but there is no portable - way to do so (I think). The most reliable way may be `ps -eo - uid | grep -q $uid'. */ -} - - -////////////////////////////////////////////////////////////////////// - - -/* Wrapper around vfork to prevent the child process from clobbering - the caller's stack frame in the parent. */ -static pid_t doFork(bool allowVfork, std::function fun) __attribute__((noinline)); -static pid_t doFork(bool allowVfork, std::function fun) -{ -#ifdef __linux__ - pid_t pid = allowVfork ? vfork() : fork(); -#else - pid_t pid = fork(); -#endif - if (pid != 0) return pid; - fun(); - abort(); -} - - -#if __linux__ -static int childEntry(void * arg) -{ - auto main = (std::function *) arg; - (*main)(); - return 1; -} -#endif - - -pid_t startProcess(std::function fun, const ProcessOptions & options) -{ - std::function wrapper = [&]() { - if (!options.allowVfork) - logger = makeSimpleLogger(); - try { -#if __linux__ - if (options.dieWithParent && prctl(PR_SET_PDEATHSIG, SIGKILL) == -1) - throw SysError("setting death signal"); -#endif - fun(); - } catch (std::exception & e) { - try { - std::cerr << options.errorPrefix << e.what() << "\n"; - } catch (...) { } - } catch (...) { } - if (options.runExitHandlers) - exit(1); - else - _exit(1); - }; - - pid_t pid = -1; - - if (options.cloneFlags) { - #ifdef __linux__ - // Not supported, since then we don't know when to free the stack. - assert(!(options.cloneFlags & CLONE_VM)); - - size_t stackSize = 1 * 1024 * 1024; - auto stack = (char *) mmap(0, stackSize, - PROT_WRITE | PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0); - if (stack == MAP_FAILED) throw SysError("allocating stack"); - - Finally freeStack([&]() { munmap(stack, stackSize); }); - - pid = clone(childEntry, stack + stackSize, options.cloneFlags | SIGCHLD, &wrapper); - #else - throw Error("clone flags are only supported on Linux"); - #endif - } else - pid = doFork(options.allowVfork, wrapper); - - if (pid == -1) throw SysError("unable to fork"); - - return pid; -} - - std::vector stringsToCharPtrs(const Strings & ss) { std::vector res; @@ -1168,211 +38,6 @@ std::vector stringsToCharPtrs(const Strings & ss) return res; } -std::string runProgram(Path program, bool searchPath, const Strings & args, - const std::optional & input, bool isInteractive) -{ - auto res = runProgram(RunOptions {.program = program, .searchPath = searchPath, .args = args, .input = input, .isInteractive = isInteractive}); - - if (!statusOk(res.first)) - throw ExecError(res.first, "program '%1%' %2%", program, statusToString(res.first)); - - return res.second; -} - -// Output = error code + "standard out" output stream -std::pair runProgram(RunOptions && options) -{ - StringSink sink; - options.standardOut = &sink; - - int status = 0; - - try { - runProgram2(options); - } catch (ExecError & e) { - status = e.status; - } - - return {status, std::move(sink.s)}; -} - -void runProgram2(const RunOptions & options) -{ - checkInterrupt(); - - assert(!(options.standardIn && options.input)); - - std::unique_ptr source_; - Source * source = options.standardIn; - - if (options.input) { - source_ = std::make_unique(*options.input); - source = source_.get(); - } - - /* Create a pipe. 
*/ - Pipe out, in; - if (options.standardOut) out.create(); - if (source) in.create(); - - ProcessOptions processOptions; - // vfork implies that the environment of the main process and the fork will - // be shared (technically this is undefined, but in practice that's the - // case), so we can't use it if we alter the environment - processOptions.allowVfork = !options.environment; - - std::optional>> resumeLoggerDefer; - if (options.isInteractive) { - logger->pause(); - resumeLoggerDefer.emplace( - []() { - logger->resume(); - } - ); - } - - /* Fork. */ - Pid pid = startProcess([&]() { - if (options.environment) - replaceEnv(*options.environment); - if (options.standardOut && dup2(out.writeSide.get(), STDOUT_FILENO) == -1) - throw SysError("dupping stdout"); - if (options.mergeStderrToStdout) - if (dup2(STDOUT_FILENO, STDERR_FILENO) == -1) - throw SysError("cannot dup stdout into stderr"); - if (source && dup2(in.readSide.get(), STDIN_FILENO) == -1) - throw SysError("dupping stdin"); - - if (options.chdir && chdir((*options.chdir).c_str()) == -1) - throw SysError("chdir failed"); - if (options.gid && setgid(*options.gid) == -1) - throw SysError("setgid failed"); - /* Drop all other groups if we're setgid. */ - if (options.gid && setgroups(0, 0) == -1) - throw SysError("setgroups failed"); - if (options.uid && setuid(*options.uid) == -1) - throw SysError("setuid failed"); - - Strings args_(options.args); - args_.push_front(options.program); - - restoreProcessContext(); - - if (options.searchPath) - execvp(options.program.c_str(), stringsToCharPtrs(args_).data()); - // This allows you to refer to a program with a pathname relative - // to the PATH variable. - else - execv(options.program.c_str(), stringsToCharPtrs(args_).data()); - - throw SysError("executing '%1%'", options.program); - }, processOptions); - - out.writeSide.close(); - - std::thread writerThread; - - std::promise promise; - - Finally doJoin([&]() { - if (writerThread.joinable()) - writerThread.join(); - }); - - - if (source) { - in.readSide.close(); - writerThread = std::thread([&]() { - try { - std::vector buf(8 * 1024); - while (true) { - size_t n; - try { - n = source->read(buf.data(), buf.size()); - } catch (EndOfFile &) { - break; - } - writeFull(in.writeSide.get(), {buf.data(), n}); - } - promise.set_value(); - } catch (...) { - promise.set_exception(std::current_exception()); - } - in.writeSide.close(); - }); - } - - if (options.standardOut) - drainFD(out.readSide.get(), *options.standardOut); - - /* Wait for the child to finish. */ - int status = pid.wait(); - - /* Wait for the writer thread to finish. 
*/ - if (source) promise.get_future().get(); - - if (status) - throw ExecError(status, "program '%1%' %2%", options.program, statusToString(status)); -} - - -void closeMostFDs(const std::set & exceptions) -{ -#if __linux__ - try { - for (auto & s : readDirectory("/proc/self/fd")) { - auto fd = std::stoi(s.name); - if (!exceptions.count(fd)) { - debug("closing leaked FD %d", fd); - close(fd); - } - } - return; - } catch (SysError &) { - } -#endif - - int maxFD = 0; - maxFD = sysconf(_SC_OPEN_MAX); - for (int fd = 0; fd < maxFD; ++fd) - if (!exceptions.count(fd)) - close(fd); /* ignore result */ -} - - -void closeOnExec(int fd) -{ - int prev; - if ((prev = fcntl(fd, F_GETFD, 0)) == -1 || - fcntl(fd, F_SETFD, prev | FD_CLOEXEC) == -1) - throw SysError("setting close-on-exec flag"); -} - - -////////////////////////////////////////////////////////////////////// - - -std::atomic _isInterrupted = false; - -static thread_local bool interruptThrown = false; -thread_local std::function interruptCheck; - -void setInterruptThrown() -{ - interruptThrown = true; -} - -void _interrupted() -{ - /* Block user interrupts while an exception is being handled. - Throwing an exception while another exception is being handled - kills the program! */ - if (!interruptThrown && !std::uncaught_exceptions()) { - interruptThrown = true; - throw Interrupted("interrupted by the user"); - } -} - ////////////////////////////////////////////////////////////////////// @@ -1438,32 +103,6 @@ std::string rewriteStrings(std::string s, const StringMap & rewrites) } -std::string statusToString(int status) -{ - if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) { - if (WIFEXITED(status)) - return fmt("failed with exit code %1%", WEXITSTATUS(status)); - else if (WIFSIGNALED(status)) { - int sig = WTERMSIG(status); -#if HAVE_STRSIGNAL - const char * description = strsignal(sig); - return fmt("failed due to signal %1% (%2%)", sig, description); -#else - return fmt("failed due to signal %1%", sig); -#endif - } - else - return "died abnormally"; - } else return "succeeded"; -} - - -bool statusOk(int status) -{ - return WIFEXITED(status) && WEXITSTATUS(status) == 0; -} - - bool hasPrefix(std::string_view s, std::string_view prefix) { return s.compare(0, prefix.size(), prefix) == 0; @@ -1511,82 +150,6 @@ void ignoreException(Verbosity lvl) } catch (...) { } } -bool shouldANSI() -{ - return isatty(STDERR_FILENO) - && getEnv("TERM").value_or("dumb") != "dumb" - && !(getEnv("NO_COLOR").has_value() || getEnv("NOCOLOR").has_value()); -} - -std::string filterANSIEscapes(std::string_view s, bool filterAll, unsigned int width) -{ - std::string t, e; - size_t w = 0; - auto i = s.begin(); - - while (w < (size_t) width && i != s.end()) { - - if (*i == '\e') { - std::string e; - e += *i++; - char last = 0; - - if (i != s.end() && *i == '[') { - e += *i++; - // eat parameter bytes - while (i != s.end() && *i >= 0x30 && *i <= 0x3f) e += *i++; - // eat intermediate bytes - while (i != s.end() && *i >= 0x20 && *i <= 0x2f) e += *i++; - // eat final byte - if (i != s.end() && *i >= 0x40 && *i <= 0x7e) e += last = *i++; - } else { - if (i != s.end() && *i >= 0x40 && *i <= 0x5f) e += *i++; - } - - if (!filterAll && last == 'm') - t += e; - } - - else if (*i == '\t') { - i++; t += ' '; w++; - while (w < (size_t) width && w % 8) { - t += ' '; w++; - } - } - - else if (*i == '\r' || *i == '\a') - // do nothing for now - i++; - - else { - w++; - // Copy one UTF-8 character. 
- if ((*i & 0xe0) == 0xc0) { - t += *i++; - if (i != s.end() && ((*i & 0xc0) == 0x80)) t += *i++; - } else if ((*i & 0xf0) == 0xe0) { - t += *i++; - if (i != s.end() && ((*i & 0xc0) == 0x80)) { - t += *i++; - if (i != s.end() && ((*i & 0xc0) == 0x80)) t += *i++; - } - } else if ((*i & 0xf8) == 0xf0) { - t += *i++; - if (i != s.end() && ((*i & 0xc0) == 0x80)) { - t += *i++; - if (i != s.end() && ((*i & 0xc0) == 0x80)) { - t += *i++; - if (i != s.end() && ((*i & 0xc0) == 0x80)) t += *i++; - } - } - } else - t += *i++; - } - } - - return t; -} - constexpr char base64Chars[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; @@ -1703,386 +266,9 @@ std::pair getLine(std::string_view s) } -////////////////////////////////////////////////////////////////////// - -static Sync> windowSize{{0, 0}}; - - -static void updateWindowSize() -{ - struct winsize ws; - if (ioctl(2, TIOCGWINSZ, &ws) == 0) { - auto windowSize_(windowSize.lock()); - windowSize_->first = ws.ws_row; - windowSize_->second = ws.ws_col; - } -} - - -std::pair getWindowSize() -{ - return *windowSize.lock(); -} - - -/* We keep track of interrupt callbacks using integer tokens, so we can iterate - safely without having to lock the data structure while executing arbitrary - functions. - */ -struct InterruptCallbacks { - typedef int64_t Token; - - /* We use unique tokens so that we can't accidentally delete the wrong - handler because of an erroneous double delete. */ - Token nextToken = 0; - - /* Used as a list, see InterruptCallbacks comment. */ - std::map> callbacks; -}; - -static Sync _interruptCallbacks; - -static void signalHandlerThread(sigset_t set) -{ - while (true) { - int signal = 0; - sigwait(&set, &signal); - - if (signal == SIGINT || signal == SIGTERM || signal == SIGHUP) - triggerInterrupt(); - - else if (signal == SIGWINCH) { - updateWindowSize(); - } - } -} - -void triggerInterrupt() -{ - _isInterrupted = true; - - { - InterruptCallbacks::Token i = 0; - while (true) { - std::function callback; - { - auto interruptCallbacks(_interruptCallbacks.lock()); - auto lb = interruptCallbacks->callbacks.lower_bound(i); - if (lb == interruptCallbacks->callbacks.end()) - break; - - callback = lb->second; - i = lb->first + 1; - } - - try { - callback(); - } catch (...) { - ignoreException(); - } - } - } -} - -static sigset_t savedSignalMask; -static bool savedSignalMaskIsSet = false; - -void setChildSignalMask(sigset_t * sigs) -{ - assert(sigs); // C style function, but think of sigs as a reference - -#if _POSIX_C_SOURCE >= 1 || _XOPEN_SOURCE || _POSIX_SOURCE - sigemptyset(&savedSignalMask); - // There's no "assign" or "copy" function, so we rely on (math) idempotence - // of the or operator: a or a = a. - sigorset(&savedSignalMask, sigs, sigs); -#else - // Without sigorset, our best bet is to assume that sigset_t is a type that - // can be assigned directly, such as is the case for a sigset_t defined as - // an integer type. 
- savedSignalMask = *sigs; -#endif - - savedSignalMaskIsSet = true; -} - -void saveSignalMask() { - if (sigprocmask(SIG_BLOCK, nullptr, &savedSignalMask)) - throw SysError("querying signal mask"); - - savedSignalMaskIsSet = true; -} - -void startSignalHandlerThread() -{ - updateWindowSize(); - - saveSignalMask(); - - sigset_t set; - sigemptyset(&set); - sigaddset(&set, SIGINT); - sigaddset(&set, SIGTERM); - sigaddset(&set, SIGHUP); - sigaddset(&set, SIGPIPE); - sigaddset(&set, SIGWINCH); - if (pthread_sigmask(SIG_BLOCK, &set, nullptr)) - throw SysError("blocking signals"); - - std::thread(signalHandlerThread, set).detach(); -} - -static void restoreSignals() -{ - // If startSignalHandlerThread wasn't called, that means we're not running - // in a proper libmain process, but a process that presumably manages its - // own signal handlers. Such a process should call either - // - initNix(), to be a proper libmain process - // - startSignalHandlerThread(), to resemble libmain regarding signal - // handling only - // - saveSignalMask(), for processes that define their own signal handling - // thread - // TODO: Warn about this? Have a default signal mask? The latter depends on - // whether we should generally inherit signal masks from the caller. - // I don't know what the larger unix ecosystem expects from us here. - if (!savedSignalMaskIsSet) - return; - - if (sigprocmask(SIG_SETMASK, &savedSignalMask, nullptr)) - throw SysError("restoring signals"); -} - -#if __linux__ -rlim_t savedStackSize = 0; -#endif - -void setStackSize(size_t stackSize) -{ - #if __linux__ - struct rlimit limit; - if (getrlimit(RLIMIT_STACK, &limit) == 0 && limit.rlim_cur < stackSize) { - savedStackSize = limit.rlim_cur; - limit.rlim_cur = stackSize; - setrlimit(RLIMIT_STACK, &limit); - } - #endif -} - -#if __linux__ -static AutoCloseFD fdSavedMountNamespace; -static AutoCloseFD fdSavedRoot; -#endif - -void saveMountNamespace() -{ -#if __linux__ - static std::once_flag done; - std::call_once(done, []() { - fdSavedMountNamespace = open("/proc/self/ns/mnt", O_RDONLY); - if (!fdSavedMountNamespace) - throw SysError("saving parent mount namespace"); - - fdSavedRoot = open("/proc/self/root", O_RDONLY); - }); -#endif -} - -void restoreMountNamespace() -{ -#if __linux__ - try { - auto savedCwd = absPath("."); - - if (fdSavedMountNamespace && setns(fdSavedMountNamespace.get(), CLONE_NEWNS) == -1) - throw SysError("restoring parent mount namespace"); - - if (fdSavedRoot) { - if (fchdir(fdSavedRoot.get())) - throw SysError("chdir into saved root"); - if (chroot(".")) - throw SysError("chroot into saved root"); - } - - if (chdir(savedCwd.c_str()) == -1) - throw SysError("restoring cwd"); - } catch (Error & e) { - debug(e.msg()); - } -#endif -} - -void unshareFilesystem() -{ -#ifdef __linux__ - if (unshare(CLONE_FS) != 0 && errno != EPERM) - throw SysError("unsharing filesystem state in download thread"); -#endif -} - -void restoreProcessContext(bool restoreMounts) -{ - restoreSignals(); - if (restoreMounts) { - restoreMountNamespace(); - } - - #if __linux__ - if (savedStackSize) { - struct rlimit limit; - if (getrlimit(RLIMIT_STACK, &limit) == 0) { - limit.rlim_cur = savedStackSize; - setrlimit(RLIMIT_STACK, &limit); - } - } - #endif -} - -/* RAII helper to automatically deregister a callback. 
*/ -struct InterruptCallbackImpl : InterruptCallback -{ - InterruptCallbacks::Token token; - ~InterruptCallbackImpl() override - { - auto interruptCallbacks(_interruptCallbacks.lock()); - interruptCallbacks->callbacks.erase(token); - } -}; - -std::unique_ptr createInterruptCallback(std::function callback) -{ - auto interruptCallbacks(_interruptCallbacks.lock()); - auto token = interruptCallbacks->nextToken++; - interruptCallbacks->callbacks.emplace(token, callback); - - auto res = std::make_unique(); - res->token = token; - - return std::unique_ptr(res.release()); -} - - -AutoCloseFD createUnixDomainSocket() -{ - AutoCloseFD fdSocket = socket(PF_UNIX, SOCK_STREAM - #ifdef SOCK_CLOEXEC - | SOCK_CLOEXEC - #endif - , 0); - if (!fdSocket) - throw SysError("cannot create Unix domain socket"); - closeOnExec(fdSocket.get()); - return fdSocket; -} - - -AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode) -{ - auto fdSocket = nix::createUnixDomainSocket(); - - bind(fdSocket.get(), path); - - if (chmod(path.c_str(), mode) == -1) - throw SysError("changing permissions on '%1%'", path); - - if (listen(fdSocket.get(), 100) == -1) - throw SysError("cannot listen on socket '%1%'", path); - - return fdSocket; -} - - -void bind(int fd, const std::string & path) -{ - unlink(path.c_str()); - - struct sockaddr_un addr; - addr.sun_family = AF_UNIX; - - if (path.size() + 1 >= sizeof(addr.sun_path)) { - Pid pid = startProcess([&]() { - Path dir = dirOf(path); - if (chdir(dir.c_str()) == -1) - throw SysError("chdir to '%s' failed", dir); - std::string base(baseNameOf(path)); - if (base.size() + 1 >= sizeof(addr.sun_path)) - throw Error("socket path '%s' is too long", base); - memcpy(addr.sun_path, base.c_str(), base.size() + 1); - if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) - throw SysError("cannot bind to socket '%s'", path); - _exit(0); - }); - int status = pid.wait(); - if (status != 0) - throw Error("cannot bind to socket '%s'", path); - } else { - memcpy(addr.sun_path, path.c_str(), path.size() + 1); - if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) - throw SysError("cannot bind to socket '%s'", path); - } -} - - -void connect(int fd, const std::string & path) -{ - struct sockaddr_un addr; - addr.sun_family = AF_UNIX; - - if (path.size() + 1 >= sizeof(addr.sun_path)) { - Pid pid = startProcess([&]() { - Path dir = dirOf(path); - if (chdir(dir.c_str()) == -1) - throw SysError("chdir to '%s' failed", dir); - std::string base(baseNameOf(path)); - if (base.size() + 1 >= sizeof(addr.sun_path)) - throw Error("socket path '%s' is too long", base); - memcpy(addr.sun_path, base.c_str(), base.size() + 1); - if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) - throw SysError("cannot connect to socket at '%s'", path); - _exit(0); - }); - int status = pid.wait(); - if (status != 0) - throw Error("cannot connect to socket at '%s'", path); - } else { - memcpy(addr.sun_path, path.c_str(), path.size() + 1); - if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) - throw SysError("cannot connect to socket at '%s'", path); - } -} - - std::string showBytes(uint64_t bytes) { return fmt("%.2f MiB", bytes / (1024.0 * 1024.0)); } - -// FIXME: move to libstore/build -void commonChildInit() -{ - logger = makeSimpleLogger(); - - const static std::string pathNullDevice = "/dev/null"; - restoreProcessContext(false); - - /* Put the child in a separate session (and thus a separate - process group) so that it has no controlling terminal (meaning - that e.g. 
ssh cannot open /dev/tty) and it doesn't receive - terminal signals. */ - if (setsid() == -1) - throw SysError("creating a new session"); - - /* Dup stderr to stdout. */ - if (dup2(STDERR_FILENO, STDOUT_FILENO) == -1) - throw SysError("cannot dup stderr into stdout"); - - /* Reroute stdin to /dev/null. */ - int fdDevNull = open(pathNullDevice.c_str(), O_RDWR); - if (fdDevNull == -1) - throw SysError("cannot open '%1%'", pathNullDevice); - if (dup2(fdDevNull, STDIN_FILENO) == -1) - throw SysError("cannot dup null device into stdin"); - close(fdDevNull); -} - } diff --git a/src/libutil/util.hh b/src/libutil/util.hh index 75683f8fe..5f730eaf6 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -4,485 +4,18 @@ #include "types.hh" #include "error.hh" #include "logging.hh" -#include "ansicolor.hh" - -#include -#include -#include -#include -#include #include -#include #include #include #include #include -#ifndef HAVE_STRUCT_DIRENT_D_TYPE -#define DT_UNKNOWN 0 -#define DT_REG 1 -#define DT_LNK 2 -#define DT_DIR 3 -#endif - namespace nix { -struct Sink; -struct Source; - void initLibUtil(); -/** - * @return an environment variable. - */ -std::optional getEnv(const std::string & key); - -/** - * @return a non empty environment variable. Returns nullopt if the env - * variable is set to "" - */ -std::optional getEnvNonEmpty(const std::string & key); - -/** - * Get the entire environment. - */ -std::map getEnv(); - -/** - * Clear the environment. - */ -void clearEnv(); - -/** - * @return An absolutized path, resolving paths relative to the - * specified directory, or the current directory otherwise. The path - * is also canonicalised. - */ -Path absPath(Path path, - std::optional dir = {}, - bool resolveSymlinks = false); - -/** - * Canonicalise a path by removing all `.` or `..` components and - * double or trailing slashes. Optionally resolves all symlink - * components such that each component of the resulting path is *not* - * a symbolic link. - */ -Path canonPath(PathView path, bool resolveSymlinks = false); - -/** - * @return The directory part of the given canonical path, i.e., - * everything before the final `/`. If the path is the root or an - * immediate child thereof (e.g., `/foo`), this means `/` - * is returned. - */ -Path dirOf(const PathView path); - -/** - * @return the base name of the given canonical path, i.e., everything - * following the final `/` (trailing slashes are removed). - */ -std::string_view baseNameOf(std::string_view path); - -/** - * Perform tilde expansion on a path. - */ -std::string expandTilde(std::string_view path); - -/** - * Check whether 'path' is a descendant of 'dir'. Both paths must be - * canonicalized. - */ -bool isInDir(std::string_view path, std::string_view dir); - -/** - * Check whether 'path' is equal to 'dir' or a descendant of - * 'dir'. Both paths must be canonicalized. - */ -bool isDirOrInDir(std::string_view path, std::string_view dir); - -/** - * Get status of `path`. - */ -struct stat stat(const Path & path); -struct stat lstat(const Path & path); - -/** - * @return true iff the given path exists. - */ -bool pathExists(const Path & path); - -/** - * A version of pathExists that returns false on a permission error. - * Useful for inferring default paths across directories that might not - * be readable. - * @return true iff the given path can be accessed and exists - */ -bool pathAccessible(const Path & path); - -/** - * Read the contents (target) of a symbolic link. The result is not - * in any way canonicalised. 
- */ -Path readLink(const Path & path); - -bool isLink(const Path & path); - -/** - * Read the contents of a directory. The entries `.` and `..` are - * removed. - */ -struct DirEntry -{ - std::string name; - ino_t ino; - /** - * one of DT_* - */ - unsigned char type; - DirEntry(std::string name, ino_t ino, unsigned char type) - : name(std::move(name)), ino(ino), type(type) { } -}; - -typedef std::vector DirEntries; - -DirEntries readDirectory(const Path & path); - -unsigned char getFileType(const Path & path); - -/** - * Read the contents of a file into a string. - */ -std::string readFile(int fd); -std::string readFile(const Path & path); -void readFile(const Path & path, Sink & sink); - -/** - * Write a string to a file. - */ -void writeFile(const Path & path, std::string_view s, mode_t mode = 0666, bool sync = false); - -void writeFile(const Path & path, Source & source, mode_t mode = 0666, bool sync = false); - -/** - * Flush a file's parent directory to disk - */ -void syncParent(const Path & path); - -/** - * Read a line from a file descriptor. - */ -std::string readLine(int fd); - -/** - * Write a line to a file descriptor. - */ -void writeLine(int fd, std::string s); - -/** - * Delete a path; i.e., in the case of a directory, it is deleted - * recursively. It's not an error if the path does not exist. The - * second variant returns the number of bytes and blocks freed. - */ -void deletePath(const Path & path); - -void deletePath(const Path & path, uint64_t & bytesFreed); - -std::string getUserName(); - -/** - * @return the given user's home directory from /etc/passwd. - */ -Path getHomeOf(uid_t userId); - -/** - * @return $HOME or the user's home directory from /etc/passwd. - */ -Path getHome(); - -/** - * @return $XDG_CACHE_HOME or $HOME/.cache. - */ -Path getCacheDir(); - -/** - * @return $XDG_CONFIG_HOME or $HOME/.config. - */ -Path getConfigDir(); - -/** - * @return the directories to search for user configuration files - */ -std::vector getConfigDirs(); - -/** - * @return $XDG_DATA_HOME or $HOME/.local/share. - */ -Path getDataDir(); - -/** - * @return the path of the current executable. - */ -std::optional getSelfExe(); - -/** - * @return $XDG_STATE_HOME or $HOME/.local/state. - */ -Path getStateDir(); - -/** - * Create the Nix state directory and return the path to it. - */ -Path createNixStateDir(); - -/** - * Create a directory and all its parents, if necessary. Returns the - * list of created directories, in order of creation. - */ -Paths createDirs(const Path & path); -inline Paths createDirs(PathView path) -{ - return createDirs(Path(path)); -} - -/** - * Create a symlink. - */ -void createSymlink(const Path & target, const Path & link); - -/** - * Atomically create or replace a symlink. - */ -void replaceSymlink(const Path & target, const Path & link); - -void renameFile(const Path & src, const Path & dst); - -/** - * Similar to 'renameFile', but fallback to a copy+remove if `src` and `dst` - * are on a different filesystem. - * - * Beware that this might not be atomic because of the copy that happens behind - * the scenes - */ -void moveFile(const Path & src, const Path & dst); - - -/** - * Wrappers arount read()/write() that read/write exactly the - * requested number of bytes. - */ -void readFull(int fd, char * buf, size_t count); -void writeFull(int fd, std::string_view s, bool allowInterrupts = true); - -MakeError(EndOfFile, Error); - - -/** - * Read a file descriptor until EOF occurs. 
- */ -std::string drainFD(int fd, bool block = true, const size_t reserveSize=0); - -void drainFD(int fd, Sink & sink, bool block = true); - -/** - * If cgroups are active, attempt to calculate the number of CPUs available. - * If cgroups are unavailable or if cpu.max is set to "max", return 0. - */ -unsigned int getMaxCPU(); - -/** - * Automatic cleanup of resources. - */ - - -class AutoDelete -{ - Path path; - bool del; - bool recursive; -public: - AutoDelete(); - AutoDelete(const Path & p, bool recursive = true); - ~AutoDelete(); - void cancel(); - void reset(const Path & p, bool recursive = true); - operator Path() const { return path; } - operator PathView() const { return path; } -}; - - -class AutoCloseFD -{ - int fd; -public: - AutoCloseFD(); - AutoCloseFD(int fd); - AutoCloseFD(const AutoCloseFD & fd) = delete; - AutoCloseFD(AutoCloseFD&& fd); - ~AutoCloseFD(); - AutoCloseFD& operator =(const AutoCloseFD & fd) = delete; - AutoCloseFD& operator =(AutoCloseFD&& fd); - int get() const; - explicit operator bool() const; - int release(); - void close(); - void fsync(); -}; - - -/** - * Create a temporary directory. - */ -Path createTempDir(const Path & tmpRoot = "", const Path & prefix = "nix", - bool includePid = true, bool useGlobalCounter = true, mode_t mode = 0755); - -/** - * Create a temporary file, returning a file handle and its path. - */ -std::pair createTempFile(const Path & prefix = "nix"); - - -class Pipe -{ -public: - AutoCloseFD readSide, writeSide; - void create(); - void close(); -}; - - -struct DIRDeleter -{ - void operator()(DIR * dir) const { - closedir(dir); - } -}; - -typedef std::unique_ptr AutoCloseDir; - - -class Pid -{ - pid_t pid = -1; - bool separatePG = false; - int killSignal = SIGKILL; -public: - Pid(); - Pid(pid_t pid); - ~Pid(); - void operator =(pid_t pid); - operator pid_t(); - int kill(); - int wait(); - - void setSeparatePG(bool separatePG); - void setKillSignal(int signal); - pid_t release(); -}; - - -/** - * Kill all processes running under the specified uid by sending them - * a SIGKILL. - */ -void killUser(uid_t uid); - - -/** - * Fork a process that runs the given function, and return the child - * pid to the caller. - */ -struct ProcessOptions -{ - std::string errorPrefix = ""; - bool dieWithParent = true; - bool runExitHandlers = false; - bool allowVfork = false; - /** - * use clone() with the specified flags (Linux only) - */ - int cloneFlags = 0; -}; - -pid_t startProcess(std::function fun, const ProcessOptions & options = ProcessOptions()); - - -/** - * Run a program and return its stdout in a string (i.e., like the - * shell backtick operator). - */ -std::string runProgram(Path program, bool searchPath = false, - const Strings & args = Strings(), - const std::optional & input = {}, bool isInteractive = false); - -struct RunOptions -{ - Path program; - bool searchPath = true; - Strings args; - std::optional uid; - std::optional gid; - std::optional chdir; - std::optional> environment; - std::optional input; - Source * standardIn = nullptr; - Sink * standardOut = nullptr; - bool mergeStderrToStdout = false; - bool isInteractive = false; -}; - -std::pair runProgram(RunOptions && options); - -void runProgram2(const RunOptions & options); - - -/** - * Change the stack size. - */ -void setStackSize(size_t stackSize); - - -/** - * Restore the original inherited Unix process context (such as signal - * masks, stack size). - - * See startSignalHandlerThread(), saveSignalMask(). 
- */ -void restoreProcessContext(bool restoreMounts = true); - -/** - * Save the current mount namespace. Ignored if called more than - * once. - */ -void saveMountNamespace(); - -/** - * Restore the mount namespace saved by saveMountNamespace(). Ignored - * if saveMountNamespace() was never called. - */ -void restoreMountNamespace(); - -/** - * Cause this thread to not share any FS attributes with the main - * thread, because this causes setns() in restoreMountNamespace() to - * fail. - */ -void unshareFilesystem(); - - -class ExecError : public Error -{ -public: - int status; - - template - ExecError(int status, const Args & ... args) - : Error(args...), status(status) - { } -}; - /** * Convert a list of strings to a null-terminated vector of `char * *`s. The result must not be accessed beyond the lifetime of the @@ -490,36 +23,6 @@ public: */ std::vector stringsToCharPtrs(const Strings & ss); -/** - * Close all file descriptors except those listed in the given set. - * Good practice in child processes. - */ -void closeMostFDs(const std::set & exceptions); - -/** - * Set the close-on-exec flag for the given file descriptor. - */ -void closeOnExec(int fd); - - -/* User interruption. */ - -extern std::atomic _isInterrupted; - -extern thread_local std::function interruptCheck; - -void setInterruptThrown(); - -void _interrupted(); - -void inline checkInterrupt() -{ - if (_isInterrupted || (interruptCheck && interruptCheck())) - _interrupted(); -} - -MakeError(Interrupted, BaseError); - MakeError(FormatError, Error); @@ -595,15 +98,6 @@ std::string replaceStrings( std::string rewriteStrings(std::string s, const StringMap & rewrites); -/** - * Convert the exit status of a child as returned by wait() into an - * error string. - */ -std::string statusToString(int status); - -bool statusOk(int status); - - /** * Parse a string into an integer. */ @@ -711,23 +205,6 @@ constexpr char treeLast[] = "└───"; constexpr char treeLine[] = "│ "; constexpr char treeNull[] = " "; -/** - * Determine whether ANSI escape sequences are appropriate for the - * present output. - */ -bool shouldANSI(); - -/** - * Truncate a string to 'width' printable characters. If 'filterAll' - * is true, all ANSI escape sequences are filtered out. Otherwise, - * some escape sequences (such as colour setting) are copied but not - * included in the character count. Also, tabs are expanded to - * spaces. - */ -std::string filterANSIEscapes(std::string_view s, - bool filterAll = false, - unsigned int width = std::numeric_limits::max()); - /** * Base64 encoding/decoding. @@ -815,61 +292,6 @@ template class Callback; -/** - * Start a thread that handles various signals. Also block those signals - * on the current thread (and thus any threads created by it). - * Saves the signal mask before changing the mask to block those signals. - * See saveSignalMask(). - */ -void startSignalHandlerThread(); - -/** - * Saves the signal mask, which is the signal mask that nix will restore - * before creating child processes. - * See setChildSignalMask() to set an arbitrary signal mask instead of the - * current mask. - */ -void saveSignalMask(); - -/** - * Sets the signal mask. Like saveSignalMask() but for a signal set that doesn't - * necessarily match the current thread's mask. - * See saveSignalMask() to set the saved mask to the current mask. 
- */ -void setChildSignalMask(sigset_t *sigs); - -struct InterruptCallback -{ - virtual ~InterruptCallback() { }; -}; - -/** - * Register a function that gets called on SIGINT (in a non-signal - * context). - */ -std::unique_ptr createInterruptCallback( - std::function callback); - -void triggerInterrupt(); - -/** - * A RAII class that causes the current thread to receive SIGUSR1 when - * the signal handler thread receives SIGINT. That is, this allows - * SIGINT to be multiplexed to multiple threads. - */ -struct ReceiveInterrupts -{ - pthread_t target; - std::unique_ptr callback; - - ReceiveInterrupts() - : target(pthread_self()) - , callback(createInterruptCallback([&]() { pthread_kill(target, SIGUSR1); })) - { } -}; - - - /** * A RAII helper that increments a counter on construction and * decrements it on destruction. @@ -884,45 +306,6 @@ struct MaintainCount }; -/** - * @return the number of rows and columns of the terminal. - */ -std::pair getWindowSize(); - - -/** - * Used in various places. - */ -typedef std::function PathFilter; - -extern PathFilter defaultPathFilter; - -/** - * Common initialisation performed in child processes. - */ -void commonChildInit(); - -/** - * Create a Unix domain socket. - */ -AutoCloseFD createUnixDomainSocket(); - -/** - * Create a Unix domain socket in listen mode. - */ -AutoCloseFD createUnixDomainSocket(const Path & path, mode_t mode); - -/** - * Bind a Unix domain socket to a path. - */ -void bind(int fd, const std::string & path); - -/** - * Connect to a Unix domain socket. - */ -void connect(int fd, const std::string & path); - - /** * A Rust/Python-like enumerate() iterator adapter. * diff --git a/src/nix-build/nix-build.cc b/src/nix-build/nix-build.cc index 60bc08146..75ce12a8c 100644 --- a/src/nix-build/nix-build.cc +++ b/src/nix-build/nix-build.cc @@ -9,12 +9,12 @@ #include +#include "current-process.hh" #include "parsed-derivations.hh" #include "store-api.hh" #include "local-fs-store.hh" #include "globals.hh" #include "derivations.hh" -#include "util.hh" #include "shared.hh" #include "path-with-outputs.hh" #include "eval.hh" diff --git a/src/nix-channel/nix-channel.cc b/src/nix-channel/nix-channel.cc index 4504441fa..79db78236 100644 --- a/src/nix-channel/nix-channel.cc +++ b/src/nix-channel/nix-channel.cc @@ -5,7 +5,7 @@ #include "store-api.hh" #include "legacy.hh" #include "eval-settings.hh" // for defexpr -#include "util.hh" +#include "users.hh" #include "tarball.hh" #include diff --git a/src/nix-collect-garbage/nix-collect-garbage.cc b/src/nix-collect-garbage/nix-collect-garbage.cc index 70af53b28..bb3f1bc6a 100644 --- a/src/nix-collect-garbage/nix-collect-garbage.cc +++ b/src/nix-collect-garbage/nix-collect-garbage.cc @@ -1,3 +1,5 @@ +#include "file-system.hh" +#include "signals.hh" #include "store-api.hh" #include "store-cast.hh" #include "gc-store.hh" diff --git a/src/nix-env/nix-env.cc b/src/nix-env/nix-env.cc index 25068f801..213a20d93 100644 --- a/src/nix-env/nix-env.cc +++ b/src/nix-env/nix-env.cc @@ -1,3 +1,4 @@ +#include "users.hh" #include "attr-path.hh" #include "common-eval-args.hh" #include "derivations.hh" @@ -11,7 +12,6 @@ #include "store-api.hh" #include "local-fs-store.hh" #include "user-env.hh" -#include "util.hh" #include "value-to-json.hh" #include "xml-writer.hh" #include "legacy.hh" diff --git a/src/nix-env/user-env.cc b/src/nix-env/user-env.cc index d12d70f33..250224e7d 100644 --- a/src/nix-env/user-env.cc +++ b/src/nix-env/user-env.cc @@ -1,5 +1,4 @@ #include "user-env.hh" -#include "util.hh" #include 
"derivations.hh" #include "store-api.hh" #include "path-with-outputs.hh" diff --git a/src/nix-instantiate/nix-instantiate.cc b/src/nix-instantiate/nix-instantiate.cc index d40196497..c67409e89 100644 --- a/src/nix-instantiate/nix-instantiate.cc +++ b/src/nix-instantiate/nix-instantiate.cc @@ -6,7 +6,6 @@ #include "attr-path.hh" #include "value-to-xml.hh" #include "value-to-json.hh" -#include "util.hh" #include "store-api.hh" #include "local-fs-store.hh" #include "common-eval-args.hh" diff --git a/src/nix-store/dotgraph.cc b/src/nix-store/dotgraph.cc index 577cadceb..2c530999b 100644 --- a/src/nix-store/dotgraph.cc +++ b/src/nix-store/dotgraph.cc @@ -1,5 +1,4 @@ #include "dotgraph.hh" -#include "util.hh" #include "store-api.hh" #include diff --git a/src/nix-store/graphml.cc b/src/nix-store/graphml.cc index 439557658..3e789a2d8 100644 --- a/src/nix-store/graphml.cc +++ b/src/nix-store/graphml.cc @@ -1,5 +1,4 @@ #include "graphml.hh" -#include "util.hh" #include "store-api.hh" #include "derivations.hh" diff --git a/src/nix-store/nix-store.cc b/src/nix-store/nix-store.cc index e4dd94585..123283dfe 100644 --- a/src/nix-store/nix-store.cc +++ b/src/nix-store/nix-store.cc @@ -11,7 +11,6 @@ #include "serve-protocol.hh" #include "serve-protocol-impl.hh" #include "shared.hh" -#include "util.hh" #include "graphml.hh" #include "legacy.hh" #include "path-with-outputs.hh" diff --git a/src/nix/daemon.cc b/src/nix/daemon.cc index af428018a..373dedf7c 100644 --- a/src/nix/daemon.cc +++ b/src/nix/daemon.cc @@ -1,11 +1,12 @@ ///@file +#include "signals.hh" +#include "unix-domain-socket.hh" #include "command.hh" #include "shared.hh" #include "local-store.hh" #include "remote-store.hh" #include "remote-store-connection.hh" -#include "util.hh" #include "serialise.hh" #include "archive.hh" #include "globals.hh" diff --git a/src/nix/develop.cc b/src/nix/develop.cc index b080a3939..38482ed42 100644 --- a/src/nix/develop.cc +++ b/src/nix/develop.cc @@ -8,7 +8,6 @@ #include "derivations.hh" #include "progress-bar.hh" #include "run.hh" -#include "util.hh" #include #include diff --git a/src/nix/doctor.cc b/src/nix/doctor.cc index 1aa6831d3..59f9e3e5d 100644 --- a/src/nix/doctor.cc +++ b/src/nix/doctor.cc @@ -6,7 +6,6 @@ #include "shared.hh" #include "store-api.hh" #include "local-fs-store.hh" -#include "util.hh" #include "worker-protocol.hh" using namespace nix; diff --git a/src/nix/edit.cc b/src/nix/edit.cc index 66629fab0..9cbab230b 100644 --- a/src/nix/edit.cc +++ b/src/nix/edit.cc @@ -1,3 +1,4 @@ +#include "current-process.hh" #include "command-installable-value.hh" #include "shared.hh" #include "eval.hh" diff --git a/src/nix/flake.cc b/src/nix/flake.cc index e8906a252..38938f09e 100644 --- a/src/nix/flake.cc +++ b/src/nix/flake.cc @@ -15,6 +15,7 @@ #include "registry.hh" #include "eval-cache.hh" #include "markdown.hh" +#include "users.hh" #include #include diff --git a/src/nix/main.cc b/src/nix/main.cc index d20bc1f8a..b582fc166 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -1,6 +1,8 @@ #include #include "args/root.hh" +#include "current-process.hh" +#include "namespaces.hh" #include "command.hh" #include "common-args.hh" #include "eval.hh" diff --git a/src/nix/run.cc b/src/nix/run.cc index 1465e8cde..ea0a17897 100644 --- a/src/nix/run.cc +++ b/src/nix/run.cc @@ -1,3 +1,4 @@ +#include "current-process.hh" #include "run.hh" #include "command-installable-value.hh" #include "common-args.hh" diff --git a/src/nix/sigs.cc b/src/nix/sigs.cc index 45cd2e1a6..a68616355 100644 --- a/src/nix/sigs.cc +++ 
b/src/nix/sigs.cc @@ -1,3 +1,4 @@ +#include "signals.hh" #include "command.hh" #include "shared.hh" #include "store-api.hh" diff --git a/src/nix/upgrade-nix.cc b/src/nix/upgrade-nix.cc index d238456db..c529c2363 100644 --- a/src/nix/upgrade-nix.cc +++ b/src/nix/upgrade-nix.cc @@ -1,3 +1,4 @@ +#include "processes.hh" #include "command.hh" #include "common-args.hh" #include "store-api.hh" diff --git a/src/nix/verify.cc b/src/nix/verify.cc index adaa33c0c..78cb765ce 100644 --- a/src/nix/verify.cc +++ b/src/nix/verify.cc @@ -4,6 +4,7 @@ #include "sync.hh" #include "thread-pool.hh" #include "references.hh" +#include "signals.hh" #include From 6472c3bf0d4b529f28f9e50834e1fc3dd101c409 Mon Sep 17 00:00:00 2001 From: ThinkChaos Date: Sat, 4 Nov 2023 12:08:00 -0400 Subject: [PATCH 13/55] fix(ssh): extraneous master processes --- src/libstore/ssh.cc | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc index 03b2f0be9..300eb391c 100644 --- a/src/libstore/ssh.cc +++ b/src/libstore/ssh.cc @@ -132,7 +132,6 @@ Path SSHMaster::startMaster() if (state->sshMaster != -1) return state->socketPath; - state->socketPath = (Path) *state->tmpDir + "/ssh.sock"; Pipe out; @@ -144,7 +143,8 @@ Path SSHMaster::startMaster() logger->pause(); Finally cleanup = [&]() { logger->resume(); }; - bool wasMasterRunning = isMasterRunning(); + if (isMasterRunning()) + return state->socketPath; state->sshMaster = startProcess([&]() { restoreProcessContext(); @@ -165,14 +165,13 @@ Path SSHMaster::startMaster() out.writeSide = -1; - if (!wasMasterRunning) { - std::string reply; - try { - reply = readLine(out.readSide.get()); - } catch (EndOfFile & e) { } + std::string reply; + try { + reply = readLine(out.readSide.get()); + } catch (EndOfFile & e) { } - if (reply != "started") - throw Error("failed to start SSH master connection to '%s'", host); + if (reply != "started") { + throw Error("failed to start SSH master connection to '%s'", host); } return state->socketPath; From 2fb49759b8307838dd1208d8ce756a60d41e4ebf Mon Sep 17 00:00:00 2001 From: ThinkChaos Date: Sat, 4 Nov 2023 12:32:57 -0400 Subject: [PATCH 14/55] fix(ssh): log first line of stdout Spent a while debugging why `nix-copy-closure` wasn't working anymore and it was my shell RC printing something I added for debug. Hopefully this can save someone else some time. --- src/libstore/ssh.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/libstore/ssh.cc b/src/libstore/ssh.cc index 300eb391c..5c8d6a504 100644 --- a/src/libstore/ssh.cc +++ b/src/libstore/ssh.cc @@ -114,8 +114,10 @@ std::unique_ptr SSHMaster::startCommand(const std::string reply = readLine(out.readSide.get()); } catch (EndOfFile & e) { } - if (reply != "started") + if (reply != "started") { + printTalkative("SSH stdout first line: %s", reply); throw Error("failed to start SSH connection to '%s'", host); + } } conn->out = std::move(out.readSide); @@ -171,6 +173,7 @@ Path SSHMaster::startMaster() } catch (EndOfFile & e) { } if (reply != "started") { + printTalkative("SSH master stdout first line: %s", reply); throw Error("failed to start SSH master connection to '%s'", host); } From 0b0d1b521449e7a66e7fa33ca7afe292d88aa14b Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 26 Oct 2023 19:39:21 -0400 Subject: [PATCH 15/55] Add comparison functions for `NarInfo` We will need these for tests. 
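A minimal sketch of how the generated comparison could be exercised in a gtest-style characterization test; the `NarInfoTest` fixture, the `store` handle and the `text` variable are illustrative assumptions, not part of this patch:

    // Illustrative sketch only: `NarInfoTest`, `store` and `text` are assumed to
    // come from a LibStoreTest-style fixture and a parsed .narinfo body.
    TEST_F(NarInfoTest, parse_compares_equal)
    {
        NarInfo a(*store, text, "test.narinfo");
        NarInfo b(*store, text, "test.narinfo");
        // operator== is provided by the GENERATE_CMP_EXT below (url, compression,
        // fileHash, fileSize, plus the ValidPathInfo base fields).
        ASSERT_EQ(a, b);
    }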
--- src/libstore/nar-info.cc | 9 +++++++++ src/libstore/nar-info.hh | 2 ++ 2 files changed, 11 insertions(+) diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc index ee2ddfd81..2b77c6ab7 100644 --- a/src/libstore/nar-info.cc +++ b/src/libstore/nar-info.cc @@ -4,6 +4,15 @@ namespace nix { +GENERATE_CMP_EXT( + , + NarInfo, + me->url, + me->compression, + me->fileHash, + me->fileSize, + static_cast(*me)); + NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & whence) : ValidPathInfo(StorePath(StorePath::dummy), Hash(Hash::dummy)) // FIXME: hack { diff --git a/src/libstore/nar-info.hh b/src/libstore/nar-info.hh index 5dbdafac3..1b3551106 100644 --- a/src/libstore/nar-info.hh +++ b/src/libstore/nar-info.hh @@ -24,6 +24,8 @@ struct NarInfo : ValidPathInfo NarInfo(const ValidPathInfo & info) : ValidPathInfo(info) { } NarInfo(const Store & store, const std::string & s, const std::string & whence); + DECLARE_CMP(NarInfo); + std::string to_string(const Store & store) const; }; From 07ac53732b8989758c264d4e847c94a5d28072cf Mon Sep 17 00:00:00 2001 From: Jacek Galowicz Date: Sun, 5 Nov 2023 15:27:25 +0100 Subject: [PATCH 16/55] Fix moves in appendOrSet --- src/libutil/config-impl.hh | 10 +++++----- src/libutil/config.cc | 20 +++++++++----------- src/libutil/config.hh | 2 +- 3 files changed, 15 insertions(+), 17 deletions(-) diff --git a/src/libutil/config-impl.hh b/src/libutil/config-impl.hh index b9639e761..9f69e8444 100644 --- a/src/libutil/config-impl.hh +++ b/src/libutil/config-impl.hh @@ -45,13 +45,13 @@ bool BaseSetting::isAppendable() return trait::appendable; } -template<> void BaseSetting::appendOrSet(Strings && newValue, bool append); -template<> void BaseSetting::appendOrSet(StringSet && newValue, bool append); -template<> void BaseSetting::appendOrSet(StringMap && newValue, bool append); -template<> void BaseSetting>::appendOrSet(std::set && newValue, bool append); +template<> void BaseSetting::appendOrSet(Strings newValue, bool append); +template<> void BaseSetting::appendOrSet(StringSet newValue, bool append); +template<> void BaseSetting::appendOrSet(StringMap newValue, bool append); +template<> void BaseSetting>::appendOrSet(std::set newValue, bool append); template -void BaseSetting::appendOrSet(T && newValue, bool append) +void BaseSetting::appendOrSet(T newValue, bool append) { static_assert( !trait::appendable, diff --git a/src/libutil/config.cc b/src/libutil/config.cc index 0bf36c987..5b510b69e 100644 --- a/src/libutil/config.cc +++ b/src/libutil/config.cc @@ -301,10 +301,11 @@ template<> Strings BaseSetting::parse(const std::string & str) const return tokenizeString(str); } -template<> void BaseSetting::appendOrSet(Strings && newValue, bool append) +template<> void BaseSetting::appendOrSet(Strings newValue, bool append) { if (!append) value.clear(); - for (auto && s : std::move(newValue)) value.push_back(std::move(s)); + value.insert(value.end(), std::make_move_iterator(newValue.begin()), + std::make_move_iterator(newValue.end())); } template<> std::string BaseSetting::to_string() const @@ -317,11 +318,10 @@ template<> StringSet BaseSetting::parse(const std::string & str) cons return tokenizeString(str); } -template<> void BaseSetting::appendOrSet(StringSet && newValue, bool append) +template<> void BaseSetting::appendOrSet(StringSet newValue, bool append) { if (!append) value.clear(); - for (auto && s : std::move(newValue)) - value.insert(s); + value.insert(std::make_move_iterator(newValue.begin()), 
std::make_move_iterator(newValue.end())); } template<> std::string BaseSetting::to_string() const @@ -342,11 +342,10 @@ template<> std::set BaseSetting void BaseSetting>::appendOrSet(std::set && newValue, bool append) +template<> void BaseSetting>::appendOrSet(std::set newValue, bool append) { if (!append) value.clear(); - for (auto && s : std::move(newValue)) - value.insert(s); + value.insert(std::make_move_iterator(newValue.begin()), std::make_move_iterator(newValue.end())); } template<> std::string BaseSetting>::to_string() const @@ -369,11 +368,10 @@ template<> StringMap BaseSetting::parse(const std::string & str) cons return res; } -template<> void BaseSetting::appendOrSet(StringMap && newValue, bool append) +template<> void BaseSetting::appendOrSet(StringMap newValue, bool append) { if (!append) value.clear(); - for (auto && [k, v] : std::move(newValue)) - value.emplace(std::move(k), std::move(v)); + value.insert(std::make_move_iterator(newValue.begin()), std::make_move_iterator(newValue.end())); } template<> std::string BaseSetting::to_string() const diff --git a/src/libutil/config.hh b/src/libutil/config.hh index 38c3ce0c4..d9441fb63 100644 --- a/src/libutil/config.hh +++ b/src/libutil/config.hh @@ -247,7 +247,7 @@ protected: * * @param append Whether to append or overwrite. */ - virtual void appendOrSet(T && newValue, bool append); + virtual void appendOrSet(T newValue, bool append); public: From ad385f9ec44f8d845e994764c45876042c715946 Mon Sep 17 00:00:00 2001 From: Jacek Galowicz Date: Sun, 5 Nov 2023 15:27:48 +0100 Subject: [PATCH 17/55] Minor improvements --- src/libutil/config.hh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/libutil/config.hh b/src/libutil/config.hh index d9441fb63..3f2522c38 100644 --- a/src/libutil/config.hh +++ b/src/libutil/config.hh @@ -150,7 +150,7 @@ public: AbstractSetting * setting; }; - typedef std::map Settings; + using Settings = std::map; private: @@ -316,7 +316,7 @@ std::ostream & operator <<(std::ostream & str, const BaseSetting & opt) } template -bool operator ==(const T & v1, const BaseSetting & v2) { return v1 == (const T &) v2; } +bool operator ==(const T & v1, const BaseSetting & v2) { return v1 == static_cast(v2); } template class Setting : public BaseSetting @@ -329,7 +329,7 @@ public: const std::set & aliases = {}, const bool documentDefault = true, std::optional experimentalFeature = std::nullopt) - : BaseSetting(def, documentDefault, name, description, aliases, experimentalFeature) + : BaseSetting(def, documentDefault, name, description, aliases, std::move(experimentalFeature)) { options->addSetting(this); } From f404e9b3b362a054219797df02bbe277de249f80 Mon Sep 17 00:00:00 2001 From: Jacek Galowicz Date: Sun, 5 Nov 2023 16:12:20 +0100 Subject: [PATCH 18/55] Make toJSONObject const --- src/libutil/abstract-setting-to-json.hh | 2 +- src/libutil/config.cc | 2 +- src/libutil/config.hh | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/libutil/abstract-setting-to-json.hh b/src/libutil/abstract-setting-to-json.hh index d506dfb74..eea687d8a 100644 --- a/src/libutil/abstract-setting-to-json.hh +++ b/src/libutil/abstract-setting-to-json.hh @@ -7,7 +7,7 @@ namespace nix { template -std::map BaseSetting::toJSONObject() +std::map BaseSetting::toJSONObject() const { auto obj = AbstractSetting::toJSONObject(); obj.emplace("value", value); diff --git a/src/libutil/config.cc b/src/libutil/config.cc index 5b510b69e..2a5cf6212 100644 --- a/src/libutil/config.cc +++ b/src/libutil/config.cc @@ -221,7 
+221,7 @@ nlohmann::json AbstractSetting::toJSON() return nlohmann::json(toJSONObject()); } -std::map AbstractSetting::toJSONObject() +std::map AbstractSetting::toJSONObject() const { std::map obj; obj.emplace("description", description); diff --git a/src/libutil/config.hh b/src/libutil/config.hh index 3f2522c38..5d7bd8e0c 100644 --- a/src/libutil/config.hh +++ b/src/libutil/config.hh @@ -213,7 +213,7 @@ protected: nlohmann::json toJSON(); - virtual std::map toJSONObject(); + virtual std::map toJSONObject() const; virtual void convertToArg(Args & args, const std::string & category); @@ -306,7 +306,7 @@ public: void convertToArg(Args & args, const std::string & category) override; - std::map toJSONObject() override; + std::map toJSONObject() const override; }; template From a4b7df7bfaee7d27a152be2445886c81881daf94 Mon Sep 17 00:00:00 2001 From: Jacek Galowicz Date: Mon, 6 Nov 2023 15:47:25 +0100 Subject: [PATCH 19/55] More const, scope reductions, move fixes --- src/libutil/config.cc | 60 +++++++++++++++++++------------------------ 1 file changed, 27 insertions(+), 33 deletions(-) diff --git a/src/libutil/config.cc b/src/libutil/config.cc index 2a5cf6212..8e7901133 100644 --- a/src/libutil/config.cc +++ b/src/libutil/config.cc @@ -36,27 +36,25 @@ bool Config::set(const std::string & name, const std::string & value) void Config::addSetting(AbstractSetting * setting) { _settings.emplace(setting->name, Config::SettingData{false, setting}); - for (auto & alias : setting->aliases) + for (const auto & alias : setting->aliases) _settings.emplace(alias, Config::SettingData{true, setting}); bool set = false; - auto i = unknownSettings.find(setting->name); - if (i != unknownSettings.end()) { - setting->set(i->second); + if (auto i = unknownSettings.find(setting->name); i != unknownSettings.end()) { + setting->set(std::move(i->second)); setting->overridden = true; unknownSettings.erase(i); set = true; } for (auto & alias : setting->aliases) { - auto i = unknownSettings.find(alias); - if (i != unknownSettings.end()) { + if (auto i = unknownSettings.find(alias); i != unknownSettings.end()) { if (set) warn("setting '%s' is set, but it's an alias of '%s' which is also set", alias, setting->name); else { - setting->set(i->second); + setting->set(std::move(i->second)); setting->overridden = true; unknownSettings.erase(i); set = true; @@ -71,7 +69,7 @@ AbstractConfig::AbstractConfig(StringMap initials) void AbstractConfig::warnUnknownSettings() { - for (auto & s : unknownSettings) + for (const auto & s : unknownSettings) warn("unknown setting '%s'", s.first); } @@ -85,7 +83,7 @@ void AbstractConfig::reapplyUnknownSettings() void Config::getSettings(std::map & res, bool overriddenOnly) { - for (auto & opt : _settings) + for (const auto & opt : _settings) if (!opt.second.isAlias && (!overriddenOnly || opt.second.setting->overridden)) res.emplace(opt.first, SettingInfo{opt.second.setting->to_string(), opt.second.setting->description}); } @@ -101,8 +99,7 @@ void AbstractConfig::applyConfig(const std::string & contents, const std::string line += contents[pos++]; pos++; - auto hash = line.find('#'); - if (hash != std::string::npos) + if (auto hash = line.find('#'); hash != line.npos) line = std::string(line, 0, hash); auto tokens = tokenizeString>(line); @@ -135,24 +132,24 @@ void AbstractConfig::applyConfig(const std::string & contents, const std::string if (tokens[1] != "=") throw UsageError("illegal configuration line '%1%' in '%2%'", line, path); - std::string name = tokens[0]; + std::string name = 
std::move(tokens[0]); auto i = tokens.begin(); advance(i, 2); parsedContents.push_back({ - name, + std::move(name), concatStringsSep(" ", Strings(i, tokens.end())), }); }; // First apply experimental-feature related settings - for (auto & [name, value] : parsedContents) + for (const auto & [name, value] : parsedContents) if (name == "experimental-features" || name == "extra-experimental-features") set(name, value); // Then apply other settings - for (auto & [name, value] : parsedContents) + for (const auto & [name, value] : parsedContents) if (name != "experimental-features" && name != "extra-experimental-features") set(name, value); } @@ -174,7 +171,7 @@ void Config::resetOverridden() nlohmann::json Config::toJSON() { auto res = nlohmann::json::object(); - for (auto & s : _settings) + for (const auto & s : _settings) if (!s.second.isAlias) res.emplace(s.first, s.second.setting->toJSON()); return res; @@ -182,8 +179,8 @@ nlohmann::json Config::toJSON() std::string Config::toKeyValue() { - auto res = std::string(); - for (auto & s : _settings) + std::string res; + for (const auto & s : _settings) if (s.second.isAlias) res += fmt("%s = %s\n", s.first, s.second.setting->to_string()); return res; @@ -205,7 +202,7 @@ AbstractSetting::AbstractSetting( : name(name) , description(stripIndentation(description)) , aliases(aliases) - , experimentalFeature(experimentalFeature) + , experimentalFeature(std::move(experimentalFeature)) { } @@ -284,14 +281,14 @@ template<> void BaseSetting::convertToArg(Args & args, const std::string & .longName = name, .description = fmt("Enable the `%s` setting.", name), .category = category, - .handler = {[this]() { override(true); }}, + .handler = {[this] { override(true); }}, .experimentalFeature = experimentalFeature, }); args.addFlag({ .longName = "no-" + name, .description = fmt("Disable the `%s` setting.", name), .category = category, - .handler = {[this]() { override(false); }}, + .handler = {[this] { override(false); }}, .experimentalFeature = experimentalFeature, }); } @@ -333,8 +330,7 @@ template<> std::set BaseSetting res; for (auto & s : tokenizeString(str)) { - auto thisXpFeature = parseExperimentalFeature(s); - if (thisXpFeature) + if (auto thisXpFeature = parseExperimentalFeature(s); thisXpFeature) res.insert(thisXpFeature.value()); else warn("unknown experimental feature '%s'", s); @@ -351,7 +347,7 @@ template<> void BaseSetting>::appendOrSet(std::set template<> std::string BaseSetting>::to_string() const { StringSet stringifiedXpFeatures; - for (auto & feature : value) + for (const auto & feature : value) stringifiedXpFeatures.insert(std::string(showExperimentalFeature(feature))); return concatStringsSep(" ", stringifiedXpFeatures); } @@ -359,9 +355,8 @@ template<> std::string BaseSetting>::to_string() c template<> StringMap BaseSetting::parse(const std::string & str) const { StringMap res; - for (auto & s : tokenizeString(str)) { - auto eq = s.find_first_of('='); - if (std::string::npos != eq) + for (const auto & s : tokenizeString(str)) { + if (auto eq = s.find_first_of('='); s.npos != eq) res.emplace(std::string(s, 0, eq), std::string(s, eq + 1)); // else ignored } @@ -376,10 +371,9 @@ template<> void BaseSetting::appendOrSet(StringMap newValue, bool app template<> std::string BaseSetting::to_string() const { - Strings kvstrs; - std::transform(value.begin(), value.end(), back_inserter(kvstrs), - [&](auto kvpair){ return kvpair.first + "=" + kvpair.second; }); - return concatStringsSep(" ", kvstrs); + return std::transform_reduce(value.cbegin(), 
value.cend(), std::string{}, + [](const auto & l, const auto &r) { return l + " " + r; }, + [](const auto & kvpair){ return kvpair.first + "=" + kvpair.second; }); } template class BaseSetting; @@ -468,7 +462,7 @@ void GlobalConfig::resetOverridden() nlohmann::json GlobalConfig::toJSON() { auto res = nlohmann::json::object(); - for (auto & config : *configRegistrations) + for (const auto & config : *configRegistrations) res.update(config->toJSON()); return res; } @@ -478,7 +472,7 @@ std::string GlobalConfig::toKeyValue() std::string res; std::map settings; globalConfig.getSettings(settings); - for (auto & s : settings) + for (const auto & s : settings) res += fmt("%s = %s\n", s.first, s.second.value); return res; } From 937e02e7b9538fd4500ade184eb4f0a888a9967d Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sun, 22 Oct 2023 21:12:54 -0400 Subject: [PATCH 20/55] Shuffle `ValidPathInfo` JSON rendering `Store::pathInfoToJSON` was a rather baroque function, being full of parameters to support both parsed derivations and `nix path-info`. The common core of each, a simple `ValidPathInfo::toJSON` function, is factored out, but the rest of the logic is just duplicated and then specialized to its use-case (at which point it is no longer that duplicated). This keeps the human-oriented CLI logic (which is currently unstable) and the core domain logic (export reference graphs with structured attrs, which is stable), separate, which I think is better. --- src/libstore/nar-info.cc | 46 +++++++++ src/libstore/nar-info.hh | 9 ++ src/libstore/parsed-derivations.cc | 34 ++++++- src/libstore/path-info.cc | 99 +++++++++++++++++++ src/libstore/path-info.hh | 12 +++ src/libstore/store-api.cc | 90 ----------------- src/libstore/store-api.hh | 23 ----- src/libstore/tests/nar-info.cc | 84 ++++++++++++++++ src/libstore/tests/path-info.cc | 79 +++++++++++++++ src/libutil/tests/characterization.hh | 1 + src/nix/path-info.cc | 80 ++++++++++++++- unit-test-data/libstore/nar-info/impure.json | 21 ++++ unit-test-data/libstore/nar-info/pure.json | 11 +++ unit-test-data/libstore/path-info/impure.json | 18 ++++ unit-test-data/libstore/path-info/pure.json | 11 +++ 15 files changed, 499 insertions(+), 119 deletions(-) create mode 100644 src/libstore/tests/nar-info.cc create mode 100644 src/libstore/tests/path-info.cc create mode 100644 unit-test-data/libstore/nar-info/impure.json create mode 100644 unit-test-data/libstore/nar-info/pure.json create mode 100644 unit-test-data/libstore/path-info/impure.json create mode 100644 unit-test-data/libstore/path-info/pure.json diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc index 2b77c6ab7..a90812ff9 100644 --- a/src/libstore/nar-info.cc +++ b/src/libstore/nar-info.cc @@ -134,4 +134,50 @@ std::string NarInfo::to_string(const Store & store) const return res; } +nlohmann::json NarInfo::toJSON( + const Store & store, + bool includeImpureInfo, + HashFormat hashFormat) const +{ + using nlohmann::json; + + auto jsonObject = ValidPathInfo::toJSON(store, includeImpureInfo, hashFormat); + + if (includeImpureInfo) { + if (!url.empty()) + jsonObject["url"] = url; + if (fileHash) + jsonObject["downloadHash"] = fileHash->to_string(hashFormat, true); + if (fileSize) + jsonObject["downloadSize"] = fileSize; + } + + return jsonObject; +} + +NarInfo NarInfo::fromJSON( + const Store & store, + const StorePath & path, + const nlohmann::json & json) +{ + using nlohmann::detail::value_t; + + NarInfo res { ValidPathInfo::fromJSON(store, json) }; + res.path = path; + + if
(json.contains("url")) + res.url = ensureType(valueAt(json, "url"), value_t::string); + + if (json.contains("downloadHash")) + res.fileHash = Hash::parseAny( + static_cast( + ensureType(valueAt(json, "downloadHash"), value_t::string)), + std::nullopt); + + if (json.contains("downloadSize")) + res.fileSize = ensureType(valueAt(json, "downloadSize"), value_t::number_integer); + + return res; +} + } diff --git a/src/libstore/nar-info.hh b/src/libstore/nar-info.hh index 1b3551106..cec65ff70 100644 --- a/src/libstore/nar-info.hh +++ b/src/libstore/nar-info.hh @@ -27,6 +27,15 @@ struct NarInfo : ValidPathInfo DECLARE_CMP(NarInfo); std::string to_string(const Store & store) const; + + nlohmann::json toJSON( + const Store & store, + bool includeImpureInfo, + HashFormat hashFormat) const override; + static NarInfo fromJSON( + const Store & store, + const StorePath & path, + const nlohmann::json & json); }; } diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc index 1d900c272..45629dc7f 100644 --- a/src/libstore/parsed-derivations.cc +++ b/src/libstore/parsed-derivations.cc @@ -132,6 +132,36 @@ bool ParsedDerivation::useUidRange() const static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*"); +/** + * Write a JSON representation of store object metadata, such as the + * hash and the references. + */ +static nlohmann::json pathInfoToJSON( + Store & store, + const StorePathSet & storePaths) +{ + nlohmann::json::array_t jsonList = nlohmann::json::array(); + + for (auto & storePath : storePaths) { + auto info = store.queryPathInfo(storePath); + + auto & jsonPath = jsonList.emplace_back( + info->toJSON(store, false, HashFormat::Base32)); + + jsonPath["closureSize"] = ({ + uint64_t totalNarSize = 0; + StorePathSet closure; + store.computeFSClosure(info->path, closure, false, false); + for (auto & p : closure) { + auto info = store.queryPathInfo(p); + totalNarSize += info->narSize; + } + totalNarSize; + }); + } + return jsonList; +} + std::optional ParsedDerivation::prepareStructuredAttrs(Store & store, const StorePathSet & inputPaths) { auto structuredAttrs = getStructuredAttrs(); @@ -152,8 +182,8 @@ std::optional ParsedDerivation::prepareStructuredAttrs(Store & s StorePathSet storePaths; for (auto & p : *i) storePaths.insert(store.parseStorePath(p.get())); - json[i.key()] = store.pathInfoToJSON( - store.exportReferences(storePaths, inputPaths), false, true); + json[i.key()] = pathInfoToJSON(store, + store.exportReferences(storePaths, inputPaths)); } } diff --git a/src/libstore/path-info.cc b/src/libstore/path-info.cc index ab39e71f4..e5d5205f4 100644 --- a/src/libstore/path-info.cc +++ b/src/libstore/path-info.cc @@ -1,5 +1,8 @@ +#include + #include "path-info.hh" #include "store-api.hh" +#include "json-utils.hh" namespace nix { @@ -144,4 +147,100 @@ ValidPathInfo::ValidPathInfo( }, std::move(ca).raw); } + +nlohmann::json ValidPathInfo::toJSON( + const Store & store, + bool includeImpureInfo, + HashFormat hashFormat) const +{ + using nlohmann::json; + + auto jsonObject = json::object(); + + jsonObject["path"] = store.printStorePath(path); + jsonObject["valid"] = true; + jsonObject["narHash"] = narHash.to_string(hashFormat, true); + jsonObject["narSize"] = narSize; + + { + auto& jsonRefs = (jsonObject["references"] = json::array()); + for (auto & ref : references) + jsonRefs.emplace_back(store.printStorePath(ref)); + } + + if (ca) + jsonObject["ca"] = renderContentAddress(ca); + + if (includeImpureInfo) { + if (deriver) + jsonObject["deriver"] = 
store.printStorePath(*deriver); + + if (registrationTime) + jsonObject["registrationTime"] = registrationTime; + + if (ultimate) + jsonObject["ultimate"] = ultimate; + + if (!sigs.empty()) { + for (auto & sig : sigs) + jsonObject["signatures"].push_back(sig); + } + } + + return jsonObject; +} + +ValidPathInfo ValidPathInfo::fromJSON( + const Store & store, + const nlohmann::json & json) +{ + using nlohmann::detail::value_t; + + ValidPathInfo res { + StorePath(StorePath::dummy), + Hash(Hash::dummy), + }; + + ensureType(json, value_t::object); + res.path = store.parseStorePath( + static_cast( + ensureType(valueAt(json, "path"), value_t::string))); + res.narHash = Hash::parseAny( + static_cast( + ensureType(valueAt(json, "narHash"), value_t::string)), + std::nullopt); + res.narSize = ensureType(valueAt(json, "narSize"), value_t::number_integer); + + try { + auto & references = ensureType(valueAt(json, "references"), value_t::array); + for (auto & input : references) + res.references.insert(store.parseStorePath(static_cast +(input))); + } catch (Error & e) { + e.addTrace({}, "while reading key 'references'"); + throw; + } + + if (json.contains("ca")) + res.ca = ContentAddress::parse( + static_cast( + ensureType(valueAt(json, "ca"), value_t::string))); + + if (json.contains("deriver")) + res.deriver = store.parseStorePath( + static_cast( + ensureType(valueAt(json, "deriver"), value_t::string))); + + if (json.contains("registrationTime")) + res.registrationTime = ensureType(valueAt(json, "registrationTime"), value_t::number_integer); + + if (json.contains("ultimate")) + res.ultimate = ensureType(valueAt(json, "ultimate"), value_t::boolean); + + if (json.contains("signatures")) + res.sigs = valueAt(json, "signatures"); + + return res; +} + } diff --git a/src/libstore/path-info.hh b/src/libstore/path-info.hh index c4c4a6366..feeda6c27 100644 --- a/src/libstore/path-info.hh +++ b/src/libstore/path-info.hh @@ -125,6 +125,18 @@ struct ValidPathInfo : UnkeyedValidPathInfo { Strings shortRefs() const; + /** + * @param includeImpureInfo If true, variable elements such as the + * registration time are included. 
+ */ + virtual nlohmann::json toJSON( + const Store & store, + bool includeImpureInfo, + HashFormat hashFormat) const; + static ValidPathInfo fromJSON( + const Store & store, + const nlohmann::json & json); + ValidPathInfo(const ValidPathInfo & other) = default; ValidPathInfo(StorePath && path, UnkeyedValidPathInfo info) : UnkeyedValidPathInfo(info), path(std::move(path)) { }; diff --git a/src/libstore/store-api.cc b/src/libstore/store-api.cc index c9ebb6c14..0f88d9b92 100644 --- a/src/libstore/store-api.cc +++ b/src/libstore/store-api.cc @@ -951,96 +951,6 @@ StorePathSet Store::exportReferences(const StorePathSet & storePaths, const Stor return paths; } -json Store::pathInfoToJSON(const StorePathSet & storePaths, - bool includeImpureInfo, bool showClosureSize, - HashFormat hashFormat, - AllowInvalidFlag allowInvalid) -{ - json::array_t jsonList = json::array(); - - for (auto & storePath : storePaths) { - auto& jsonPath = jsonList.emplace_back(json::object()); - - try { - auto info = queryPathInfo(storePath); - - jsonPath["path"] = printStorePath(info->path); - jsonPath["valid"] = true; - jsonPath["narHash"] = info->narHash.to_string(hashFormat, true); - jsonPath["narSize"] = info->narSize; - - { - auto& jsonRefs = (jsonPath["references"] = json::array()); - for (auto & ref : info->references) - jsonRefs.emplace_back(printStorePath(ref)); - } - - if (info->ca) - jsonPath["ca"] = renderContentAddress(info->ca); - - std::pair closureSizes; - - if (showClosureSize) { - closureSizes = getClosureSize(info->path); - jsonPath["closureSize"] = closureSizes.first; - } - - if (includeImpureInfo) { - - if (info->deriver) - jsonPath["deriver"] = printStorePath(*info->deriver); - - if (info->registrationTime) - jsonPath["registrationTime"] = info->registrationTime; - - if (info->ultimate) - jsonPath["ultimate"] = info->ultimate; - - if (!info->sigs.empty()) { - for (auto & sig : info->sigs) - jsonPath["signatures"].push_back(sig); - } - - auto narInfo = std::dynamic_pointer_cast( - std::shared_ptr(info)); - - if (narInfo) { - if (!narInfo->url.empty()) - jsonPath["url"] = narInfo->url; - if (narInfo->fileHash) - jsonPath["downloadHash"] = narInfo->fileHash->to_string(hashFormat, true); - if (narInfo->fileSize) - jsonPath["downloadSize"] = narInfo->fileSize; - if (showClosureSize) - jsonPath["closureDownloadSize"] = closureSizes.second; - } - } - - } catch (InvalidPath &) { - jsonPath["path"] = printStorePath(storePath); - jsonPath["valid"] = false; - } - } - return jsonList; -} - - -std::pair Store::getClosureSize(const StorePath & storePath) -{ - uint64_t totalNarSize = 0, totalDownloadSize = 0; - StorePathSet closure; - computeFSClosure(storePath, closure, false, false); - for (auto & p : closure) { - auto info = queryPathInfo(p); - totalNarSize += info->narSize; - auto narInfo = std::dynamic_pointer_cast( - std::shared_ptr(info)); - if (narInfo) - totalDownloadSize += narInfo->fileSize; - } - return {totalNarSize, totalDownloadSize}; -} - const Store::Stats & Store::getStats() { diff --git a/src/libstore/store-api.hh b/src/libstore/store-api.hh index 6aa317e3d..32ad2aa44 100644 --- a/src/libstore/store-api.hh +++ b/src/libstore/store-api.hh @@ -80,7 +80,6 @@ typedef std::map OutputPathMap; enum CheckSigsFlag : bool { NoCheckSigs = false, CheckSigs = true }; enum SubstituteFlag : bool { NoSubstitute = false, Substitute = true }; -enum AllowInvalidFlag : bool { DisallowInvalid = false, AllowInvalid = true }; /** * Magic header of exportPath() output (obsolete). 
@@ -665,28 +664,6 @@ public: std::string makeValidityRegistration(const StorePathSet & paths, bool showDerivers, bool showHash); - /** - * Write a JSON representation of store path metadata, such as the - * hash and the references. - * - * @param includeImpureInfo If true, variable elements such as the - * registration time are included. - * - * @param showClosureSize If true, the closure size of each path is - * included. - */ - nlohmann::json pathInfoToJSON(const StorePathSet & storePaths, - bool includeImpureInfo, bool showClosureSize, - HashFormat hashFormat = HashFormat::Base32, - AllowInvalidFlag allowInvalid = DisallowInvalid); - - /** - * @return the size of the closure of the specified path, that is, - * the sum of the size of the NAR serialisation of each path in the - * closure. - */ - std::pair getClosureSize(const StorePath & storePath); - /** * Optimise the disk space usage of the Nix store by hard-linking files * with the same contents. diff --git a/src/libstore/tests/nar-info.cc b/src/libstore/tests/nar-info.cc new file mode 100644 index 000000000..cb92f3a28 --- /dev/null +++ b/src/libstore/tests/nar-info.cc @@ -0,0 +1,84 @@ +#include +#include + +#include "path-info.hh" + +#include "tests/characterization.hh" +#include "tests/libstore.hh" + +namespace nix { + +using nlohmann::json; + +class NarInfoTest : public CharacterizationTest, public LibStoreTest +{ + Path unitTestData = getUnitTestData() + "/libstore/nar-info"; + + Path goldenMaster(PathView testStem) const override { + return unitTestData + "/" + testStem + ".json"; + } +}; + +static NarInfo makeNarInfo(const Store & store, bool includeImpureInfo) { + NarInfo info = ValidPathInfo { + store, + "foo", + FixedOutputInfo { + .method = FileIngestionMethod::Recursive, + .hash = hashString(HashType::htSHA256, "(...)"), + + .references = { + .others = { + StorePath { + "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar", + }, + }, + .self = true, + }, + }, + Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + }; + info.narSize = 34878; + if (includeImpureInfo) { + info.deriver = StorePath { + "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar.drv", + }; + info.registrationTime = 23423; + info.ultimate = true; + info.sigs = { "asdf", "qwer" }; + + info.url = "nar/1w1fff338fvdw53sqgamddn1b2xgds473pv6y13gizdbqjv4i5p3.nar.xz"; + info.fileHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="); + info.fileSize = 4029176; + } + return info; +} + +#define JSON_TEST(STEM, PURE) \ + TEST_F(NarInfoTest, NarInfo_ ## STEM ## _from_json) { \ + readTest(#STEM, [&](const auto & encoded_) { \ + auto encoded = json::parse(encoded_); \ + auto expected = makeNarInfo(*store, PURE); \ + NarInfo got = NarInfo::fromJSON( \ + *store, \ + expected.path, \ + encoded); \ + ASSERT_EQ(got, expected); \ + }); \ + } \ + \ + TEST_F(NarInfoTest, NarInfo_ ## STEM ## _to_json) { \ + writeTest(#STEM, [&]() -> json { \ + return makeNarInfo(*store, PURE) \ + .toJSON(*store, PURE, HashFormat::SRI); \ + }, [](const auto & file) { \ + return json::parse(readFile(file)); \ + }, [](const auto & file, const auto & got) { \ + return writeFile(file, got.dump(2) + "\n"); \ + }); \ + } + +JSON_TEST(pure, false) +JSON_TEST(impure, true) + +} diff --git a/src/libstore/tests/path-info.cc b/src/libstore/tests/path-info.cc new file mode 100644 index 000000000..fbee751c6 --- /dev/null +++ b/src/libstore/tests/path-info.cc @@ -0,0 +1,79 @@ +#include +#include + +#include "path-info.hh" + +#include "tests/characterization.hh" +#include "tests/libstore.hh" + 
+namespace nix { + +using nlohmann::json; + +class PathInfoTest : public CharacterizationTest, public LibStoreTest +{ + Path unitTestData = getUnitTestData() + "/libstore/path-info"; + + Path goldenMaster(PathView testStem) const override { + return unitTestData + "/" + testStem + ".json"; + } +}; + +static ValidPathInfo makePathInfo(const Store & store, bool includeImpureInfo) { + ValidPathInfo info { + store, + "foo", + FixedOutputInfo { + .method = FileIngestionMethod::Recursive, + .hash = hashString(HashType::htSHA256, "(...)"), + + .references = { + .others = { + StorePath { + "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar", + }, + }, + .self = true, + }, + }, + Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="), + }; + info.narSize = 34878; + if (includeImpureInfo) { + info.deriver = StorePath { + "g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar.drv", + }; + info.registrationTime = 23423; + info.ultimate = true; + info.sigs = { "asdf", "qwer" }; + } + return info; +} + +#define JSON_TEST(STEM, PURE) \ + TEST_F(PathInfoTest, PathInfo_ ## STEM ## _from_json) { \ + readTest(#STEM, [&](const auto & encoded_) { \ + auto encoded = json::parse(encoded_); \ + ValidPathInfo got = ValidPathInfo::fromJSON( \ + *store, \ + encoded); \ + auto expected = makePathInfo(*store, PURE); \ + ASSERT_EQ(got, expected); \ + }); \ + } \ + \ + TEST_F(PathInfoTest, PathInfo_ ## STEM ## _to_json) { \ + writeTest(#STEM, [&]() -> json { \ + return makePathInfo(*store, PURE) \ + .toJSON(*store, PURE, HashFormat::SRI); \ + }, [](const auto & file) { \ + return json::parse(readFile(file)); \ + }, [](const auto & file, const auto & got) { \ + return writeFile(file, got.dump(2) + "\n"); \ + }); \ + } + +JSON_TEST(pure, false) +JSON_TEST(impure, true) + +} diff --git a/src/libutil/tests/characterization.hh b/src/libutil/tests/characterization.hh index 6698c5239..6eb513d68 100644 --- a/src/libutil/tests/characterization.hh +++ b/src/libutil/tests/characterization.hh @@ -4,6 +4,7 @@ #include #include "types.hh" +#include "environment-variables.hh" namespace nix { diff --git a/src/nix/path-info.cc b/src/nix/path-info.cc index c16864d30..b4bdd15ba 100644 --- a/src/nix/path-info.cc +++ b/src/nix/path-info.cc @@ -9,6 +9,74 @@ #include using namespace nix; +using nlohmann::json; + +/** + * @return the total size of a set of store objects (specified by path), + * that is, the sum of the size of the NAR serialisation of each object + * in the set. + */ +static uint64_t getStoreObjectsTotalSize(Store & store, const StorePathSet & closure) +{ + uint64_t totalNarSize = 0; + for (auto & p : closure) { + totalNarSize += store.queryPathInfo(p)->narSize; + } + return totalNarSize; +} + + +/** + * Write a JSON representation of store object metadata, such as the + * hash and the references. + * + * @param showClosureSize If true, the closure size of each path is + * included. 
+ */ +static json pathInfoToJSON( + Store & store, + const StorePathSet & storePaths, + bool showClosureSize) +{ + json::array_t jsonList = json::array(); + + for (auto & storePath : storePaths) { + try { + auto info = store.queryPathInfo(storePath); + + auto & jsonPath = jsonList.emplace_back( + info->toJSON(store, true, HashFormat::SRI)); + + if (showClosureSize) { + StorePathSet closure; + store.computeFSClosure(storePath, closure, false, false); + + jsonPath["closureSize"] = getStoreObjectsTotalSize(store, closure); + + if (auto * narInfo = dynamic_cast(&*info)) { + uint64_t totalDownloadSize = 0; + for (auto & p : closure) { + auto depInfo = store.queryPathInfo(p); + if (auto * depNarInfo = dynamic_cast(&*depInfo)) + totalDownloadSize += depNarInfo->fileSize; + else + throw Error("Missing .narinfo for dep %s of %s", + store.printStorePath(p), + store.printStorePath(storePath)); + } + jsonPath["closureDownloadSize"] = totalDownloadSize; + } + } + + } catch (InvalidPath &) { + auto & jsonPath = jsonList.emplace_back(json::object()); + jsonPath["path"] = store.printStorePath(storePath); + jsonPath["valid"] = false; + } + } + return jsonList; +} + struct CmdPathInfo : StorePathsCommand, MixJSON { @@ -87,10 +155,11 @@ struct CmdPathInfo : StorePathsCommand, MixJSON pathLen = std::max(pathLen, store->printStorePath(storePath).size()); if (json) { - std::cout << store->pathInfoToJSON( + std::cout << pathInfoToJSON( + *store, // FIXME: preserve order? StorePathSet(storePaths.begin(), storePaths.end()), - true, showClosureSize, HashFormat::SRI, AllowInvalid).dump(); + showClosureSize).dump(); } else { @@ -107,8 +176,11 @@ struct CmdPathInfo : StorePathsCommand, MixJSON if (showSize) printSize(info->narSize); - if (showClosureSize) - printSize(store->getClosureSize(info->path).first); + if (showClosureSize) { + StorePathSet closure; + store->computeFSClosure(storePath, closure, false, false); + printSize(getStoreObjectsTotalSize(*store, closure)); + } if (showSigs) { std::cout << '\t'; diff --git a/unit-test-data/libstore/nar-info/impure.json b/unit-test-data/libstore/nar-info/impure.json new file mode 100644 index 000000000..093f25025 --- /dev/null +++ b/unit-test-data/libstore/nar-info/impure.json @@ -0,0 +1,21 @@ +{ + "ca": "fixed:r:sha256:1lr187v6dck1rjh2j6svpikcfz53wyl3qrlcbb405zlh13x0khhh", + "deriver": "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar.drv", + "downloadHash": "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", + "downloadSize": 4029176, + "narHash": "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", + "narSize": 34878, + "path": "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo", + "references": [ + "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar", + "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo" + ], + "registrationTime": 23423, + "signatures": [ + "asdf", + "qwer" + ], + "ultimate": true, + "url": "nar/1w1fff338fvdw53sqgamddn1b2xgds473pv6y13gizdbqjv4i5p3.nar.xz", + "valid": true +} diff --git a/unit-test-data/libstore/nar-info/pure.json b/unit-test-data/libstore/nar-info/pure.json new file mode 100644 index 000000000..62005d414 --- /dev/null +++ b/unit-test-data/libstore/nar-info/pure.json @@ -0,0 +1,11 @@ +{ + "ca": "fixed:r:sha256:1lr187v6dck1rjh2j6svpikcfz53wyl3qrlcbb405zlh13x0khhh", + "narHash": "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", + "narSize": 34878, + "path": "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo", + "references": [ + "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar", + "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo" + ], + 
"valid": true +} diff --git a/unit-test-data/libstore/path-info/impure.json b/unit-test-data/libstore/path-info/impure.json new file mode 100644 index 000000000..c477c768c --- /dev/null +++ b/unit-test-data/libstore/path-info/impure.json @@ -0,0 +1,18 @@ +{ + "ca": "fixed:r:sha256:1lr187v6dck1rjh2j6svpikcfz53wyl3qrlcbb405zlh13x0khhh", + "deriver": "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar.drv", + "narHash": "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", + "narSize": 34878, + "path": "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo", + "references": [ + "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar", + "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo" + ], + "registrationTime": 23423, + "signatures": [ + "asdf", + "qwer" + ], + "ultimate": true, + "valid": true +} diff --git a/unit-test-data/libstore/path-info/pure.json b/unit-test-data/libstore/path-info/pure.json new file mode 100644 index 000000000..62005d414 --- /dev/null +++ b/unit-test-data/libstore/path-info/pure.json @@ -0,0 +1,11 @@ +{ + "ca": "fixed:r:sha256:1lr187v6dck1rjh2j6svpikcfz53wyl3qrlcbb405zlh13x0khhh", + "narHash": "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", + "narSize": 34878, + "path": "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo", + "references": [ + "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar", + "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo" + ], + "valid": true +} From a7212e169b7204f80ea67f60c855d05b72b5d4f7 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Thu, 26 Oct 2023 20:01:36 -0400 Subject: [PATCH 21/55] Include `compression` in the `NarInfo` JSON format It was forgotten before. --- src/libstore/nar-info.cc | 5 +++++ src/libstore/tests/nar-info.cc | 1 + unit-test-data/libstore/nar-info/impure.json | 1 + 3 files changed, 7 insertions(+) diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc index a90812ff9..708cc7341 100644 --- a/src/libstore/nar-info.cc +++ b/src/libstore/nar-info.cc @@ -146,6 +146,8 @@ nlohmann::json NarInfo::toJSON( if (includeImpureInfo) { if (!url.empty()) jsonObject["url"] = url; + if (!compression.empty()) + jsonObject["compression"] = compression; if (fileHash) jsonObject["downloadHash"] = fileHash->to_string(hashFormat, true); if (fileSize) @@ -168,6 +170,9 @@ NarInfo NarInfo::fromJSON( if (json.contains("url")) res.url = ensureType(valueAt(json, "url"), value_t::string); + if (json.contains("compression")) + res.compression = ensureType(valueAt(json, "compression"), value_t::string); + if (json.contains("downloadHash")) res.fileHash = Hash::parseAny( static_cast( diff --git a/src/libstore/tests/nar-info.cc b/src/libstore/tests/nar-info.cc index cb92f3a28..c5b21d56b 100644 --- a/src/libstore/tests/nar-info.cc +++ b/src/libstore/tests/nar-info.cc @@ -48,6 +48,7 @@ static NarInfo makeNarInfo(const Store & store, bool includeImpureInfo) { info.sigs = { "asdf", "qwer" }; info.url = "nar/1w1fff338fvdw53sqgamddn1b2xgds473pv6y13gizdbqjv4i5p3.nar.xz"; + info.compression = "xz"; info.fileHash = Hash::parseSRI("sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc="); info.fileSize = 4029176; } diff --git a/unit-test-data/libstore/nar-info/impure.json b/unit-test-data/libstore/nar-info/impure.json index 093f25025..3f16667c9 100644 --- a/unit-test-data/libstore/nar-info/impure.json +++ b/unit-test-data/libstore/nar-info/impure.json @@ -1,5 +1,6 @@ { "ca": "fixed:r:sha256:1lr187v6dck1rjh2j6svpikcfz53wyl3qrlcbb405zlh13x0khhh", + "compression": "xz", "deriver": "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar.drv", "downloadHash": 
"sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", "downloadSize": 4029176, From cc46ea163024254d0b74646e1b38b19896d40040 Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sun, 22 Oct 2023 21:12:54 -0400 Subject: [PATCH 22/55] Make `nix path-info --json` return an object not array Before it returned a list of JSON objects with store object information, including the path in each object. Now, it maps the paths to JSON objects with the metadata sans path. This matches how `nix derivation show` works. Quite hillariously, none of our existing functional tests caught this change to `path-info --json` though they did use it. So just new functional tests need to be added. --- doc/manual/src/release-notes/rl-next.md | 36 ++++++++++++++++++- src/libstore/nar-info.cc | 8 +++-- src/libstore/parsed-derivations.cc | 5 +++ src/libstore/path-info.cc | 12 ++----- src/libstore/path-info.hh | 24 ++++++------- src/libstore/tests/path-info.cc | 6 ++-- src/nix/path-info.cc | 19 +++++----- src/nix/path-info.md | 12 +++---- tests/functional/local.mk | 1 + tests/functional/path-info.sh | 23 ++++++++++++ unit-test-data/libstore/nar-info/impure.json | 4 +-- unit-test-data/libstore/nar-info/pure.json | 4 +-- unit-test-data/libstore/path-info/impure.json | 4 +-- unit-test-data/libstore/path-info/pure.json | 4 +-- 14 files changed, 108 insertions(+), 54 deletions(-) create mode 100644 tests/functional/path-info.sh diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md index 8cd69f8fd..85e180e37 100644 --- a/doc/manual/src/release-notes/rl-next.md +++ b/doc/manual/src/release-notes/rl-next.md @@ -28,5 +28,39 @@ - The flake-specific flags `--recreate-lock-file` and `--update-input` have been removed from all commands operating on installables. They are superceded by `nix flake update`. - + - Commit signature verification for the [`builtins.fetchGit`](@docroot@/language/builtins.md#builtins-fetchGit) is added as the new [`verified-fetches` experimental feature](@docroot@/contributing/experimental-features.md#xp-feature-verified-fetches). + +- [`nix path-info --json`](@docroot@/command-ref/new-cli/nix3-path-info.md) + (experimental) now returns a JSON map rather than JSON list. + The `path` field of each object has instead become the key in th outer map, since it is unique. + The `valid` field also goes away because we just use null instead. + + - Old way: + + ```json5 + [ + { + "path": "/nix/store/8fv91097mbh5049i9rglc73dx6kjg3qk-bash-5.2-p15", + "valid": true, + // ... + }, + { + "path": "/nix/store/wffw7l0alvs3iw94cbgi1gmmbmw99sqb-home-manager-path", + "valid": false + } + ] + ``` + + - New way + + ```json5 + { + "/nix/store/8fv91097mbh5049i9rglc73dx6kjg3qk-bash-5.2-p15": { + // ... + }, + "/nix/store/wffw7l0alvs3iw94cbgi1gmmbmw99sqb-home-manager-path": null, + } + ``` + + This makes it match `nix derivation show`, which also maps store paths to information. 
diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc index 708cc7341..ae2223fb0 100644 --- a/src/libstore/nar-info.cc +++ b/src/libstore/nar-info.cc @@ -164,8 +164,12 @@ NarInfo NarInfo::fromJSON( { using nlohmann::detail::value_t; - NarInfo res { ValidPathInfo::fromJSON(store, json) }; - res.path = path; + NarInfo res { + ValidPathInfo { + path, + UnkeyedValidPathInfo::fromJSON(store, json), + } + }; if (json.contains("url")) res.url = ensureType(valueAt(json, "url"), value_t::string); diff --git a/src/libstore/parsed-derivations.cc b/src/libstore/parsed-derivations.cc index 45629dc7f..73e55a96c 100644 --- a/src/libstore/parsed-derivations.cc +++ b/src/libstore/parsed-derivations.cc @@ -148,6 +148,11 @@ static nlohmann::json pathInfoToJSON( auto & jsonPath = jsonList.emplace_back( info->toJSON(store, false, HashFormat::Base32)); + // Add the path to the object whose metadata we are including. + jsonPath["path"] = store.printStorePath(storePath); + + jsonPath["valid"] = true; + jsonPath["closureSize"] = ({ uint64_t totalNarSize = 0; StorePathSet closure; diff --git a/src/libstore/path-info.cc b/src/libstore/path-info.cc index e5d5205f4..2d7dc972f 100644 --- a/src/libstore/path-info.cc +++ b/src/libstore/path-info.cc @@ -148,7 +148,7 @@ ValidPathInfo::ValidPathInfo( } -nlohmann::json ValidPathInfo::toJSON( +nlohmann::json UnkeyedValidPathInfo::toJSON( const Store & store, bool includeImpureInfo, HashFormat hashFormat) const @@ -157,8 +157,6 @@ nlohmann::json ValidPathInfo::toJSON( auto jsonObject = json::object(); - jsonObject["path"] = store.printStorePath(path); - jsonObject["valid"] = true; jsonObject["narHash"] = narHash.to_string(hashFormat, true); jsonObject["narSize"] = narSize; @@ -190,21 +188,17 @@ nlohmann::json ValidPathInfo::toJSON( return jsonObject; } -ValidPathInfo ValidPathInfo::fromJSON( +UnkeyedValidPathInfo UnkeyedValidPathInfo::fromJSON( const Store & store, const nlohmann::json & json) { using nlohmann::detail::value_t; - ValidPathInfo res { - StorePath(StorePath::dummy), + UnkeyedValidPathInfo res { Hash(Hash::dummy), }; ensureType(json, value_t::object); - res.path = store.parseStorePath( - static_cast( - ensureType(valueAt(json, "path"), value_t::string))); res.narHash = Hash::parseAny( static_cast( ensureType(valueAt(json, "narHash"), value_t::string)), diff --git a/src/libstore/path-info.hh b/src/libstore/path-info.hh index feeda6c27..077abc7e1 100644 --- a/src/libstore/path-info.hh +++ b/src/libstore/path-info.hh @@ -78,6 +78,18 @@ struct UnkeyedValidPathInfo DECLARE_CMP(UnkeyedValidPathInfo); virtual ~UnkeyedValidPathInfo() { } + + /** + * @param includeImpureInfo If true, variable elements such as the + * registration time are included. + */ + virtual nlohmann::json toJSON( + const Store & store, + bool includeImpureInfo, + HashFormat hashFormat) const; + static UnkeyedValidPathInfo fromJSON( + const Store & store, + const nlohmann::json & json); }; struct ValidPathInfo : UnkeyedValidPathInfo { @@ -125,18 +137,6 @@ struct ValidPathInfo : UnkeyedValidPathInfo { Strings shortRefs() const; - /** - * @param includeImpureInfo If true, variable elements such as the - * registration time are included. 
- */ - virtual nlohmann::json toJSON( - const Store & store, - bool includeImpureInfo, - HashFormat hashFormat) const; - static ValidPathInfo fromJSON( - const Store & store, - const nlohmann::json & json); - ValidPathInfo(const ValidPathInfo & other) = default; ValidPathInfo(StorePath && path, UnkeyedValidPathInfo info) : UnkeyedValidPathInfo(info), path(std::move(path)) { }; diff --git a/src/libstore/tests/path-info.cc b/src/libstore/tests/path-info.cc index fbee751c6..49bf623bd 100644 --- a/src/libstore/tests/path-info.cc +++ b/src/libstore/tests/path-info.cc @@ -19,8 +19,8 @@ class PathInfoTest : public CharacterizationTest, public LibStoreTest } }; -static ValidPathInfo makePathInfo(const Store & store, bool includeImpureInfo) { - ValidPathInfo info { +static UnkeyedValidPathInfo makePathInfo(const Store & store, bool includeImpureInfo) { + UnkeyedValidPathInfo info = ValidPathInfo { store, "foo", FixedOutputInfo { @@ -54,7 +54,7 @@ static ValidPathInfo makePathInfo(const Store & store, bool includeImpureInfo) { TEST_F(PathInfoTest, PathInfo_ ## STEM ## _from_json) { \ readTest(#STEM, [&](const auto & encoded_) { \ auto encoded = json::parse(encoded_); \ - ValidPathInfo got = ValidPathInfo::fromJSON( \ + UnkeyedValidPathInfo got = UnkeyedValidPathInfo::fromJSON( \ *store, \ encoded); \ auto expected = makePathInfo(*store, PURE); \ diff --git a/src/nix/path-info.cc b/src/nix/path-info.cc index b4bdd15ba..23198a120 100644 --- a/src/nix/path-info.cc +++ b/src/nix/path-info.cc @@ -38,20 +38,21 @@ static json pathInfoToJSON( const StorePathSet & storePaths, bool showClosureSize) { - json::array_t jsonList = json::array(); + json::object_t jsonAllObjects = json::object(); for (auto & storePath : storePaths) { + json jsonObject; + try { auto info = store.queryPathInfo(storePath); - auto & jsonPath = jsonList.emplace_back( - info->toJSON(store, true, HashFormat::SRI)); + jsonObject = info->toJSON(store, true, HashFormat::SRI); if (showClosureSize) { StorePathSet closure; store.computeFSClosure(storePath, closure, false, false); - jsonPath["closureSize"] = getStoreObjectsTotalSize(store, closure); + jsonObject["closureSize"] = getStoreObjectsTotalSize(store, closure); if (auto * narInfo = dynamic_cast(&*info)) { uint64_t totalDownloadSize = 0; @@ -64,17 +65,17 @@ static json pathInfoToJSON( store.printStorePath(p), store.printStorePath(storePath)); } - jsonPath["closureDownloadSize"] = totalDownloadSize; + jsonObject["closureDownloadSize"] = totalDownloadSize; } } } catch (InvalidPath &) { - auto & jsonPath = jsonList.emplace_back(json::object()); - jsonPath["path"] = store.printStorePath(storePath); - jsonPath["valid"] = false; + jsonObject = nullptr; } + + jsonAllObjects[store.printStorePath(storePath)] = std::move(jsonObject); } - return jsonList; + return jsonAllObjects; } diff --git a/src/nix/path-info.md b/src/nix/path-info.md index 2dda866d0..4594854eb 100644 --- a/src/nix/path-info.md +++ b/src/nix/path-info.md @@ -43,7 +43,7 @@ R""( command): ```console - # nix path-info --json --all | jq -r 'sort_by(.registrationTime)[-11:-1][].path' + # nix path-info --json --all | jq -r 'to_entries | sort_by(.value.registrationTime) | .[-11:-1][] | .key' ``` * Show the size of the entire Nix store: @@ -58,13 +58,13 @@ R""( ```console # nix path-info --json --all --closure-size \ - | jq 'map(select(.closureSize > 1e9)) | sort_by(.closureSize) | map([.path, .closureSize])' + | jq 'map_values(.closureSize | select(. 
> 1e9)) | to_entries | sort_by(.value)' [ …, - [ - "/nix/store/zqamz3cz4dbzfihki2mk7a63mbkxz9xq-nixos-system-machine-20.09.20201112.3090c65", - 5887562256 - ] + { + .key = "/nix/store/zqamz3cz4dbzfihki2mk7a63mbkxz9xq-nixos-system-machine-20.09.20201112.3090c65", + .value = 5887562256, + } ] ``` diff --git a/tests/functional/local.mk b/tests/functional/local.mk index fe0d0c4ed..21dabca88 100644 --- a/tests/functional/local.mk +++ b/tests/functional/local.mk @@ -120,6 +120,7 @@ nix_tests = \ flakes/show.sh \ impure-derivations.sh \ path-from-hash-part.sh \ + path-info.sh \ toString-path.sh \ read-only-store.sh \ nested-sandboxing.sh \ diff --git a/tests/functional/path-info.sh b/tests/functional/path-info.sh new file mode 100644 index 000000000..763935eb7 --- /dev/null +++ b/tests/functional/path-info.sh @@ -0,0 +1,23 @@ +source common.sh + +echo foo > $TEST_ROOT/foo +foo=$(nix store add-file $TEST_ROOT/foo) + +echo bar > $TEST_ROOT/bar +bar=$(nix store add-file $TEST_ROOT/bar) + +echo baz > $TEST_ROOT/baz +baz=$(nix store add-file $TEST_ROOT/baz) +nix-store --delete "$baz" + +diff --unified --color=always \ + <(nix path-info --json "$foo" "$bar" "$baz" | + jq --sort-keys 'map_values(.narHash)') \ + <(jq --sort-keys <<-EOF + { + "$foo": "sha256-QvtAMbUl/uvi+LCObmqOhvNOapHdA2raiI4xG5zI5pA=", + "$bar": "sha256-9fhYGu9fqxcQC2Kc81qh2RMo1QcLBUBo8U+pPn+jthQ=", + "$baz": null + } EOF ) diff --git a/unit-test-data/libstore/nar-info/impure.json b/unit-test-data/libstore/nar-info/impure.json index 093f25025..bb9791a6a 100644 --- a/unit-test-data/libstore/nar-info/impure.json +++ b/unit-test-data/libstore/nar-info/impure.json @@ -6,7 +6,6 @@ "downloadSize": 4029176, "narHash": "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", "narSize": 34878, - "path": "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo", "references": [ "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar", "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo" @@ -17,6 +16,5 @@ "qwer" ], "ultimate": true, - "url": "nar/1w1fff338fvdw53sqgamddn1b2xgds473pv6y13gizdbqjv4i5p3.nar.xz", - "valid": true + "url": "nar/1w1fff338fvdw53sqgamddn1b2xgds473pv6y13gizdbqjv4i5p3.nar.xz" } diff --git a/unit-test-data/libstore/nar-info/pure.json b/unit-test-data/libstore/nar-info/pure.json index 62005d414..955baec31 100644 --- a/unit-test-data/libstore/nar-info/pure.json +++ b/unit-test-data/libstore/nar-info/pure.json @@ -2,10 +2,8 @@ "ca": "fixed:r:sha256:1lr187v6dck1rjh2j6svpikcfz53wyl3qrlcbb405zlh13x0khhh", "narHash": "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", "narSize": 34878, - "path": "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo", "references": [ "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar", "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo" ] } diff --git a/unit-test-data/libstore/path-info/impure.json b/unit-test-data/libstore/path-info/impure.json index c477c768c..0c452cc49 100644 --- a/unit-test-data/libstore/path-info/impure.json +++ b/unit-test-data/libstore/path-info/impure.json @@ -3,7 +3,6 @@ "deriver": "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar.drv", "narHash": "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", "narSize": 34878, - "path": "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo", "references": [ "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar", "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo" ], "registrationTime": 23423, "signatures": [ "asdf", "qwer" ], - "ultimate": true, - "valid": true + "ultimate": true } diff --git a/unit-test-data/libstore/path-info/pure.json
b/unit-test-data/libstore/path-info/pure.json index 62005d414..955baec31 100644 --- a/unit-test-data/libstore/path-info/pure.json +++ b/unit-test-data/libstore/path-info/pure.json @@ -2,10 +2,8 @@ "ca": "fixed:r:sha256:1lr187v6dck1rjh2j6svpikcfz53wyl3qrlcbb405zlh13x0khhh", "narHash": "sha256-FePFYIlMuycIXPZbWi7LGEiMmZSX9FMbaQenWBzm1Sc=", "narSize": 34878, - "path": "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo", "references": [ "/nix/store/g1w7hy3qg1w7hy3qg1w7hy3qg1w7hy3q-bar", "/nix/store/n5wkd9frr45pa74if5gpz9j7mifg27fh-foo" - ], - "valid": true + ] } From 61d6fe059e959455e156c1d57bb91155d363e983 Mon Sep 17 00:00:00 2001 From: Mel Zuser Date: Mon, 6 Nov 2023 14:13:40 -0500 Subject: [PATCH 23/55] Fix `boost::bad_format_string` exception in `builtins.addErrorContext` (#9291) * Fix boost::bad_format_string exception in builtins.addErrorContext The message passed to addTrace was incorrectly being used as a format string and this this would cause an exception when the string contained a '%', which can be hit in places where arbitrary file paths are interpolated. * add test --- src/libexpr/primops.cc | 2 +- tests/functional/lang.sh | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/libexpr/primops.cc b/src/libexpr/primops.cc index 36340d0f9..8d3a18526 100644 --- a/src/libexpr/primops.cc +++ b/src/libexpr/primops.cc @@ -825,7 +825,7 @@ static void prim_addErrorContext(EvalState & state, const PosIdx pos, Value * * auto message = state.coerceToString(pos, *args[0], context, "while evaluating the error message passed to builtins.addErrorContext", false, false).toOwned(); - e.addTrace(nullptr, message, true); + e.addTrace(nullptr, hintfmt(message), true); throw; } } diff --git a/tests/functional/lang.sh b/tests/functional/lang.sh index c3acef5ee..12df32c87 100755 --- a/tests/functional/lang.sh +++ b/tests/functional/lang.sh @@ -23,6 +23,7 @@ nix-instantiate --trace-verbose --eval -E 'builtins.traceVerbose "Hello" 123' 2> nix-instantiate --eval -E 'builtins.traceVerbose "Hello" 123' 2>&1 | grepQuietInverse Hello nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" 123' 2>&1 | grepQuietInverse Hello expectStderr 1 nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello" (throw "Foo")' | grepQuiet Hello +expectStderr 1 nix-instantiate --show-trace --eval -E 'builtins.addErrorContext "Hello %" (throw "Foo")' | grepQuiet 'Hello %' nix-instantiate --eval -E 'let x = builtins.trace { x = x; } true; in x' \ 2>&1 | grepQuiet -E 'trace: { x = «potential infinite recursion»; }' From 867f894289437a96630579592a46a4253151f079 Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Fri, 15 Sep 2023 10:49:30 -0700 Subject: [PATCH 24/55] Populate $XDG_DATA_DIRS with appropriate folder from Nix profile On non-NixOS systems, the default `nix` install does not populate the `$XDG_DATA_DIRS`. This populates it and enables things like bash-completion and `.desktop` file detection for `nix` profile installed packages. 
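The effect can be checked after sourcing the updated profile script; with a previously empty `XDG_DATA_DIRS`, the variable would end up looking roughly like this (an illustrative sketch — the exact home directory and profile paths depend on the installation):

```console
$ echo "$XDG_DATA_DIRS"
/usr/local/share:/usr/share:/home/alice/.nix-profile/share:/nix/var/nix/profiles/default/share
```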
Signed-off-by: Ana Hobden --- scripts/nix-profile-daemon.fish.in | 8 ++++++++ scripts/nix-profile-daemon.sh.in | 8 ++++++++ scripts/nix-profile.fish.in | 8 ++++++++ scripts/nix-profile.sh.in | 8 ++++++++ 4 files changed, 32 insertions(+) diff --git a/scripts/nix-profile-daemon.fish.in b/scripts/nix-profile-daemon.fish.in index 400696812..e7b394d56 100644 --- a/scripts/nix-profile-daemon.fish.in +++ b/scripts/nix-profile-daemon.fish.in @@ -19,6 +19,14 @@ set __ETC_PROFILE_NIX_SOURCED 1 set --export NIX_PROFILES "@localstatedir@/nix/profiles/default $HOME/.nix-profile" +# Populate bash completions, .desktop files, etc +if test -n "$NIX_SSH_CERT_FILE" + # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default + set --export XDG_DATA_DIRS "/usr/local/share:/usr/share:$NIX_LINK/share:/nix/var/nix/profiles/default/share" +else + set --export XDG_DATA_DIRS "$XDG_DATA_DIRS:$NIX_LINK/share:/nix/var/nix/profiles/default/share" +end + # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. if test -n "$NIX_SSH_CERT_FILE" : # Allow users to override the NIX_SSL_CERT_FILE diff --git a/scripts/nix-profile-daemon.sh.in b/scripts/nix-profile-daemon.sh.in index 8cfd3149e..3089cec66 100644 --- a/scripts/nix-profile-daemon.sh.in +++ b/scripts/nix-profile-daemon.sh.in @@ -30,6 +30,14 @@ fi export NIX_PROFILES="@localstatedir@/nix/profiles/default $NIX_LINK" +# Populate bash completions, .desktop files, etc +if [ -n "${XDG_DATA_DIRS-}" ]; then + # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default + export XDG_DATA_DIRS="/usr/local/share:/usr/share:$NIX_LINK/share:/nix/var/nix/profiles/default/share" +else + export XDG_DATA_DIRS="$XDG_DATA_DIRS:$NIX_LINK/share:/nix/var/nix/profiles/default/share" +fi + # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. if [ -n "${NIX_SSL_CERT_FILE:-}" ]; then : # Allow users to override the NIX_SSL_CERT_FILE diff --git a/scripts/nix-profile.fish.in b/scripts/nix-profile.fish.in index 731498c76..fc8fe4e97 100644 --- a/scripts/nix-profile.fish.in +++ b/scripts/nix-profile.fish.in @@ -20,6 +20,14 @@ if test -n "$HOME" && test -n "$USER" # This part should be kept in sync with nixpkgs:nixos/modules/programs/environment.nix set --export NIX_PROFILES "@localstatedir@/nix/profiles/default $HOME/.nix-profile" + # Populate bash completions, .desktop files, etc + if test -n "$NIX_SSH_CERT_FILE" + # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default + set --export XDG_DATA_DIRS "/usr/local/share:/usr/share:$NIX_LINK/share:/nix/var/nix/profiles/default/share" + else + set --export XDG_DATA_DIRS "$XDG_DATA_DIRS:$NIX_LINK/share:/nix/var/nix/profiles/default/share" + end + # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. 
if test -n "$NIX_SSH_CERT_FILE" : # Allow users to override the NIX_SSL_CERT_FILE diff --git a/scripts/nix-profile.sh.in b/scripts/nix-profile.sh.in index c4d60cf37..a0d098588 100644 --- a/scripts/nix-profile.sh.in +++ b/scripts/nix-profile.sh.in @@ -32,6 +32,14 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then # This part should be kept in sync with nixpkgs:nixos/modules/programs/environment.nix export NIX_PROFILES="@localstatedir@/nix/profiles/default $NIX_LINK" + # Populate bash completions, .desktop files, etc + if [ -n "${XDG_DATA_DIRS-}" ]; then + # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default + export XDG_DATA_DIRS="/usr/local/share:/usr/share:$NIX_LINK/share:/nix/var/nix/profiles/default/share" + else + export XDG_DATA_DIRS="$XDG_DATA_DIRS:$NIX_LINK/share:/nix/var/nix/profiles/default/share" + fi + # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. if [ -e /etc/ssl/certs/ca-certificates.crt ]; then # NixOS, Ubuntu, Debian, Gentoo, Arch export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt From 896013ec0c0d4633349ff0373bdae626667adc77 Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Thu, 21 Sep 2023 09:27:35 -0700 Subject: [PATCH 25/55] Fix bad copy-paste --- scripts/nix-profile-daemon.fish.in | 2 +- scripts/nix-profile.fish.in | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/nix-profile-daemon.fish.in b/scripts/nix-profile-daemon.fish.in index e7b394d56..5f5a53141 100644 --- a/scripts/nix-profile-daemon.fish.in +++ b/scripts/nix-profile-daemon.fish.in @@ -20,7 +20,7 @@ set __ETC_PROFILE_NIX_SOURCED 1 set --export NIX_PROFILES "@localstatedir@/nix/profiles/default $HOME/.nix-profile" # Populate bash completions, .desktop files, etc -if test -n "$NIX_SSH_CERT_FILE" +if test -n "$XDG_DATA_DIRS" # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default set --export XDG_DATA_DIRS "/usr/local/share:/usr/share:$NIX_LINK/share:/nix/var/nix/profiles/default/share" else diff --git a/scripts/nix-profile.fish.in b/scripts/nix-profile.fish.in index fc8fe4e97..2523594f2 100644 --- a/scripts/nix-profile.fish.in +++ b/scripts/nix-profile.fish.in @@ -21,7 +21,7 @@ if test -n "$HOME" && test -n "$USER" set --export NIX_PROFILES "@localstatedir@/nix/profiles/default $HOME/.nix-profile" # Populate bash completions, .desktop files, etc - if test -n "$NIX_SSH_CERT_FILE" + if test -n "$XDG_DATA_DIRS" # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default set --export XDG_DATA_DIRS "/usr/local/share:/usr/share:$NIX_LINK/share:/nix/var/nix/profiles/default/share" else From 150b5aba509d169a50c6ad62100c3ad7bf00242b Mon Sep 17 00:00:00 2001 From: Ana Hobden Date: Mon, 6 Nov 2023 10:07:53 -0800 Subject: [PATCH 26/55] Update scripts/nix-profile-daemon.fish.in Co-authored-by: Valentin Gagarin --- scripts/nix-profile-daemon.fish.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/nix-profile-daemon.fish.in b/scripts/nix-profile-daemon.fish.in index 5f5a53141..3fe9e782a 100644 --- a/scripts/nix-profile-daemon.fish.in +++ b/scripts/nix-profile-daemon.fish.in @@ -22,9 +22,9 @@ set --export NIX_PROFILES "@localstatedir@/nix/profiles/default $HOME/.nix-profi # Populate bash completions, .desktop files, etc if test -n "$XDG_DATA_DIRS" # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default - set --export 
XDG_DATA_DIRS "/usr/local/share:/usr/share:$NIX_LINK/share:/nix/var/nix/profiles/default/share" + set --export XDG_DATA_DIRS "/usr/local/share:/usr/share:/nix/var/nix/profiles/default/share" else - set --export XDG_DATA_DIRS "$XDG_DATA_DIRS:$NIX_LINK/share:/nix/var/nix/profiles/default/share" + set --export XDG_DATA_DIRS "$XDG_DATA_DIRS:/nix/var/nix/profiles/default/share" end # Set $NIX_SSL_CERT_FILE so that Nixpkgs applications like curl work. From c60eba3276d7417a7f51ef606e5b9ca580cf5e5b Mon Sep 17 00:00:00 2001 From: Felix Uhl Date: Mon, 6 Nov 2023 21:43:18 +0100 Subject: [PATCH 27/55] Add release note on XDG_DATA_DIRS change Follow-up to https://github.com/NixOS/nix/pull/8985 --- doc/manual/src/release-notes/rl-next.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md index 85e180e37..73ba03fc4 100644 --- a/doc/manual/src/release-notes/rl-next.md +++ b/doc/manual/src/release-notes/rl-next.md @@ -64,3 +64,8 @@ ``` This makes it match `nix derivation show`, which also maps store paths to information. + +- When Nix is installed using the [binary installer](@docroot@/installation/installing-binary.md), in supported shells (Bash, Zsh, Fish) + [`XDG_DATA_DIRS`](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html#variables) is now populated with the path to the `/share` subdirectory of the current profile. + This means that command completion scripts, `.desktop` files, and similar artifacts installed via [`nix-env`](@docroot@/command-ref/nix-env.md) or [`nix profile`](@docroot@/command-ref/new-cli/nix3-profile.md) + (experimental) can be found by any program that follows the [XDG Base Directory Specification](https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html). From 9fec62a10044629ad4758ec95f9b1e67d7aefff5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 22:21:07 +0000 Subject: [PATCH 28/55] build(deps): bump zeebe-io/backport-action from 2.0.0 to 2.1.0 Bumps [zeebe-io/backport-action](https://github.com/zeebe-io/backport-action) from 2.0.0 to 2.1.0. - [Release notes](https://github.com/zeebe-io/backport-action/releases) - [Commits](https://github.com/zeebe-io/backport-action/compare/v2.0.0...v2.1.0) --- updated-dependencies: - dependency-name: zeebe-io/backport-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/backport.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 312c211dd..893f4a56f 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -21,7 +21,7 @@ jobs: fetch-depth: 0 - name: Create backport PRs # should be kept in sync with `version` - uses: zeebe-io/backport-action@v2.0.0 + uses: zeebe-io/backport-action@v2.1.0 with: # Config README: https://github.com/zeebe-io/backport-action#backport-action github_token: ${{ secrets.GITHUB_TOKEN }} From b733f4ab29cec07cf17e1fe6580c9d2f8a4362a0 Mon Sep 17 00:00:00 2001 From: Valentin Gagarin Date: Tue, 7 Nov 2023 01:12:39 +0100 Subject: [PATCH 29/55] maintainers: refine the mission statement phrasing setting a direction falls short of what we're already doing: guide contributors. the direction aspect is still important, as that is the authoritative part. guidance is the supportive part. 
--- maintainers/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainers/README.md b/maintainers/README.md index 5be4f9d04..ee97c1195 100644 --- a/maintainers/README.md +++ b/maintainers/README.md @@ -2,7 +2,7 @@ ## Motivation -The team's main responsibility is to set a direction for the development of Nix and ensure that the code is in good shape. +The team's main responsibility is to guide and direct the development of Nix and ensure that the code is in good shape. We aim to achieve this by improving the contributor experience and attracting more maintainers – that is, by helping other people contributing to Nix and eventually taking responsibility – in order to scale the development process to match users' needs. From 1362a0a55aaddccef5a525e3b1179239d650bb07 Mon Sep 17 00:00:00 2001 From: Felix Uhl Date: Mon, 6 Nov 2023 23:16:05 +0100 Subject: [PATCH 30/55] Fix logic for default XDG_DATA_DIRS value The [POSIX test manpage](https://pubs.opengroup.org/onlinepubs/9699919799/utilities/test.html) as well as the [fish test manpage](https://fishshell.com/docs/current/cmds/test.html#operators-for-text-strings) specify that `-z` will be "True if the length of string string is zero; otherwise, false." The `-n` was likely a mixup and not caught during testing of https://github.com/NixOS/nix/pull/8985 due to a lack of missing conflicting entries in `XDG_DATA_DIRS`. --- scripts/nix-profile-daemon.fish.in | 2 +- scripts/nix-profile-daemon.sh.in | 2 +- scripts/nix-profile.fish.in | 2 +- scripts/nix-profile.sh.in | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/nix-profile-daemon.fish.in b/scripts/nix-profile-daemon.fish.in index 3fe9e782a..c23aa64f0 100644 --- a/scripts/nix-profile-daemon.fish.in +++ b/scripts/nix-profile-daemon.fish.in @@ -20,7 +20,7 @@ set __ETC_PROFILE_NIX_SOURCED 1 set --export NIX_PROFILES "@localstatedir@/nix/profiles/default $HOME/.nix-profile" # Populate bash completions, .desktop files, etc -if test -n "$XDG_DATA_DIRS" +if test -z "$XDG_DATA_DIRS" # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default set --export XDG_DATA_DIRS "/usr/local/share:/usr/share:/nix/var/nix/profiles/default/share" else diff --git a/scripts/nix-profile-daemon.sh.in b/scripts/nix-profile-daemon.sh.in index 3089cec66..c63db4648 100644 --- a/scripts/nix-profile-daemon.sh.in +++ b/scripts/nix-profile-daemon.sh.in @@ -31,7 +31,7 @@ fi export NIX_PROFILES="@localstatedir@/nix/profiles/default $NIX_LINK" # Populate bash completions, .desktop files, etc -if [ -n "${XDG_DATA_DIRS-}" ]; then +if [ -z "$XDG_DATA_DIRS" ]; then # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default export XDG_DATA_DIRS="/usr/local/share:/usr/share:$NIX_LINK/share:/nix/var/nix/profiles/default/share" else diff --git a/scripts/nix-profile.fish.in b/scripts/nix-profile.fish.in index 2523594f2..619df52b8 100644 --- a/scripts/nix-profile.fish.in +++ b/scripts/nix-profile.fish.in @@ -21,7 +21,7 @@ if test -n "$HOME" && test -n "$USER" set --export NIX_PROFILES "@localstatedir@/nix/profiles/default $HOME/.nix-profile" # Populate bash completions, .desktop files, etc - if test -n "$XDG_DATA_DIRS" + if test -z "$XDG_DATA_DIRS" # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default set --export XDG_DATA_DIRS "/usr/local/share:/usr/share:$NIX_LINK/share:/nix/var/nix/profiles/default/share" else diff 
--git a/scripts/nix-profile.sh.in b/scripts/nix-profile.sh.in index a0d098588..56e070ae1 100644 --- a/scripts/nix-profile.sh.in +++ b/scripts/nix-profile.sh.in @@ -33,7 +33,7 @@ if [ -n "$HOME" ] && [ -n "$USER" ]; then export NIX_PROFILES="@localstatedir@/nix/profiles/default $NIX_LINK" # Populate bash completions, .desktop files, etc - if [ -n "${XDG_DATA_DIRS-}" ]; then + if [ -z "$XDG_DATA_DIRS" ]; then # According to XDG spec the default is /usr/local/share:/usr/share, don't set something that prevents that default export XDG_DATA_DIRS="/usr/local/share:/usr/share:$NIX_LINK/share:/nix/var/nix/profiles/default/share" else From 74210c12feccc6c6b717c5f39c28d7ce86614e60 Mon Sep 17 00:00:00 2001 From: Tom Bereknyei Date: Sat, 28 Aug 2021 16:26:53 -0400 Subject: [PATCH 31/55] Shellbang support with flakes Enables shebang usage of nix shell. All arguments with `#! nix` get added to the nix invocation. This implementation does NOT set any additional arguments other than placing the script path itself as the first argument such that the interpreter can utilize it. Example below: ``` #!/usr/bin/env nix #! nix shell --quiet #! nix nixpkgs#bash #! nix nixpkgs#shellcheck #! nix nixpkgs#hello #! nix --ignore-environment --command bash # shellcheck shell=bash set -eu shellcheck "$0" || exit 1 function main { hello echo 0:"$0" 1:"$1" 2:"$2" } "$@" ``` fix: include programName usage EDIT: For posterity I've changed shellwords to shellwords2 in order not to interfere with other changes during a rebase. shellwords2 is removed in a later commit. -- roberth --- src/libutil/args.cc | 37 +++++++++++++++++++++++++ src/libutil/args.hh | 8 +++++- src/libutil/util.cc | 45 +++++++++++++++++++++++++++++++ src/libutil/util.hh | 11 +++++--- src/nix/main.cc | 2 +- tests/functional/flakes/common.sh | 5 ++-- tests/functional/flakes/flakes.sh | 15 ++++++++++- 7 files changed, 114 insertions(+), 9 deletions(-) diff --git a/src/libutil/args.cc b/src/libutil/args.cc index 0b65519a3..7106491fd 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -6,6 +6,7 @@ #include "users.hh" #include "json-utils.hh" +#include #include namespace nix { @@ -78,6 +79,12 @@ std::optional RootArgs::needsCompletion(std::string_view s) } void RootArgs::parseCmdline(const Strings & _cmdline) +{ + // Default via 5.1.2.2.1 in C standard + Args::parseCmdline("", _cmdline); +} + +void Args::parseCmdline(const std::string & programName, const Strings & _cmdline) { Strings pendingArgs; bool dashDash = false; @@ -93,6 +100,36 @@ void RootArgs::parseCmdline(const Strings & _cmdline) } bool argsSeen = false; + + // Heuristic to see if we're invoked as a shebang script, namely, + // if we have at least one argument, it's the name of an + // executable file, and it starts with "#!". 
+ Strings savedArgs; + auto isNixCommand = std::regex_search(programName, std::regex("nix$")); + if (isNixCommand && cmdline.size() > 0) { + auto script = *cmdline.begin(); + try { + auto lines = tokenizeString(readFile(script), "\n"); + if (std::regex_search(lines.front(), std::regex("^#!"))) { + lines.pop_front(); + for (auto pos = std::next(cmdline.begin()); pos != cmdline.end();pos++) + savedArgs.push_back(*pos); + cmdline.clear(); + + for (auto line : lines) { + line = chomp(line); + + std::smatch match; + if (std::regex_match(line, match, std::regex("^#!\\s*nix\\s(.*)$"))) + for (const auto & word : shellwords(match[1].str())) + cmdline.push_back(word); + } + cmdline.push_back(script); + for (auto pos = savedArgs.begin(); pos != savedArgs.end();pos++) + cmdline.push_back(*pos); + } + } catch (SysError &) { } + } for (auto pos = cmdline.begin(); pos != cmdline.end(); ) { auto arg = *pos; diff --git a/src/libutil/args.hh b/src/libutil/args.hh index 45fd678e7..1d056678d 100644 --- a/src/libutil/args.hh +++ b/src/libutil/args.hh @@ -27,8 +27,14 @@ class Args public: /** - * Return a short one-line description of the command. + * Parse the command line with argv0, throwing a UsageError if something + goes wrong. */ + void parseCmdline(const std::string & argv0, const Strings & cmdline); + + /** + * Return a short one-line description of the command. + */ virtual std::string description() { return ""; } virtual bool forceImpureByDefault() { return false; } diff --git a/src/libutil/util.cc b/src/libutil/util.cc index ee7a22849..6ca1dbd7a 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -5,6 +5,8 @@ #include #include #include +#include + namespace nix { @@ -136,6 +138,49 @@ std::string shellEscape(const std::string_view s) return r; } +/* Recreate the effect of the perl shellwords function, breaking up a + * string into arguments like a shell word, including escapes + */ +std::vector shellwords2(const std::string & s) +{ + std::regex whitespace("^(\\s+).*"); + auto begin = s.cbegin(); + std::vector res; + std::string cur; + enum state { + sBegin, + sQuote + }; + state st = sBegin; + auto it = begin; + for (; it != s.cend(); ++it) { + if (st == sBegin) { + std::smatch match; + if (regex_search(it, s.cend(), match, whitespace)) { + cur.append(begin, it); + res.push_back(cur); + cur.clear(); + it = match[1].second; + begin = it; + } + } + switch (*it) { + case '"': + cur.append(begin, it); + begin = it + 1; + st = st == sBegin ? sQuote : sBegin; + break; + case '\\': + /* perl shellwords mostly just treats the next char as part of the string with no special processing */ + cur.append(begin, it); + begin = ++it; + break; + } + } + cur.append(begin, it); + if (!cur.empty()) res.push_back(cur); + return res; +} void ignoreException(Verbosity lvl) { diff --git a/src/libutil/util.hh b/src/libutil/util.hh index 5f730eaf6..bcd0c1769 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -189,10 +189,13 @@ std::string toLower(const std::string & s); std::string shellEscape(const std::string_view s); -/** - * Exception handling in destructors: print an error message, then - * ignore the exception. - */ +/* Recreate the effect of the perl shellwords function, breaking up a + * string into arguments like a shell word, including escapes */ +std::vector shellwords2(const std::string & s); + + +/* Exception handling in destructors: print an error message, then + ignore the exception. 
*/ void ignoreException(Verbosity lvl = lvlError); diff --git a/src/nix/main.cc b/src/nix/main.cc index b582fc166..16fb50806 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -428,7 +428,7 @@ void mainWrapped(int argc, char * * argv) }); try { - args.parseCmdline(argvToStrings(argc, argv)); + args.parseCmdline(programName, argvToStrings(argc, argv)); } catch (UsageError &) { if (!args.helpRequested && !args.completions) throw; } diff --git a/tests/functional/flakes/common.sh b/tests/functional/flakes/common.sh index 8aed296e6..fc45cf7bf 100644 --- a/tests/functional/flakes/common.sh +++ b/tests/functional/flakes/common.sh @@ -11,6 +11,7 @@ writeSimpleFlake() { outputs = inputs: rec { packages.$system = rec { foo = import ./simple.nix; + fooScript = (import ./shell.nix {}).foo; default = foo; }; packages.someOtherSystem = rec { @@ -24,13 +25,13 @@ writeSimpleFlake() { } EOF - cp ../simple.nix ../simple.builder.sh ../config.nix $flakeDir/ + cp ../simple.nix ../shell.nix ../simple.builder.sh ../config.nix $flakeDir/ } createSimpleGitFlake() { local flakeDir="$1" writeSimpleFlake $flakeDir - git -C $flakeDir add flake.nix simple.nix simple.builder.sh config.nix + git -C $flakeDir add flake.nix simple.nix shell.nix simple.builder.sh config.nix git -C $flakeDir commit -m 'Initial' } diff --git a/tests/functional/flakes/flakes.sh b/tests/functional/flakes/flakes.sh index b0038935c..c4b18a21b 100644 --- a/tests/functional/flakes/flakes.sh +++ b/tests/functional/flakes/flakes.sh @@ -66,7 +66,17 @@ cat > "$nonFlakeDir/README.md" < "$nonFlakeDir/shebang.sh" < Date: Mon, 14 Nov 2022 17:04:19 +0100 Subject: [PATCH 32/55] src/libutil/util.hh: Formatting --- src/libutil/util.hh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/libutil/util.hh b/src/libutil/util.hh index bcd0c1769..b7d3ac504 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -190,7 +190,7 @@ std::string shellEscape(const std::string_view s); /* Recreate the effect of the perl shellwords function, breaking up a - * string into arguments like a shell word, including escapes */ + string into arguments like a shell word, including escapes. */ std::vector shellwords2(const std::string & s); From eea5a003d99094d8488fd0d1ecd97f98d3573133 Mon Sep 17 00:00:00 2001 From: Tom Bereknyei Date: Mon, 14 Nov 2022 19:40:01 -0500 Subject: [PATCH 33/55] fix: test to ensure arguments are passed --- tests/functional/flakes/flakes.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/functional/flakes/flakes.sh b/tests/functional/flakes/flakes.sh index c4b18a21b..e7dffde07 100644 --- a/tests/functional/flakes/flakes.sh +++ b/tests/functional/flakes/flakes.sh @@ -71,8 +71,9 @@ cat > "$nonFlakeDir/shebang.sh" < Date: Mon, 14 Nov 2022 23:58:58 -0500 Subject: [PATCH 34/55] doc: shebang release notes, docs, tests fix: release notes --- doc/manual/src/release-notes/rl-next.md | 44 +++++++++ src/nix/shell.md | 117 ++++++++++++++++++++++++ tests/functional/flakes/flakes.sh | 13 +++ 3 files changed, 174 insertions(+) diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md index 73ba03fc4..93d4f432b 100644 --- a/doc/manual/src/release-notes/rl-next.md +++ b/doc/manual/src/release-notes/rl-next.md @@ -1,5 +1,49 @@ # Release X.Y (202?-??-??) +- The experimental nix command is now a `#!-interpreter` by appending the + contents of any `#! nix` lines and the script's location to a single call. + Some examples: + ``` + #!/usr/bin/env nix + #! 
nix shell --file "" hello --command bash + + hello | cowsay + ``` + or with flakes: + ``` + #!/usr/bin/env nix + #! nix shell nixpkgs#bash nixpkgs#hello nixpkgs#cowsay --command bash + + hello | cowsay + ``` + or + ```bash + #! /usr/bin/env nix + #! nix shell --impure --expr + #! nix "with (import (builtins.getFlake ''nixpkgs'') {}); terraform.withPlugins (plugins: [ plugins.openstack ])" + #! nix --command bash + + terraform "$@" + ``` + or + ``` + #!/usr/bin/env nix + //! ```cargo + //! [dependencies] + //! time = "0.1.25" + //! ``` + /* + #!nix shell nixpkgs#rustc nixpkgs#rust-script nixpkgs#cargo --command rust-script + */ + fn main() { + for argument in std::env::args().skip(1) { + println!("{}", argument); + }; + println!("{}", std::env::var("HOME").expect("")); + println!("{}", time::now().rfc822z()); + } + // vim: ft=rust + ``` - [URL flake references](@docroot@/command-ref/new-cli/nix3-flake.md#flake-references) now support [percent-encoded](https://datatracker.ietf.org/doc/html/rfc3986#section-2.1) characters. - [Path-like flake references](@docroot@/command-ref/new-cli/nix3-flake.md#path-like-syntax) now accept arbitrary unicode characters (except `#` and `?`). diff --git a/src/nix/shell.md b/src/nix/shell.md index f36919575..b0bfa1609 100644 --- a/src/nix/shell.md +++ b/src/nix/shell.md @@ -51,4 +51,121 @@ R""( provides the specified [*installables*](./nix.md#installable). If no command is specified, it starts the default shell of your user account specified by `$SHELL`. +# Use as a `#!`-interpreter + +You can use `nix` as a script interpreter to allow scripts written +in arbitrary languages to obtain their own dependencies via Nix. This is +done by starting the script with the following lines: + +```bash +#! /usr/bin/env nix +#! nix shell installables --command real-interpreter +``` + +where *real-interpreter* is the “real” script interpreter that will be +invoked by `nix shell` after it has obtained the dependencies and +initialised the environment, and *installables* are the attribute names of +the dependencies in Nixpkgs. + +The lines starting with `#! nix` specify options (see above). Note that you +cannot write `#! /usr/bin/env nix shell -i ...` because many operating systems +only allow one argument in `#!` lines. + +For example, here is a Python script that depends on Python and the +`prettytable` package: + +```python +#! /usr/bin/env nix +#! nix shell github:tomberek/-#python3With.prettytable --command python + +import prettytable + +# Print a simple table. +t = prettytable.PrettyTable(["N", "N^2"]) +for n in range(1, 10): t.add_row([n, n * n]) +print t +``` + +Similarly, the following is a Perl script that specifies that it +requires Perl and the `HTML::TokeParser::Simple` and `LWP` packages: + +```perl +#! /usr/bin/env nix +#! nix shell github:tomberek/-#perlWith.HTMLTokeParserSimple.LWP --command perl -x + +use HTML::TokeParser::Simple; + +# Fetch nixos.org and print all hrefs. +my $p = HTML::TokeParser::Simple->new(url => 'http://nixos.org/'); + +while (my $token = $p->get_tag("a")) { + my $href = $token->get_attr("href"); + print "$href\n" if $href; +} +``` + +Sometimes you need to pass a simple Nix expression to customize a +package like Terraform: + +```bash +#! /usr/bin/env nix +#! nix shell --impure --expr +#! nix "with (import (builtins.getFlake ''nixpkgs'') {}); terraform.withPlugins (plugins: [ plugins.openstack ])" +#! 
nix --command bash + +terraform "$@" +``` + +> **Note** +> +> You must use double quotes (`"`) when passing a simple Nix expression +> in a nix shell shebang. + +Finally, using the merging of multiple nix shell shebangs the following +Haskell script uses a specific branch of Nixpkgs/NixOS (the 21.11 stable +branch): + +```haskell +#!/usr/bin/env nix +#!nix shell --override-input nixpkgs github:NixOS/nixpkgs/nixos-21.11 +#!nix github:tomberek/-#haskellWith.download-curl.tagsoup --command runghc + +import Network.Curl.Download +import Text.HTML.TagSoup +import Data.Either +import Data.ByteString.Char8 (unpack) + +-- Fetch nixos.org and print all hrefs. +main = do + resp <- openURI "https://nixos.org/" + let tags = filter (isTagOpenName "a") $ parseTags $ unpack $ fromRight undefined resp + let tags' = map (fromAttrib "href") tags + mapM_ putStrLn $ filter (/= "") tags' +``` + +If you want to be even more precise, you can specify a specific revision +of Nixpkgs: + + #!nix shell --override-input nixpkgs github:NixOS/nixpkgs/eabc38219184cc3e04a974fe31857d8e0eac098d + +The examples above all used `-p` to get dependencies from Nixpkgs. You +can also use a Nix expression to build your own dependencies. For +example, the Python example could have been written as: + +```python +#! /usr/bin/env nix +#! nix shell --impure --file deps.nix -i python +``` + +where the file `deps.nix` in the same directory as the `#!`-script +contains: + +```nix +with import {}; +python3.withPackages (ps: with ps; [ prettytable ]) +``` + + + + )"" diff --git a/tests/functional/flakes/flakes.sh b/tests/functional/flakes/flakes.sh index e7dffde07..f27925493 100644 --- a/tests/functional/flakes/flakes.sh +++ b/tests/functional/flakes/flakes.sh @@ -80,6 +80,18 @@ chmod +x "$nonFlakeDir/shebang.sh" git -C "$nonFlakeDir" add README.md shebang.sh git -C "$nonFlakeDir" commit -m 'Initial' +cat > $nonFlakeDir/shebang-perl.sh < Date: Wed, 1 Sep 2021 02:19:51 -0400 Subject: [PATCH 35/55] Read file incrementally --- src/libutil/args.cc | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/libutil/args.cc b/src/libutil/args.cc index 7106491fd..80216e7ad 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -6,6 +6,8 @@ #include "users.hh" #include "json-utils.hh" +#include +#include #include #include @@ -109,14 +111,17 @@ void Args::parseCmdline(const std::string & programName, const Strings & _cmdlin if (isNixCommand && cmdline.size() > 0) { auto script = *cmdline.begin(); try { - auto lines = tokenizeString(readFile(script), "\n"); - if (std::regex_search(lines.front(), std::regex("^#!"))) { - lines.pop_front(); + std::ifstream stream(script); + char shebang[3]={0,0,0}; + stream.get(shebang,3); + if (strncmp(shebang,"#!",2) == 0){ for (auto pos = std::next(cmdline.begin()); pos != cmdline.end();pos++) savedArgs.push_back(*pos); cmdline.clear(); - for (auto line : lines) { + std::string line; + std::getline(stream,line); + while (std::getline(stream,line) && !line.empty()){ line = chomp(line); std::smatch match; From 06f3583b1c860b24f2f704f216f4db8fd1dcae9c Mon Sep 17 00:00:00 2001 From: Tom Bereknyei Date: Sat, 26 Nov 2022 09:06:39 -0500 Subject: [PATCH 36/55] feat: break out of shebang processing for non-comments --- src/libutil/args.cc | 3 ++- tests/functional/flakes/flakes.sh | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/src/libutil/args.cc b/src/libutil/args.cc index 80216e7ad..d90374adc 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ 
-121,7 +121,8 @@ void Args::parseCmdline(const std::string & programName, const Strings & _cmdlin std::string line; std::getline(stream,line); - while (std::getline(stream,line) && !line.empty()){ + std::string commentChars("#/\\%@*-"); + while (std::getline(stream,line) && !line.empty() && commentChars.find(line[0]) != std::string::npos){ line = chomp(line); std::smatch match; diff --git a/tests/functional/flakes/flakes.sh b/tests/functional/flakes/flakes.sh index f27925493..28b5e4e0f 100644 --- a/tests/functional/flakes/flakes.sh +++ b/tests/functional/flakes/flakes.sh @@ -80,17 +80,17 @@ chmod +x "$nonFlakeDir/shebang.sh" git -C "$nonFlakeDir" add README.md shebang.sh git -C "$nonFlakeDir" commit -m 'Initial' -cat > $nonFlakeDir/shebang-perl.sh < $nonFlakeDir/shebang-comments.sh < Date: Tue, 3 Jan 2023 05:55:06 -0500 Subject: [PATCH 37/55] doc: remove reference to nix-shell --- src/nix/shell.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/nix/shell.md b/src/nix/shell.md index b0bfa1609..7e0e5f213 100644 --- a/src/nix/shell.md +++ b/src/nix/shell.md @@ -148,9 +148,8 @@ of Nixpkgs: #!nix shell --override-input nixpkgs github:NixOS/nixpkgs/eabc38219184cc3e04a974fe31857d8e0eac098d -The examples above all used `-p` to get dependencies from Nixpkgs. You -can also use a Nix expression to build your own dependencies. For -example, the Python example could have been written as: +You can also use a Nix expression to build your own dependencies. For example, +the Python example could have been written as: ```python #! /usr/bin/env nix @@ -166,6 +165,4 @@ python3.withPackages (ps: with ps; [ prettytable ]) ``` - - )"" From bbeddf06027424dc08742c1d54bf2fdc85ff6e8e Mon Sep 17 00:00:00 2001 From: Tom Bereknyei Date: Fri, 12 May 2023 07:44:25 -0400 Subject: [PATCH 38/55] fix: refactor parseCmdline interface --- src/libutil/args.cc | 9 ++++----- src/libutil/args.hh | 2 +- src/nix/main.cc | 5 ++++- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/libutil/args.cc b/src/libutil/args.cc index d90374adc..481ed33ff 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -83,10 +83,10 @@ std::optional RootArgs::needsCompletion(std::string_view s) void RootArgs::parseCmdline(const Strings & _cmdline) { // Default via 5.1.2.2.1 in C standard - Args::parseCmdline("", _cmdline); + Args::parseCmdline(_cmdline, false); } -void Args::parseCmdline(const std::string & programName, const Strings & _cmdline) +void Args::parseCmdline(const Strings & _cmdline, bool allowShebang) { Strings pendingArgs; bool dashDash = false; @@ -107,8 +107,7 @@ void Args::parseCmdline(const std::string & programName, const Strings & _cmdlin // if we have at least one argument, it's the name of an // executable file, and it starts with "#!". 
Strings savedArgs; - auto isNixCommand = std::regex_search(programName, std::regex("nix$")); - if (isNixCommand && cmdline.size() > 0) { + if (allowShebang){ auto script = *cmdline.begin(); try { std::ifstream stream(script); @@ -121,7 +120,7 @@ void Args::parseCmdline(const std::string & programName, const Strings & _cmdlin std::string line; std::getline(stream,line); - std::string commentChars("#/\\%@*-"); + static const std::string commentChars("#/\\%@*-"); while (std::getline(stream,line) && !line.empty() && commentChars.find(line[0]) != std::string::npos){ line = chomp(line); diff --git a/src/libutil/args.hh b/src/libutil/args.hh index 1d056678d..e753dcaf6 100644 --- a/src/libutil/args.hh +++ b/src/libutil/args.hh @@ -30,7 +30,7 @@ public: * Parse the command line with argv0, throwing a UsageError if something goes wrong. */ - void parseCmdline(const std::string & argv0, const Strings & cmdline); + void parseCmdline(const Strings & _cmdline, bool allowShebang); /** * Return a short one-line description of the command. diff --git a/src/nix/main.cc b/src/nix/main.cc index 16fb50806..73641f6d2 100644 --- a/src/nix/main.cc +++ b/src/nix/main.cc @@ -22,6 +22,7 @@ #include #include #include +#include #include @@ -428,7 +429,9 @@ void mainWrapped(int argc, char * * argv) }); try { - args.parseCmdline(programName, argvToStrings(argc, argv)); + auto isNixCommand = std::regex_search(programName, std::regex("nix$")); + auto allowShebang = isNixCommand && argc > 1; + args.parseCmdline(argvToStrings(argc, argv),allowShebang); } catch (UsageError &) { if (!args.helpRequested && !args.completions) throw; } From cc68ed8ff7b9e3898308a39dfdad2660bacc153f Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Fri, 12 May 2023 19:42:49 +0200 Subject: [PATCH 39/55] libcmd: lookupFileArg(): add baseDir This will allow a different base directory to be used, matching a shebang script location instead of the working directory. 
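A rough sketch of the intended call pattern, assuming the `lookupFileArg()` signature added below and the `getCommandBaseDir()` helper introduced later in this series (`resolveFileArg` is a hypothetical wrapper, for illustration only):

```cpp
// Sketch: resolve a --file argument against the command's base directory,
// i.e. the shebang script's directory when invoked via `#! nix`, and the
// current working directory otherwise.
SourcePath resolveFileArg(EvalState & state, const Args & args, std::string_view file)
{
    return lookupFileArg(state, file, CanonPath::fromCwd(args.getCommandBaseDir()));
}
```

Keeping `baseDir = CanonPath::fromCwd()` as the default leaves existing callers unchanged; only shebang-aware call sites need to pass the script's directory.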
--- src/libcmd/common-eval-args.cc | 4 ++-- src/libcmd/common-eval-args.hh | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/libcmd/common-eval-args.cc b/src/libcmd/common-eval-args.cc index 91fa881b1..401acc38e 100644 --- a/src/libcmd/common-eval-args.cc +++ b/src/libcmd/common-eval-args.cc @@ -164,7 +164,7 @@ Bindings * MixEvalArgs::getAutoArgs(EvalState & state) return res.finish(); } -SourcePath lookupFileArg(EvalState & state, std::string_view s) +SourcePath lookupFileArg(EvalState & state, std::string_view s, CanonPath baseDir) { if (EvalSettings::isPseudoUrl(s)) { auto storePath = fetchers::downloadTarball( @@ -185,7 +185,7 @@ SourcePath lookupFileArg(EvalState & state, std::string_view s) } else - return state.rootPath(CanonPath::fromCwd(s)); + return state.rootPath(CanonPath(s, baseDir)); } } diff --git a/src/libcmd/common-eval-args.hh b/src/libcmd/common-eval-args.hh index 6359b2579..4b403d936 100644 --- a/src/libcmd/common-eval-args.hh +++ b/src/libcmd/common-eval-args.hh @@ -2,6 +2,7 @@ ///@file #include "args.hh" +#include "canon-path.hh" #include "common-args.hh" #include "search-path.hh" @@ -28,6 +29,6 @@ private: std::map autoArgs; }; -SourcePath lookupFileArg(EvalState & state, std::string_view s); +SourcePath lookupFileArg(EvalState & state, std::string_view s, CanonPath baseDir = CanonPath::fromCwd()); } From 20ff61ab252fc1d2bd69987f51a000739b24c670 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Fri, 12 May 2023 19:46:37 +0200 Subject: [PATCH 40/55] nix: Reserve shebang line syntax and only parse double backtick quotes Being restrictive about syntax leaves opportunity to improve the syntax and functionality later. --- doc/manual/src/release-notes/rl-next.md | 11 +- src/libutil/args.cc | 152 +++++++++++++++++++++++- src/libutil/util.cc | 43 ------- src/libutil/util.hh | 5 - src/nix/shell.md | 8 +- tests/functional/flakes/flakes.sh | 16 ++- 6 files changed, 177 insertions(+), 58 deletions(-) diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md index 93d4f432b..4bff3c685 100644 --- a/doc/manual/src/release-notes/rl-next.md +++ b/doc/manual/src/release-notes/rl-next.md @@ -2,10 +2,13 @@ - The experimental nix command is now a `#!-interpreter` by appending the contents of any `#! nix` lines and the script's location to a single call. + + Verbatim strings may be passed in double backtick (```` `` ````) quotes. + Some examples: ``` #!/usr/bin/env nix - #! nix shell --file "" hello --command bash + #! nix shell --file ```` hello --command bash hello | cowsay ``` @@ -19,8 +22,10 @@ or ```bash #! /usr/bin/env nix - #! nix shell --impure --expr - #! nix "with (import (builtins.getFlake ''nixpkgs'') {}); terraform.withPlugins (plugins: [ plugins.openstack ])" + #! nix shell --impure --expr `` + #! nix with (import (builtins.getFlake "nixpkgs") {}); + #! nix terraform.withPlugins (plugins: [ plugins.openstack ]) + #! nix `` #! nix --command bash terraform "$@" diff --git a/src/libutil/args.cc b/src/libutil/args.cc index 481ed33ff..ab6e0e266 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -86,6 +86,147 @@ void RootArgs::parseCmdline(const Strings & _cmdline) Args::parseCmdline(_cmdline, false); } +/** + * Basically this is `typedef std::optional Parser(std::string_view s, Strings & r);` + * + * Except we can't recursively reference the Parser typedef, so we have to write a class. 
+ */ +struct Parser { + std::string_view remaining; + + /** + * @brief Parse the next character(s) + * + * @param r + * @return std::shared_ptr + */ + virtual void operator()(std::shared_ptr & state, Strings & r) = 0; + + Parser(std::string_view s) : remaining(s) {}; +}; + +struct ParseQuoted : public Parser { + /** + * @brief Accumulated string + * + * Parsed argument up to this point. + */ + std::string acc; + + ParseQuoted(std::string_view s) : Parser(s) {}; + + virtual void operator()(std::shared_ptr & state, Strings & r) override; +}; + + +struct ParseUnquoted : public Parser { + /** + * @brief Accumulated string + * + * Parsed argument up to this point. Empty string is not representable in + * unquoted syntax, so we use it for the initial state. + */ + std::string acc; + + ParseUnquoted(std::string_view s) : Parser(s) {}; + + virtual void operator()(std::shared_ptr & state, Strings & r) override { + if (remaining.empty()) { + if (!acc.empty()) + r.push_back(acc); + state = nullptr; // done + return; + } + switch (remaining[0]) { + case ' ': case '\t': case '\n': case '\r': + if (!acc.empty()) + r.push_back(acc); + state = std::make_shared(ParseUnquoted(remaining.substr(1))); + return; + case '`': + if (remaining.size() > 1 && remaining[1] == '`') { + state = std::make_shared(ParseQuoted(remaining.substr(2))); + return; + } + else + throw Error("single backtick is not a supported syntax in the nix shebang."); + + // reserved characters + // meaning to be determined, or may be reserved indefinitely so that + // #!nix syntax looks unambiguous + case '$': + case '*': + case '~': + case '<': + case '>': + case '|': + case ';': + case '(': + case ')': + case '[': + case ']': + case '{': + case '}': + case '\'': + case '"': + case '\\': + throw Error("unsupported unquoted character in nix shebang: " + std::string(1, remaining[0]) + ". Use double backticks to escape?"); + + case '#': + if (acc.empty()) { + throw Error ("unquoted nix shebang argument cannot start with #. 
Use double backticks to escape?"); + } else { + acc += remaining[0]; + remaining = remaining.substr(1); + return; + } + + default: + acc += remaining[0]; + remaining = remaining.substr(1); + return; + } + assert(false); + } +}; + +void ParseQuoted::operator()(std::shared_ptr &state, Strings & r) { + if (remaining.empty()) { + throw Error("unterminated quoted string in nix shebang"); + } + switch (remaining[0]) { + case '`': + if (remaining.size() > 1 && remaining[1] == '`') { + state = std::make_shared(ParseUnquoted(remaining.substr(2))); + r.push_back(acc); + return; + } + else { + acc += remaining[0]; + remaining = remaining.substr(1); + return; + } + default: + acc += remaining[0]; + remaining = remaining.substr(1); + return; + } + assert(false); +} + +static Strings parseShebangContent(std::string_view s) { + Strings result; + std::shared_ptr parserState(std::make_shared(ParseUnquoted(s))); + + // trampoline == iterated strategy pattern + while (parserState) { + auto currentState = parserState; + (*currentState)(parserState, result); + } + + return result; +} + void Args::parseCmdline(const Strings & _cmdline, bool allowShebang) { Strings pendingArgs; @@ -121,13 +262,18 @@ void Args::parseCmdline(const Strings & _cmdline, bool allowShebang) std::string line; std::getline(stream,line); static const std::string commentChars("#/\\%@*-"); + std::string shebangContent; while (std::getline(stream,line) && !line.empty() && commentChars.find(line[0]) != std::string::npos){ line = chomp(line); std::smatch match; - if (std::regex_match(line, match, std::regex("^#!\\s*nix\\s(.*)$"))) - for (const auto & word : shellwords(match[1].str())) - cmdline.push_back(word); + // We match one space after `nix` so that we preserve indentation. + // No space is necessary for an empty line. An empty line has basically no effect. + if (std::regex_match(line, match, std::regex("^#!\\s*nix(:? |$)(.*)$"))) + shebangContent += match[2].str() + "\n"; + } + for (const auto & word : parseShebangContent(shebangContent)) { + cmdline.push_back(word); } cmdline.push_back(script); for (auto pos = savedArgs.begin(); pos != savedArgs.end();pos++) diff --git a/src/libutil/util.cc b/src/libutil/util.cc index 6ca1dbd7a..5bb3f374b 100644 --- a/src/libutil/util.cc +++ b/src/libutil/util.cc @@ -138,49 +138,6 @@ std::string shellEscape(const std::string_view s) return r; } -/* Recreate the effect of the perl shellwords function, breaking up a - * string into arguments like a shell word, including escapes - */ -std::vector shellwords2(const std::string & s) -{ - std::regex whitespace("^(\\s+).*"); - auto begin = s.cbegin(); - std::vector res; - std::string cur; - enum state { - sBegin, - sQuote - }; - state st = sBegin; - auto it = begin; - for (; it != s.cend(); ++it) { - if (st == sBegin) { - std::smatch match; - if (regex_search(it, s.cend(), match, whitespace)) { - cur.append(begin, it); - res.push_back(cur); - cur.clear(); - it = match[1].second; - begin = it; - } - } - switch (*it) { - case '"': - cur.append(begin, it); - begin = it + 1; - st = st == sBegin ? 
sQuote : sBegin; - break; - case '\\': - /* perl shellwords mostly just treats the next char as part of the string with no special processing */ - cur.append(begin, it); - begin = ++it; - break; - } - } - cur.append(begin, it); - if (!cur.empty()) res.push_back(cur); - return res; -} void ignoreException(Verbosity lvl) { diff --git a/src/libutil/util.hh b/src/libutil/util.hh index b7d3ac504..27faa4d6d 100644 --- a/src/libutil/util.hh +++ b/src/libutil/util.hh @@ -189,11 +189,6 @@ std::string toLower(const std::string & s); std::string shellEscape(const std::string_view s); -/* Recreate the effect of the perl shellwords function, breaking up a - string into arguments like a shell word, including escapes. */ -std::vector shellwords2(const std::string & s); - - /* Exception handling in destructors: print an error message, then ignore the exception. */ void ignoreException(Verbosity lvl = lvlError); diff --git a/src/nix/shell.md b/src/nix/shell.md index 7e0e5f213..7c315fb3f 100644 --- a/src/nix/shell.md +++ b/src/nix/shell.md @@ -109,8 +109,10 @@ package like Terraform: ```bash #! /usr/bin/env nix -#! nix shell --impure --expr -#! nix "with (import (builtins.getFlake ''nixpkgs'') {}); terraform.withPlugins (plugins: [ plugins.openstack ])" +#! nix shell --impure --expr `` +#! nix with (import (builtins.getFlake ''nixpkgs'') {}); +#! nix terraform.withPlugins (plugins: [ plugins.openstack ]) +#! nix `` #! nix --command bash terraform "$@" @@ -118,7 +120,7 @@ terraform "$@" > **Note** > -> You must use double quotes (`"`) when passing a simple Nix expression +> You must use double backticks (```` `` ````) when passing a simple Nix expression > in a nix shell shebang. Finally, using the merging of multiple nix shell shebangs the following diff --git a/tests/functional/flakes/flakes.sh b/tests/functional/flakes/flakes.sh index 28b5e4e0f..a0a34ffa9 100644 --- a/tests/functional/flakes/flakes.sh +++ b/tests/functional/flakes/flakes.sh @@ -80,6 +80,7 @@ chmod +x "$nonFlakeDir/shebang.sh" git -C "$nonFlakeDir" add README.md shebang.sh git -C "$nonFlakeDir" commit -m 'Initial' +# this also tests a fairly trivial double backtick quoted string, ``--command`` cat > $nonFlakeDir/shebang-comments.sh < $nonFlakeDir/shebang-comments.sh < $nonFlakeDir/shebang-reject.sh <&1 | grepQuiet -F 'error: unsupported unquoted character in nix shebang: *. Use double backticks to escape?' From 198bc22e3b856bf2a86225c2ce5b3a7394e3ac0c Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Fri, 12 May 2023 19:54:54 +0200 Subject: [PATCH 41/55] nix: Add command baseDir to parse --expr relative to shebang script --- doc/manual/src/release-notes/rl-next.md | 1 + src/libcmd/installables.cc | 3 ++- src/libutil/args.cc | 9 +++++++++ src/libutil/args.hh | 20 ++++++++++++++++++++ tests/functional/flakes/flakes.sh | 19 +++++++++++++++++++ 5 files changed, 51 insertions(+), 1 deletion(-) diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md index 4bff3c685..28b6d75f5 100644 --- a/doc/manual/src/release-notes/rl-next.md +++ b/doc/manual/src/release-notes/rl-next.md @@ -4,6 +4,7 @@ contents of any `#! nix` lines and the script's location to a single call. Verbatim strings may be passed in double backtick (```` `` ````) quotes. + `--expr` resolves relative paths based on the shebang script location. 
Some examples: ``` diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index e7f58556f..528643dc5 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -445,7 +445,8 @@ Installables SourceExprCommand::parseInstallables( else if (file) state->evalFile(lookupFileArg(*state, *file), *vFile); else { - auto e = state->parseExprFromString(*expr, state->rootPath(CanonPath::fromCwd())); + CanonPath dir(CanonPath::fromCwd(getCommandBaseDir())); + auto e = state->parseExprFromString(*expr, state->rootPath(dir)); state->eval(e, *vFile); } diff --git a/src/libutil/args.cc b/src/libutil/args.cc index ab6e0e266..0012b3f47 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -276,6 +276,7 @@ void Args::parseCmdline(const Strings & _cmdline, bool allowShebang) cmdline.push_back(word); } cmdline.push_back(script); + commandBaseDir = dirOf(script); for (auto pos = savedArgs.begin(); pos != savedArgs.end();pos++) cmdline.push_back(*pos); } @@ -336,6 +337,14 @@ void Args::parseCmdline(const Strings & _cmdline, bool allowShebang) d.completer(*completions, d.n, d.prefix); } +Path Args::getCommandBaseDir() const +{ + if (parent) + return parent->getCommandBaseDir(); + else + return commandBaseDir; +} + bool Args::processFlag(Strings::iterator & pos, Strings::iterator end) { assert(pos != end); diff --git a/src/libutil/args.hh b/src/libutil/args.hh index e753dcaf6..9c942606e 100644 --- a/src/libutil/args.hh +++ b/src/libutil/args.hh @@ -24,6 +24,16 @@ class AddCompletions; class Args { + /** + * @brief The command's "working directory", but only set when top level. + * + * Use getCommandBaseDir() to get the directory regardless of whether this + * is a top-level command or subcommand. + * + * @see getCommandBaseDir() + */ + Path commandBaseDir = "."; + public: /** @@ -44,6 +54,16 @@ public: */ virtual std::string doc() { return ""; } + /** + * @brief Get the base directory for the command. + * + * @return Generally the working directory, but in case of a shebang + * interpreter, returns the directory of the script. + * + * This only returns the correct value after parseCmdline() has run. + */ + Path getCommandBaseDir() const; + protected: /** diff --git a/tests/functional/flakes/flakes.sh b/tests/functional/flakes/flakes.sh index a0a34ffa9..76f3495dd 100644 --- a/tests/functional/flakes/flakes.sh +++ b/tests/functional/flakes/flakes.sh @@ -105,6 +105,24 @@ foo EOF chmod +x $nonFlakeDir/shebang-reject.sh +cat > $nonFlakeDir/shebang-inline-expr.sh <> $nonFlakeDir/shebang-inline-expr.sh <<"EOF" +#! nix --offline shell +#! nix --impure --expr `` +#! nix let flake = (builtins.getFlake (toString ../flake1)).packages; +#! nix fooScript = flake.${builtins.currentSystem}.fooScript; +#! nix /* just a comment !@#$%^&*()__+ # */ +#! nix in fooScript +#! nix `` +#! 
nix --no-write-lock-file --command bash +set -ex +foo +echo "$@" +EOF +chmod +x $nonFlakeDir/shebang-inline-expr.sh + # Construct a custom registry, additionally test the --registry flag nix registry add --registry "$registry" flake1 "git+file://$flake1Dir" nix registry add --registry "$registry" flake2 "git+file://$percentEncodedFlake2Dir" @@ -552,4 +570,5 @@ expectStderr 1 nix flake metadata "$flake2Dir" --no-allow-dirty --reference-lock [[ $($nonFlakeDir/shebang.sh) = "foo" ]] [[ $($nonFlakeDir/shebang.sh "bar") = "foo"$'\n'"bar" ]] [[ $($nonFlakeDir/shebang-comments.sh ) = "foo" ]] +[[ $($nonFlakeDir/shebang-inline-expr.sh baz) = "foo"$'\n'"baz" ]] expect 1 $nonFlakeDir/shebang-reject.sh 2>&1 | grepQuiet -F 'error: unsupported unquoted character in nix shebang: *. Use double backticks to escape?' From 466271568be7d3bcf0151dc7e09899775ac31f13 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Fri, 12 May 2023 19:56:04 +0200 Subject: [PATCH 42/55] nix: Parse --file relative to shebang script --- doc/manual/src/release-notes/rl-next.md | 2 +- src/libcmd/installables.cc | 5 +++-- tests/functional/flakes/flakes.sh | 20 ++++++++++++++++++++ 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md index 28b6d75f5..7eae5d96e 100644 --- a/doc/manual/src/release-notes/rl-next.md +++ b/doc/manual/src/release-notes/rl-next.md @@ -4,7 +4,7 @@ contents of any `#! nix` lines and the script's location to a single call. Verbatim strings may be passed in double backtick (```` `` ````) quotes. - `--expr` resolves relative paths based on the shebang script location. + `--file` and `--expr` resolve relative paths based on the script location. Some examples: ``` diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index 528643dc5..d897a01c4 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -442,8 +442,9 @@ Installables SourceExprCommand::parseInstallables( auto e = state->parseStdin(); state->eval(e, *vFile); } - else if (file) - state->evalFile(lookupFileArg(*state, *file), *vFile); + else if (file) { + state->evalFile(lookupFileArg(*state, *file, CanonPath::fromCwd(getCommandBaseDir())), *vFile); + } else { CanonPath dir(CanonPath::fromCwd(getCommandBaseDir())); auto e = state->parseExprFromString(*expr, state->rootPath(dir)); diff --git a/tests/functional/flakes/flakes.sh b/tests/functional/flakes/flakes.sh index 76f3495dd..ccf1699f9 100644 --- a/tests/functional/flakes/flakes.sh +++ b/tests/functional/flakes/flakes.sh @@ -123,6 +123,25 @@ echo "$@" EOF chmod +x $nonFlakeDir/shebang-inline-expr.sh +cat > $nonFlakeDir/fooScript.nix <<"EOF" +let flake = (builtins.getFlake (toString ../flake1)).packages; + fooScript = flake.${builtins.currentSystem}.fooScript; + in fooScript +EOF + +cat > $nonFlakeDir/shebang-file.sh <> $nonFlakeDir/shebang-file.sh <<"EOF" +#! nix --offline shell +#! nix --impure --file ./fooScript.nix +#! 
nix --no-write-lock-file --command bash +set -ex +foo +echo "$@" +EOF +chmod +x $nonFlakeDir/shebang-file.sh + # Construct a custom registry, additionally test the --registry flag nix registry add --registry "$registry" flake1 "git+file://$flake1Dir" nix registry add --registry "$registry" flake2 "git+file://$percentEncodedFlake2Dir" @@ -571,4 +590,5 @@ expectStderr 1 nix flake metadata "$flake2Dir" --no-allow-dirty --reference-lock [[ $($nonFlakeDir/shebang.sh "bar") = "foo"$'\n'"bar" ]] [[ $($nonFlakeDir/shebang-comments.sh ) = "foo" ]] [[ $($nonFlakeDir/shebang-inline-expr.sh baz) = "foo"$'\n'"baz" ]] +[[ $($nonFlakeDir/shebang-file.sh baz) = "foo"$'\n'"baz" ]] expect 1 $nonFlakeDir/shebang-reject.sh 2>&1 | grepQuiet -F 'error: unsupported unquoted character in nix shebang: *. Use double backticks to escape?' From 51bb69535b76060582f91e5c044d5752d8e3998b Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Fri, 12 May 2023 19:57:36 +0200 Subject: [PATCH 43/55] nix/installables.cc: Use getCommandBaseDir() where possible These usages of the working directory are perhaps unlikely to interact with shebangs, but the code is more consistent this way, and we're less likely to miss usages that do interact. --- src/libcmd/installables.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index d897a01c4..f840865d2 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -88,7 +88,7 @@ MixFlakeOptions::MixFlakeOptions() lockFlags.writeLockFile = false; lockFlags.inputOverrides.insert_or_assign( flake::parseInputPath(inputPath), - parseFlakeRef(flakeRef, absPath("."), true)); + parseFlakeRef(flakeRef, absPath(getCommandBaseDir()), true)); }}, .completer = {[&](AddCompletions & completions, size_t n, std::string_view prefix) { if (n == 0) { @@ -130,7 +130,7 @@ MixFlakeOptions::MixFlakeOptions() auto evalState = getEvalState(); auto flake = flake::lockFlake( *evalState, - parseFlakeRef(flakeRef, absPath(".")), + parseFlakeRef(flakeRef, absPath(getCommandBaseDir())), { .writeLockFile = false }); for (auto & [inputName, input] : flake.lockFile.root->inputs) { auto input2 = flake.lockFile.findInput({inputName}); // resolve 'follows' nodes @@ -294,7 +294,7 @@ void completeFlakeRefWithFragment( prefixRoot = "."; } auto flakeRefS = std::string(prefix.substr(0, hash)); - auto flakeRef = parseFlakeRef(expandTilde(flakeRefS), absPath(".")); + auto flakeRef = parseFlakeRef(expandTilde(flakeRefS), absPath(getCommandBaseDir())); auto evalCache = openEvalCache(*evalState, std::make_shared(lockFlake(*evalState, flakeRef, lockFlags))); @@ -482,7 +482,7 @@ Installables SourceExprCommand::parseInstallables( } try { - auto [flakeRef, fragment] = parseFlakeRefWithFragment(std::string { prefix }, absPath(".")); + auto [flakeRef, fragment] = parseFlakeRefWithFragment(std::string { prefix }, absPath(getCommandBaseDir())); result.push_back(make_ref( this, getEvalState(), @@ -756,7 +756,7 @@ std::vector RawInstallablesCommand::getFlakeRefsForCompletion() for (auto i : rawInstallables) res.push_back(parseFlakeRefWithFragment( expandTilde(i), - absPath(".")).first); + absPath(getCommandBaseDir())).first); return res; } @@ -778,7 +778,7 @@ std::vector InstallableCommand::getFlakeRefsForCompletion() return { parseFlakeRefWithFragment( expandTilde(_installable), - absPath(".")).first + absPath(getCommandBaseDir())).first }; } From e91fd837ee997cc1879cc9035158260f3dc7cf67 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 
23 Oct 2023 16:16:51 +0200 Subject: [PATCH 44/55] Move shebang docs from rl-next to nix.md --- doc/manual/src/release-notes/rl-next.md | 48 ------------------- src/nix/nix.md | 61 +++++++++++++++++++++++++ 2 files changed, 61 insertions(+), 48 deletions(-) diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md index 7eae5d96e..608699270 100644 --- a/doc/manual/src/release-notes/rl-next.md +++ b/doc/manual/src/release-notes/rl-next.md @@ -2,54 +2,6 @@ - The experimental nix command is now a `#!-interpreter` by appending the contents of any `#! nix` lines and the script's location to a single call. - - Verbatim strings may be passed in double backtick (```` `` ````) quotes. - `--file` and `--expr` resolve relative paths based on the script location. - - Some examples: - ``` - #!/usr/bin/env nix - #! nix shell --file ```` hello --command bash - - hello | cowsay - ``` - or with flakes: - ``` - #!/usr/bin/env nix - #! nix shell nixpkgs#bash nixpkgs#hello nixpkgs#cowsay --command bash - - hello | cowsay - ``` - or - ```bash - #! /usr/bin/env nix - #! nix shell --impure --expr `` - #! nix with (import (builtins.getFlake "nixpkgs") {}); - #! nix terraform.withPlugins (plugins: [ plugins.openstack ]) - #! nix `` - #! nix --command bash - - terraform "$@" - ``` - or - ``` - #!/usr/bin/env nix - //! ```cargo - //! [dependencies] - //! time = "0.1.25" - //! ``` - /* - #!nix shell nixpkgs#rustc nixpkgs#rust-script nixpkgs#cargo --command rust-script - */ - fn main() { - for argument in std::env::args().skip(1) { - println!("{}", argument); - }; - println!("{}", std::env::var("HOME").expect("")); - println!("{}", time::now().rfc822z()); - } - // vim: ft=rust - ``` - [URL flake references](@docroot@/command-ref/new-cli/nix3-flake.md#flake-references) now support [percent-encoded](https://datatracker.ietf.org/doc/html/rfc3986#section-2.1) characters. - [Path-like flake references](@docroot@/command-ref/new-cli/nix3-flake.md#path-like-syntax) now accept arbitrary unicode characters (except `#` and `?`). diff --git a/src/nix/nix.md b/src/nix/nix.md index 6e7e8a649..5bf82a8bf 100644 --- a/src/nix/nix.md +++ b/src/nix/nix.md @@ -238,4 +238,65 @@ operate are determined as follows: Most `nix` subcommands operate on a *Nix store*. These are documented in [`nix help-stores`](./nix3-help-stores.md). +# Shebang interpreter + +The `nix` command can be used as a `#!` interpreter. +Arguments to Nix can be passed on subsequent lines in the script. + +Verbatim strings may be passed in double backtick (```` `` ````) quotes. + +`--file` and `--expr` resolve relative paths based on the script location. + +Examples: + +``` +#!/usr/bin/env nix +#! nix shell --file ```` hello cowsay --command bash + +hello | cowsay +``` + +or with **flakes**: + +``` +#!/usr/bin/env nix +#! nix shell nixpkgs#bash nixpkgs#hello nixpkgs#cowsay --command bash + +hello | cowsay +``` + +or with an **expression**: + +```bash +#! /usr/bin/env nix +#! nix shell --impure --expr `` +#! nix with (import (builtins.getFlake "nixpkgs") {}); +#! nix terraform.withPlugins (plugins: [ plugins.openstack ]) +#! nix `` +#! nix --command bash + +terraform "$@" +``` + +or with cascading interpreters. Note that the `#! nix` lines don't need to follow after the first line, to accomodate other interpreters. + +``` +#!/usr/bin/env nix +//! ```cargo +//! [dependencies] +//! time = "0.1.25" +//! 
``` +/* +#!nix shell nixpkgs#rustc nixpkgs#rust-script nixpkgs#cargo --command rust-script +*/ +fn main() { + for argument in std::env::args().skip(1) { + println!("{}", argument); + }; + println!("{}", std::env::var("HOME").expect("")); + println!("{}", time::now().rfc822z()); +} +// vim: ft=rust +``` + )"" From ffd414eb756dcb3c64348551d5dbaf674c0d4900 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 23 Oct 2023 18:38:54 +0200 Subject: [PATCH 45/55] Fix nix shebang interaction with #8131 overhaul completions --- src/libcmd/installables.cc | 4 +++- src/libutil/args.cc | 19 ++++++++----------- src/libutil/args.hh | 17 +---------------- src/libutil/args/root.hh | 14 +++++++++++++- 4 files changed, 25 insertions(+), 29 deletions(-) diff --git a/src/libcmd/installables.cc b/src/libcmd/installables.cc index f840865d2..1c6103020 100644 --- a/src/libcmd/installables.cc +++ b/src/libcmd/installables.cc @@ -294,7 +294,9 @@ void completeFlakeRefWithFragment( prefixRoot = "."; } auto flakeRefS = std::string(prefix.substr(0, hash)); - auto flakeRef = parseFlakeRef(expandTilde(flakeRefS), absPath(getCommandBaseDir())); + + // TODO: ideally this would use the command base directory instead of assuming ".". + auto flakeRef = parseFlakeRef(expandTilde(flakeRefS), absPath(".")); auto evalCache = openEvalCache(*evalState, std::make_shared(lockFlake(*evalState, flakeRef, lockFlags))); diff --git a/src/libutil/args.cc b/src/libutil/args.cc index 0012b3f47..5ba1e5c55 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -80,12 +80,6 @@ std::optional RootArgs::needsCompletion(std::string_view s) return {}; } -void RootArgs::parseCmdline(const Strings & _cmdline) -{ - // Default via 5.1.2.2.1 in C standard - Args::parseCmdline(_cmdline, false); -} - /** * Basically this is `typedef std::optional Parser(std::string_view s, Strings & r);` * @@ -227,7 +221,7 @@ static Strings parseShebangContent(std::string_view s) { return result; } -void Args::parseCmdline(const Strings & _cmdline, bool allowShebang) +void RootArgs::parseCmdline(const Strings & _cmdline, bool allowShebang) { Strings pendingArgs; bool dashDash = false; @@ -339,10 +333,13 @@ void Args::parseCmdline(const Strings & _cmdline, bool allowShebang) Path Args::getCommandBaseDir() const { - if (parent) - return parent->getCommandBaseDir(); - else - return commandBaseDir; + assert(parent); + return parent->getCommandBaseDir(); +} + +Path RootArgs::getCommandBaseDir() const +{ + return commandBaseDir; } bool Args::processFlag(Strings::iterator & pos, Strings::iterator end) diff --git a/src/libutil/args.hh b/src/libutil/args.hh index 9c942606e..30a44cd10 100644 --- a/src/libutil/args.hh +++ b/src/libutil/args.hh @@ -24,24 +24,9 @@ class AddCompletions; class Args { - /** - * @brief The command's "working directory", but only set when top level. - * - * Use getCommandBaseDir() to get the directory regardless of whether this - * is a top-level command or subcommand. - * - * @see getCommandBaseDir() - */ - Path commandBaseDir = "."; public: - /** - * Parse the command line with argv0, throwing a UsageError if something - goes wrong. - */ - void parseCmdline(const Strings & _cmdline, bool allowShebang); - /** * Return a short one-line description of the command. */ @@ -62,7 +47,7 @@ public: * * This only returns the correct value after parseCmdline() has run. 
*/ - Path getCommandBaseDir() const; + virtual Path getCommandBaseDir() const; protected: diff --git a/src/libutil/args/root.hh b/src/libutil/args/root.hh index bb98732a1..5c55c37a5 100644 --- a/src/libutil/args/root.hh +++ b/src/libutil/args/root.hh @@ -29,14 +29,26 @@ struct Completions final : AddCompletions */ class RootArgs : virtual public Args { + /** + * @brief The command's "working directory", but only set when top level. + * + * Use getCommandBaseDir() to get the directory regardless of whether this + * is a top-level command or subcommand. + * + * @see getCommandBaseDir() + */ + Path commandBaseDir = "."; + public: /** Parse the command line, throwing a UsageError if something goes * wrong. */ - void parseCmdline(const Strings & cmdline); + void parseCmdline(const Strings & cmdline, bool allowShebang = false); std::shared_ptr completions; + Path getCommandBaseDir() const override; + protected: friend class Args; From 589d3387769b18de9c8d42035eea7ac1e21c6fde Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 6 Nov 2023 18:19:14 +0100 Subject: [PATCH 46/55] parseShebangs: Make strings with backtick sequences representable --- src/libutil/args.cc | 32 ++++++++++++- src/libutil/args.hh | 2 + src/libutil/tests/args.cc | 94 +++++++++++++++++++++++++++++++++++++++ src/nix/nix.md | 4 +- 4 files changed, 129 insertions(+), 3 deletions(-) create mode 100644 src/libutil/tests/args.cc diff --git a/src/libutil/args.cc b/src/libutil/args.cc index 5ba1e5c55..4359c5e8e 100644 --- a/src/libutil/args.cc +++ b/src/libutil/args.cc @@ -189,12 +189,40 @@ void ParseQuoted::operator()(std::shared_ptr &state, Strings & r) { throw Error("unterminated quoted string in nix shebang"); } switch (remaining[0]) { + case ' ': + if ((remaining.size() == 3 && remaining[1] == '`' && remaining[2] == '`') + || (remaining.size() > 3 && remaining[1] == '`' && remaining[2] == '`' && remaining[3] != '`')) { + // exactly two backticks mark the end of a quoted string, but a preceding space is ignored if present. 
+ state = std::make_shared(ParseUnquoted(remaining.substr(3))); + r.push_back(acc); + return; + } + else { + // just a normal space + acc += remaining[0]; + remaining = remaining.substr(1); + return; + } case '`': - if (remaining.size() > 1 && remaining[1] == '`') { + // exactly two backticks mark the end of a quoted string + if ((remaining.size() == 2 && remaining[1] == '`') + || (remaining.size() > 2 && remaining[1] == '`' && remaining[2] != '`')) { state = std::make_shared(ParseUnquoted(remaining.substr(2))); r.push_back(acc); return; } + + // a sequence of at least 3 backticks is one escape-backtick which is ignored, followed by any number of backticks, which are verbatim + else if (remaining.size() >= 3 && remaining[1] == '`' && remaining[2] == '`') { + // ignore "escape" backtick + remaining = remaining.substr(1); + // add the rest + while (remaining.size() > 0 && remaining[0] == '`') { + acc += '`'; + remaining = remaining.substr(1); + } + return; + } else { acc += remaining[0]; remaining = remaining.substr(1); @@ -208,7 +236,7 @@ void ParseQuoted::operator()(std::shared_ptr &state, Strings & r) { assert(false); } -static Strings parseShebangContent(std::string_view s) { +Strings parseShebangContent(std::string_view s) { Strings result; std::shared_ptr parserState(std::make_shared(ParseUnquoted(s))); diff --git a/src/libutil/args.hh b/src/libutil/args.hh index 30a44cd10..7af82b178 100644 --- a/src/libutil/args.hh +++ b/src/libutil/args.hh @@ -409,4 +409,6 @@ public: virtual void add(std::string completion, std::string description = "") = 0; }; +Strings parseShebangContent(std::string_view s); + } diff --git a/src/libutil/tests/args.cc b/src/libutil/tests/args.cc new file mode 100644 index 000000000..e7a16b0be --- /dev/null +++ b/src/libutil/tests/args.cc @@ -0,0 +1,94 @@ +#include "../args.hh" +#include + +#include + +namespace nix { + + TEST(parseShebangContent, basic) { + std::list r = parseShebangContent("hi there"); + ASSERT_EQ(r.size(), 2); + auto i = r.begin(); + ASSERT_EQ(*i++, "hi"); + ASSERT_EQ(*i++, "there"); + } + + TEST(parseShebangContent, empty) { + std::list r = parseShebangContent(""); + ASSERT_EQ(r.size(), 0); + } + + TEST(parseShebangContent, doubleBacktick) { + std::list r = parseShebangContent("``\"ain't that nice\"``"); + ASSERT_EQ(r.size(), 1); + auto i = r.begin(); + ASSERT_EQ(*i++, "\"ain't that nice\""); + } + + TEST(parseShebangContent, doubleBacktickEmpty) { + std::list r = parseShebangContent("````"); + ASSERT_EQ(r.size(), 1); + auto i = r.begin(); + ASSERT_EQ(*i++, ""); + } + + TEST(parseShebangContent, doubleBacktickMarkdownInlineCode) { + std::list r = parseShebangContent("``# I'm markdown section about `coolFunction` ``"); + ASSERT_EQ(r.size(), 1); + auto i = r.begin(); + ASSERT_EQ(*i++, "# I'm markdown section about `coolFunction`"); + } + + TEST(parseShebangContent, doubleBacktickMarkdownCodeBlockNaive) { + std::list r = parseShebangContent("``Example 1\n```nix\na: a\n``` ``"); + auto i = r.begin(); + ASSERT_EQ(r.size(), 1); + ASSERT_EQ(*i++, "Example 1\n``nix\na: a\n``"); + } + + TEST(parseShebangContent, doubleBacktickMarkdownCodeBlockCorrect) { + std::list r = parseShebangContent("``Example 1\n````nix\na: a\n```` ``"); + auto i = r.begin(); + ASSERT_EQ(r.size(), 1); + ASSERT_EQ(*i++, "Example 1\n```nix\na: a\n```"); + } + + TEST(parseShebangContent, doubleBacktickMarkdownCodeBlock2) { + std::list r = parseShebangContent("``Example 1\n````nix\na: a\n````\nExample 2\n````nix\na: a\n```` ``"); + auto i = r.begin(); + ASSERT_EQ(r.size(), 1); + 
ASSERT_EQ(*i++, "Example 1\n```nix\na: a\n```\nExample 2\n```nix\na: a\n```"); + } + + TEST(parseShebangContent, singleBacktickInDoubleBacktickQuotes) { + std::list r = parseShebangContent("``` ``"); + auto i = r.begin(); + ASSERT_EQ(r.size(), 1); + ASSERT_EQ(*i++, "`"); + } + + TEST(parseShebangContent, singleBacktickAndSpaceInDoubleBacktickQuotes) { + std::list r = parseShebangContent("``` ``"); + auto i = r.begin(); + ASSERT_EQ(r.size(), 1); + ASSERT_EQ(*i++, "` "); + } + + TEST(parseShebangContent, doubleBacktickInDoubleBacktickQuotes) { + std::list r = parseShebangContent("````` ``"); + auto i = r.begin(); + ASSERT_EQ(r.size(), 1); + ASSERT_EQ(*i++, "``"); + } + + TEST(parseShebangContent, increasingQuotes) { + std::list r = parseShebangContent("```` ``` `` ````` `` `````` ``"); + auto i = r.begin(); + ASSERT_EQ(r.size(), 4); + ASSERT_EQ(*i++, ""); + ASSERT_EQ(*i++, "`"); + ASSERT_EQ(*i++, "``"); + ASSERT_EQ(*i++, "```"); + } + +} \ No newline at end of file diff --git a/src/nix/nix.md b/src/nix/nix.md index 5bf82a8bf..eb150f03b 100644 --- a/src/nix/nix.md +++ b/src/nix/nix.md @@ -243,7 +243,9 @@ in [`nix help-stores`](./nix3-help-stores.md). The `nix` command can be used as a `#!` interpreter. Arguments to Nix can be passed on subsequent lines in the script. -Verbatim strings may be passed in double backtick (```` `` ````) quotes. +Verbatim strings may be passed in double backtick (```` `` ````) quotes. +Sequences of _n_ backticks of 3 or longer are parsed as _n-1_ literal backticks. +A single space before the closing ```` `` ```` is ignored if present. `--file` and `--expr` resolve relative paths based on the script location. From ab69dc4da3ce5dc270e11b460c5b99f549bcf5d3 Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Mon, 6 Nov 2023 19:15:36 +0100 Subject: [PATCH 47/55] Test parseShebangContent round trip --- src/libutil/tests/args.cc | 74 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/src/libutil/tests/args.cc b/src/libutil/tests/args.cc index e7a16b0be..bea74a8c8 100644 --- a/src/libutil/tests/args.cc +++ b/src/libutil/tests/args.cc @@ -1,7 +1,9 @@ #include "../args.hh" +#include "libutil/fs-sink.hh" #include #include +#include namespace nix { @@ -91,4 +93,76 @@ namespace nix { ASSERT_EQ(*i++, "```"); } + +#ifndef COVERAGE + +// quick and dirty +static inline std::string escape(std::string_view s_) { + + std::string_view s = s_; + std::string r = "``"; + + // make a guess to allocate ahead of time + r.reserve( + // plain chars + s.size() + // quotes + + 5 + // some "escape" backticks + + s.size() / 8); + + while (!s.empty()) { + if (s[0] == '`' && s.size() >= 2 && s[1] == '`') { + // escape it + r += "`"; + while (!s.empty() && s[0] == '`') { + r += "`"; + s = s.substr(1); + } + } else { + r += s[0]; + s = s.substr(1); + } + } + + if (!r.empty() + && ( + r[r.size() - 1] == '`' + || r[r.size() - 1] == ' ' + )) { + r += " "; + } + + r += "``"; + + return r; +}; + +RC_GTEST_PROP( + parseShebangContent, + prop_round_trip_single, + (const std::string & orig)) +{ + auto escaped = escape(orig); + // RC_LOG() << "escaped: <[[" << escaped << "]]>" << std::endl; + auto ss = parseShebangContent(escaped); + RC_ASSERT(ss.size() == 1); + RC_ASSERT(*ss.begin() == orig); +} + +RC_GTEST_PROP( + parseShebangContent, + prop_round_trip_two, + (const std::string & one, const std::string & two)) +{ + auto ss = parseShebangContent(escape(one) + " " + escape(two)); + RC_ASSERT(ss.size() == 2); + auto i = ss.begin(); + RC_ASSERT(*i++ == one); + RC_ASSERT(*i++ == 
two); +} + + +#endif + } \ No newline at end of file From c0c7c4b6cd1aefaa65fc11fcdc8df7e608960825 Mon Sep 17 00:00:00 2001 From: Felix Uhl Date: Tue, 7 Nov 2023 21:16:18 +0100 Subject: [PATCH 48/55] Link to shebang interpreter docs from release notes --- doc/manual/src/release-notes/rl-next.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md index 608699270..da81ed83b 100644 --- a/doc/manual/src/release-notes/rl-next.md +++ b/doc/manual/src/release-notes/rl-next.md @@ -1,7 +1,8 @@ # Release X.Y (202?-??-??) -- The experimental nix command is now a `#!-interpreter` by appending the - contents of any `#! nix` lines and the script's location to a single call. +- The experimental nix command can now act as a [shebang interpreter](@docroot@/command-ref/new-cli/nix.md#shebang-interpreter) + by appending the contents of any `#! nix` lines and the script's location to a single call. + - [URL flake references](@docroot@/command-ref/new-cli/nix3-flake.md#flake-references) now support [percent-encoded](https://datatracker.ietf.org/doc/html/rfc3986#section-2.1) characters. - [Path-like flake references](@docroot@/command-ref/new-cli/nix3-flake.md#path-like-syntax) now accept arbitrary unicode characters (except `#` and `?`). From 6a47629530469b84d33444119e43c61effa88aa4 Mon Sep 17 00:00:00 2001 From: Jacek Galowicz Date: Tue, 7 Nov 2023 13:38:52 +0100 Subject: [PATCH 49/55] Fix initialization of struct members (wrong order) --- src/libstore/nar-accessor.cc | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc index 02993680f..58740b685 100644 --- a/src/libstore/nar-accessor.cc +++ b/src/libstore/nar-accessor.cc @@ -60,12 +60,22 @@ struct NarAccessor : public SourceAccessor void createDirectory(const Path & path) override { - createMember(path, {Type::tDirectory, false, 0, 0}); + createMember(path, NarMember{ .stat = { + .type = Type::tDirectory, + .fileSize = 0, + .isExecutable = false, + .narOffset = 0 + } }); } void createRegularFile(const Path & path) override { - createMember(path, {Type::tRegular, false, 0, 0}); + createMember(path, NarMember{ .stat = { + .type = Type::tRegular, + .fileSize = 0, + .isExecutable = false, + .narOffset = 0 + } }); } void closeRegularFile() override From 77dceb2844276217bff321d80f601297f3581530 Mon Sep 17 00:00:00 2001 From: Jacek Galowicz Date: Tue, 7 Nov 2023 13:39:10 +0100 Subject: [PATCH 50/55] Drop obsolete assert and cast --- src/libstore/nar-accessor.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc index 58740b685..cfbbbd80b 100644 --- a/src/libstore/nar-accessor.cc +++ b/src/libstore/nar-accessor.cc @@ -88,9 +88,8 @@ struct NarAccessor : public SourceAccessor void preallocateContents(uint64_t size) override { - assert(size <= std::numeric_limits::max()); auto & st = parents.top()->stat; - st.fileSize = (uint64_t) size; + st.fileSize = size; st.narOffset = pos; } From c581143e0c6721fba455e6616e7c6f07e47000b1 Mon Sep 17 00:00:00 2001 From: Jacek Galowicz Date: Tue, 7 Nov 2023 13:39:30 +0100 Subject: [PATCH 51/55] Use structured binding for json iteration --- src/libstore/nar-accessor.cc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc index cfbbbd80b..1a4936736 100644 --- a/src/libstore/nar-accessor.cc +++ 
b/src/libstore/nar-accessor.cc @@ -137,9 +137,8 @@ struct NarAccessor : public SourceAccessor if (type == "directory") { member.stat = {.type = Type::tDirectory}; - for (auto i = v["entries"].begin(); i != v["entries"].end(); ++i) { - std::string name = i.key(); - recurse(member.children[name], i.value()); + for (const auto &[name, function] : v["entries"].items()) { + recurse(member.children[name], function); } } else if (type == "regular") { member.stat = { From df8bfe84cca62c89417d676af2c6fbe3bcf23412 Mon Sep 17 00:00:00 2001 From: Jacek Galowicz Date: Tue, 7 Nov 2023 13:40:21 +0100 Subject: [PATCH 52/55] Fix consts and casts --- src/libstore/nar-accessor.cc | 6 +++--- src/libstore/nar-accessor.hh | 2 +- src/libstore/nar-info.cc | 4 ++-- src/libstore/nar-info.hh | 4 ++-- src/libstore/worker-protocol.cc | 8 ++++---- src/libstore/worker-protocol.hh | 4 ++-- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/libstore/nar-accessor.cc b/src/libstore/nar-accessor.cc index 1a4936736..15b05fe25 100644 --- a/src/libstore/nar-accessor.cc +++ b/src/libstore/nar-accessor.cc @@ -161,7 +161,7 @@ struct NarAccessor : public SourceAccessor { NarMember * current = &root; - for (auto & i : path) { + for (const auto & i : path) { if (current->stat.type != Type::tDirectory) return nullptr; auto child = current->children.find(std::string(i)); if (child == current->children.end()) return nullptr; @@ -194,7 +194,7 @@ struct NarAccessor : public SourceAccessor throw Error("path '%1%' inside NAR file is not a directory", path); DirEntries res; - for (auto & child : i.children) + for (const auto & child : i.children) res.insert_or_assign(child.first, std::nullopt); return res; @@ -259,7 +259,7 @@ json listNar(ref accessor, const CanonPath & path, bool recurse) { obj["entries"] = json::object(); json &res2 = obj["entries"]; - for (auto & [name, type] : accessor->readDirectory(path)) { + for (const auto & [name, type] : accessor->readDirectory(path)) { if (recurse) { res2[name] = listNar(accessor, path + name, true); } else diff --git a/src/libstore/nar-accessor.hh b/src/libstore/nar-accessor.hh index 433774524..0043897c6 100644 --- a/src/libstore/nar-accessor.hh +++ b/src/libstore/nar-accessor.hh @@ -25,7 +25,7 @@ ref makeNarAccessor(Source & source); * readFile() method of the accessor to get the contents of files * inside the NAR. 
*/ -typedef std::function GetNarBytes; +using GetNarBytes = std::function; ref makeLazyNarAccessor( const std::string & listing, diff --git a/src/libstore/nar-info.cc b/src/libstore/nar-info.cc index ae2223fb0..1060a6c8b 100644 --- a/src/libstore/nar-info.cc +++ b/src/libstore/nar-info.cc @@ -38,12 +38,12 @@ NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & while (pos < s.size()) { size_t colon = s.find(':', pos); - if (colon == std::string::npos) throw corrupt("expecting ':'"); + if (colon == s.npos) throw corrupt("expecting ':'"); std::string name(s, pos, colon - pos); size_t eol = s.find('\n', colon + 2); - if (eol == std::string::npos) throw corrupt("expecting '\\n'"); + if (eol == s.npos) throw corrupt("expecting '\\n'"); std::string value(s, colon + 2, eol - colon - 2); diff --git a/src/libstore/nar-info.hh b/src/libstore/nar-info.hh index cec65ff70..fd538a7cd 100644 --- a/src/libstore/nar-info.hh +++ b/src/libstore/nar-info.hh @@ -17,10 +17,10 @@ struct NarInfo : ValidPathInfo uint64_t fileSize = 0; NarInfo() = delete; - NarInfo(const Store & store, std::string && name, ContentAddressWithReferences && ca, Hash narHash) + NarInfo(const Store & store, std::string name, ContentAddressWithReferences ca, Hash narHash) : ValidPathInfo(store, std::move(name), std::move(ca), narHash) { } - NarInfo(StorePath && path, Hash narHash) : ValidPathInfo(std::move(path), narHash) { } + NarInfo(StorePath path, Hash narHash) : ValidPathInfo(std::move(path), narHash) { } NarInfo(const ValidPathInfo & info) : ValidPathInfo(info) { } NarInfo(const Store & store, const std::string & s, const std::string & whence); diff --git a/src/libstore/worker-protocol.cc b/src/libstore/worker-protocol.cc index 1d202f8d1..7118558b1 100644 --- a/src/libstore/worker-protocol.cc +++ b/src/libstore/worker-protocol.cc @@ -31,14 +31,14 @@ std::optional WorkerProto::Serialise>::r void WorkerProto::Serialise>::write(const Store & store, WorkerProto::WriteConn conn, const std::optional & optTrusted) { if (!optTrusted) - conn.to << (uint8_t)0; + conn.to << uint8_t{0}; else { switch (*optTrusted) { case Trusted: - conn.to << (uint8_t)1; + conn.to << uint8_t{1}; break; case NotTrusted: - conn.to << (uint8_t)2; + conn.to << uint8_t{2}; break; default: assert(false); @@ -101,7 +101,7 @@ void WorkerProto::Serialise::write(const Store & store, Worker BuildResult WorkerProto::Serialise::read(const Store & store, WorkerProto::ReadConn conn) { BuildResult res; - res.status = (BuildResult::Status) readInt(conn.from); + res.status = static_cast(readInt(conn.from)); conn.from >> res.errorMsg; if (GET_PROTOCOL_MINOR(conn.version) >= 29) { conn.from diff --git a/src/libstore/worker-protocol.hh b/src/libstore/worker-protocol.hh index dcd54ad16..25d544ba7 100644 --- a/src/libstore/worker-protocol.hh +++ b/src/libstore/worker-protocol.hh @@ -171,7 +171,7 @@ enum struct WorkerProto::Op : uint64_t */ inline Sink & operator << (Sink & sink, WorkerProto::Op op) { - return sink << (uint64_t) op; + return sink << static_cast(op); } /** @@ -181,7 +181,7 @@ inline Sink & operator << (Sink & sink, WorkerProto::Op op) */ inline std::ostream & operator << (std::ostream & s, WorkerProto::Op op) { - return s << (uint64_t) op; + return s << static_cast(op); } /** From d854e8696b549de15ac9960736a39302d7846ece Mon Sep 17 00:00:00 2001 From: John Ericson Date: Sat, 4 Nov 2023 15:57:43 -0400 Subject: [PATCH 53/55] Specify the size of the experimental feature array in a more robust way See doc comment for details. 
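
For illustration only, here is a minimal, standalone sketch of the sizing trick this patch applies (the enum and names below are invented; the real change is the `numXpFeatures` constant in the diff that follows, where the bottom enumerator is `Xp::VerifiedFetches`):

```cpp
#include <array>
#include <cstddef>
#include <string_view>

// Invented feature enum, standing in for ExperimentalFeature.
enum struct Feature : std::size_t { Alpha, Beta, Gamma };

struct FeatureDetails
{
    Feature tag;
    std::string_view name;
};

// Derive the array size from the bottom enumerator instead of writing a
// literal count: any PR that appends a feature must also edit this line,
// so two concurrent additions surface as a textual merge conflict rather
// than a silently under-sized array.
constexpr std::size_t numFeatures = 1 + static_cast<std::size_t>(Feature::Gamma);

constexpr std::array<FeatureDetails, numFeatures> featureDetails = {{
    { .tag = Feature::Alpha, .name = "alpha" },
    { .tag = Feature::Beta,  .name = "beta" },
    { .tag = Feature::Gamma, .name = "gamma" },
}};

// The declared size and the initializer list must stay in lock-step;
// a mismatch fails to compile.
static_assert(featureDetails.size() == numFeatures);
```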
--- src/libutil/experimental-features.cc | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index 47edca3a5..6b9427423 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -12,7 +12,19 @@ struct ExperimentalFeatureDetails std::string_view description; }; -constexpr std::array xpFeatureDetails = {{ +/** + * If two different PRs both add an experimental feature, and we just + * used a number for this, we *woudln't* get merge conflict and the + * counter will be incremented once instead of twice, causing a build + * failure. + * + * By instead defining this instead as 1 + the bottom experimental + * feature, we either have no issue at all if few features are not added + * at the end of the list, or a proper merge conflict if they are. + */ +constexpr size_t numXpFeatures = 1 + static_cast(Xp::VerifiedFetches); + +constexpr std::array xpFeatureDetails = {{ { .tag = Xp::CaDerivations, .name = "ca-derivations", From f0adb72c238aa6f21c2f07fe2e434a3adcea975d Mon Sep 17 00:00:00 2001 From: John Ericson Date: Wed, 8 Nov 2023 23:08:05 -0500 Subject: [PATCH 54/55] Mark `fetchTree` as unstable again As discussed in our last meeting, we need a bit more time, but we are "time boxing" the work left to do to ensure there is not unbounded delay. Rather than putting it back underneath `flakes`, though, put it underneath its own `fetch-tree` experimental feature (which `flakes` includes/implies). This signals our commitment to the plan to stabilize it first without waiting to go through the rest of Flakes, and also will give users a "release candidate" when we get closer to stabilization. This reverts commit 4112dd1fc93c9ff03a5a4e8be773c45ebefbbd1f. --- doc/manual/src/release-notes/rl-next.md | 3 ++- src/libexpr/primops/fetchTree.cc | 1 + src/libutil/config.cc | 8 ++++++-- src/libutil/experimental-features.cc | 15 +++++++++++++++ src/libutil/experimental-features.hh | 1 + tests/functional/config.sh | 3 ++- 6 files changed, 27 insertions(+), 4 deletions(-) diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md index da81ed83b..addb7de71 100644 --- a/doc/manual/src/release-notes/rl-next.md +++ b/doc/manual/src/release-notes/rl-next.md @@ -17,7 +17,8 @@ - `nix-shell` shebang lines now support single-quoted arguments. -- `builtins.fetchTree` is now marked as stable. +- `builtins.fetchTree` is now unstable under its own experimental feature, [`fetch-tree`](@docroot@/contributing/experimental-features.md#xp-fetch-tree). + As described in the document for that feature, this is because we anticipate polishing it and then stabilizing it before the rest of Flakes. 
- The interface for creating and updating lock files has been overhauled: diff --git a/src/libexpr/primops/fetchTree.cc b/src/libexpr/primops/fetchTree.cc index 3717b9022..8031bf809 100644 --- a/src/libexpr/primops/fetchTree.cc +++ b/src/libexpr/primops/fetchTree.cc @@ -228,6 +228,7 @@ static RegisterPrimOp primop_fetchTree({ ``` )", .fun = prim_fetchTree, + .experimentalFeature = Xp::FetchTree, }); static void fetch(EvalState & state, const PosIdx pos, Value * * args, Value & v, diff --git a/src/libutil/config.cc b/src/libutil/config.cc index 8e7901133..eddc4a588 100644 --- a/src/libutil/config.cc +++ b/src/libutil/config.cc @@ -330,9 +330,13 @@ template<> std::set BaseSetting res; for (auto & s : tokenizeString(str)) { - if (auto thisXpFeature = parseExperimentalFeature(s); thisXpFeature) + if (auto thisXpFeature = parseExperimentalFeature(s); thisXpFeature) { res.insert(thisXpFeature.value()); - else + // FIXME: Replace this hack with a proper notion of + // experimental feature implications/dependencies. + if (thisXpFeature.value() == Xp::Flakes) + res.insert(Xp::FetchTree); + } else warn("unknown experimental feature '%s'", s); } return res; diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index 6b9427423..b0edbe185 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -74,6 +74,21 @@ constexpr std::array xpFeatureDetails flake`](@docroot@/command-ref/new-cli/nix3-flake.md) for details. )", }, + { + .tag = Xp::FetchTree, + .name = "fetch-tree", + .description = R"( + Enable the use of the [`fetchTree`](@docroot@/language/builtins.md#builtins-fetchTree) built-in function in the Nix language. + + `fetchTree` exposes a larger suite of fetching functionality in a more systematic way. + The same fetching functionality is always used for for + [`flakes`](#xp-feature-flakes). + + This built-in was previously guarded by the `flakes` experimental feature because of that overlap, + but since the plan is to work on stabilizing this first (due 2024 Q1), we are putting it underneath a separate feature. + Once we've made the changes we want to make, enabling just this feature will serve as a "release candidate" --- allowing users to try out the functionality we want to stabilize and not any other functionality we don't yet want to, in isolation. 
+ )", + }, { .tag = Xp::NixCommand, .name = "nix-command", diff --git a/src/libutil/experimental-features.hh b/src/libutil/experimental-features.hh index f005cc9ee..f580fd030 100644 --- a/src/libutil/experimental-features.hh +++ b/src/libutil/experimental-features.hh @@ -20,6 +20,7 @@ enum struct ExperimentalFeature CaDerivations, ImpureDerivations, Flakes, + FetchTree, NixCommand, RecursiveNix, NoUrlLiterals, diff --git a/tests/functional/config.sh b/tests/functional/config.sh index 723f575ed..0780c55d0 100644 --- a/tests/functional/config.sh +++ b/tests/functional/config.sh @@ -50,7 +50,8 @@ exp_cores=$(nix show-config | grep '^cores' | cut -d '=' -f 2 | xargs) exp_features=$(nix show-config | grep '^experimental-features' | cut -d '=' -f 2 | xargs) [[ $prev != $exp_cores ]] [[ $exp_cores == "4242" ]] -[[ $exp_features == "flakes nix-command" ]] +# flakes implies fetch-tree +[[ $exp_features == "fetch-tree flakes nix-command" ]] # Test that it's possible to retrieve a single setting's value val=$(nix show-config | grep '^warn-dirty' | cut -d '=' -f 2 | xargs) From 12953b942c7752568070e0b703b448dd8f16f21b Mon Sep 17 00:00:00 2001 From: Robert Hensing Date: Thu, 9 Nov 2023 07:08:56 +0100 Subject: [PATCH 55/55] Fixup docs --- doc/manual/src/release-notes/rl-next.md | 2 +- src/libutil/config.cc | 2 -- src/libutil/experimental-features.cc | 5 ++--- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/doc/manual/src/release-notes/rl-next.md b/doc/manual/src/release-notes/rl-next.md index addb7de71..1e6ad6922 100644 --- a/doc/manual/src/release-notes/rl-next.md +++ b/doc/manual/src/release-notes/rl-next.md @@ -17,7 +17,7 @@ - `nix-shell` shebang lines now support single-quoted arguments. -- `builtins.fetchTree` is now unstable under its own experimental feature, [`fetch-tree`](@docroot@/contributing/experimental-features.md#xp-fetch-tree). +- `builtins.fetchTree` is now its own experimental feature, [`fetch-tree`](@docroot@/contributing/experimental-features.md#xp-fetch-tree). As described in the document for that feature, this is because we anticipate polishing it and then stabilizing it before the rest of Flakes. - The interface for creating and updating lock files has been overhauled: diff --git a/src/libutil/config.cc b/src/libutil/config.cc index eddc4a588..96a0a4df8 100644 --- a/src/libutil/config.cc +++ b/src/libutil/config.cc @@ -332,8 +332,6 @@ template<> std::set BaseSetting(str)) { if (auto thisXpFeature = parseExperimentalFeature(s); thisXpFeature) { res.insert(thisXpFeature.value()); - // FIXME: Replace this hack with a proper notion of - // experimental feature implications/dependencies. if (thisXpFeature.value() == Xp::Flakes) res.insert(Xp::FetchTree); } else diff --git a/src/libutil/experimental-features.cc b/src/libutil/experimental-features.cc index b0edbe185..88fb55713 100644 --- a/src/libutil/experimental-features.cc +++ b/src/libutil/experimental-features.cc @@ -80,9 +80,8 @@ constexpr std::array xpFeatureDetails .description = R"( Enable the use of the [`fetchTree`](@docroot@/language/builtins.md#builtins-fetchTree) built-in function in the Nix language. - `fetchTree` exposes a larger suite of fetching functionality in a more systematic way. - The same fetching functionality is always used for for - [`flakes`](#xp-feature-flakes). + `fetchTree` exposes a large suite of fetching functionality in a more systematic way. + The [`flakes`](#xp-feature-flakes) feature flag always enables `fetch-tree`. 
This built-in was previously guarded by the `flakes` experimental feature because of that overlap, but since the plan is to work on stabilizing this first (due 2024 Q1), we are putting it underneath a separate feature.
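
As a rough illustration of the feature-implication behaviour introduced in the two patches above (enabling `flakes` also enables `fetch-tree`), here is a minimal, self-contained sketch of the parsing logic; the helper names are invented for this example and are not the patch's actual code:

```cpp
#include <iostream>
#include <optional>
#include <set>
#include <sstream>
#include <string>

enum struct Feature { FetchTree, Flakes, NixCommand };

// Hypothetical stand-in for the real name-to-feature lookup, which the
// patches implement via the xpFeatureDetails table.
std::optional<Feature> parseFeature(const std::string & s)
{
    if (s == "fetch-tree")  return Feature::FetchTree;
    if (s == "flakes")      return Feature::Flakes;
    if (s == "nix-command") return Feature::NixCommand;
    return std::nullopt;
}

std::set<Feature> parseFeatureList(const std::string & str)
{
    std::set<Feature> res;
    std::istringstream in(str);
    std::string tok;
    while (in >> tok) {
        if (auto f = parseFeature(tok)) {
            res.insert(*f);
            // `flakes` implies `fetch-tree`, mirroring the config.cc
            // change in the patches above.
            if (*f == Feature::Flakes)
                res.insert(Feature::FetchTree);
        } else
            std::cerr << "unknown experimental feature '" << tok << "'\n";
    }
    return res;
}

int main()
{
    auto fs = parseFeatureList("flakes nix-command");
    // Prints 3: fetch-tree is pulled in by flakes.
    std::cout << fs.size() << "\n";
}
```

This is the same behaviour exercised by the functional test in the diff above, where enabling `flakes nix-command` makes `nix show-config` report `fetch-tree flakes nix-command`.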