Merge branch 'master' into nix-repl-flakes

Tom Bereknyei 2022-06-15 09:17:03 -04:00
commit 51268ceb79
42 changed files with 305 additions and 164 deletions

View file

@ -88,7 +88,7 @@ jobs:
fetch-depth: 0
- uses: cachix/install-nix-action@v17
- run: echo CACHIX_NAME="$(echo $GITHUB_REPOSITORY-install-tests | tr "[A-Z]/" "[a-z]-")" >> $GITHUB_ENV
- run: echo NIX_VERSION="$(nix-instantiate --eval -E '(import ./default.nix).defaultPackage.${builtins.currentSystem}.version' | tr -d \")" >> $GITHUB_ENV
- run: echo NIX_VERSION="$(nix --experimental-features 'nix-command flakes' eval .\#default.version | tr -d \")" >> $GITHUB_ENV
- uses: cachix/cachix-action@v10
if: needs.check_cachix.outputs.secret == 'true'
with:

View file

@ -12,6 +12,12 @@
[`--dry-run`]
[{`--out-link` | `-o`} *outlink*]
# Disambiguation
This man page describes the command `nix-build`, which is distinct from `nix
build`. For documentation on the latter, run `nix build --help` or see `man
nix3-build`.
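A minimal side-by-side sketch, assuming `nixpkgs` is available both as a
channel and as a flake registry entry:

```console
$ nix-build '<nixpkgs>' -A hello      # classic interface, documented here
$ nix build nixpkgs#hello             # flake-based interface; see nix3-build(1)
```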
# Description
The `nix-build` command builds the derivations described by the Nix

View file

@ -15,6 +15,12 @@
[`--keep` *name*]
{{`--packages` | `-p`} {*packages* | *expressions*} … | [*path*]}
# Disambiguation
This man page describes the command `nix-shell`, which is distinct from `nix
shell`. For documentation on the latter, run `nix shell --help` or see `man
nix3-shell`.
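A minimal side-by-side sketch, assuming `nixpkgs` is available both as a
channel and as a flake registry entry:

```console
$ nix-shell -p hello                  # classic interface, documented here
$ nix shell nixpkgs#hello             # flake-based interface; see nix3-shell(1)
```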
# Description
The command `nix-shell` will build the dependencies of the specified

View file

@ -18,16 +18,16 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1645296114,
"narHash": "sha256-y53N7TyIkXsjMpOG7RhvqJFGDacLs9HlyHeSTBioqYU=",
"lastModified": 1653988320,
"narHash": "sha256-ZaqFFsSDipZ6KVqriwM34T739+KLYJvNmCWzErjAg7c=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "530a53dcbc9437363471167a5e4762c5fcfa34a1",
"rev": "2fa57ed190fd6c7c746319444f34b5917666e5c1",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-21.05-small",
"ref": "nixos-22.05-small",
"repo": "nixpkgs",
"type": "github"
}

View file

@ -1,7 +1,7 @@
{
description = "The purely functional package manager";
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-21.05-small";
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-22.05-small";
inputs.nixpkgs-regression.url = "github:NixOS/nixpkgs/215d4d0fd80ca5163643b03a33fde804a29cc1e2";
inputs.lowdown-src = { url = "github:kristapsdz/lowdown"; flake = false; };
@ -36,7 +36,7 @@
)
);
forAllStdenvs = stdenvs: f: nixpkgs.lib.genAttrs stdenvs (stdenv: f stdenv);
forAllStdenvs = f: nixpkgs.lib.genAttrs stdenvs (stdenv: f stdenv);
# Memoize nixpkgs for different platforms for efficiency.
nixpkgsFor =
@ -88,7 +88,6 @@
"LDFLAGS=-fuse-ld=gold"
];
nativeBuildDeps =
[
buildPackages.bison
@ -314,6 +313,7 @@
for LIB in $out/lib/*.dylib; do
chmod u+w $LIB
install_name_tool -id $LIB $LIB
install_name_tool -delete_rpath ${boost}/lib/ $LIB || true
done
install_name_tool -change ${boost}/lib/libboost_system.dylib $out/lib/libboost_system.dylib $out/lib/libboost_thread.dylib
''}
@ -370,10 +370,10 @@
++ lib.optional (currentStdenv.isLinux || currentStdenv.isDarwin) libsodium
++ lib.optional currentStdenv.isDarwin darwin.apple_sdk.frameworks.Security;
configureFlags = ''
--with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix}
--with-dbd-sqlite=${perlPackages.DBDSQLite}/${pkgs.perl.libPrefix}
'';
configureFlags = [
"--with-dbi=${perlPackages.DBI}/${pkgs.perl.libPrefix}"
"--with-dbd-sqlite=${perlPackages.DBDSQLite}/${pkgs.perl.libPrefix}"
];
enableParallelBuilding = true;
@ -405,7 +405,7 @@
# A Nixpkgs overlay that overrides the 'nix' and
# 'nix.perl-bindings' packages.
overlay = overlayFor (p: p.stdenv);
overlays.default = overlayFor (p: p.stdenv);
hydraJobs = {
@ -430,7 +430,7 @@
value = let
nixpkgsCross = import nixpkgs {
inherit system crossSystem;
overlays = [ self.overlay ];
overlays = [ self.overlays.default ];
};
in binaryTarball nixpkgsFor.${system} self.packages.${system}."nix-${crossSystem}" nixpkgsCross;
}) crossSystems));
@ -476,31 +476,31 @@
tests.remoteBuilds = import ./tests/remote-builds.nix {
system = "x86_64-linux";
inherit nixpkgs;
inherit (self) overlay;
overlay = self.overlays.default;
};
tests.nix-copy-closure = import ./tests/nix-copy-closure.nix {
system = "x86_64-linux";
inherit nixpkgs;
inherit (self) overlay;
overlay = self.overlays.default;
};
tests.nssPreload = (import ./tests/nss-preload.nix rec {
system = "x86_64-linux";
inherit nixpkgs;
inherit (self) overlay;
overlay = self.overlays.default;
});
tests.githubFlakes = (import ./tests/github-flakes.nix rec {
system = "x86_64-linux";
inherit nixpkgs;
inherit (self) overlay;
overlay = self.overlays.default;
});
tests.sourcehutFlakes = (import ./tests/sourcehut-flakes.nix rec {
system = "x86_64-linux";
inherit nixpkgs;
inherit (self) overlay;
overlay = self.overlays.default;
});
tests.setuid = nixpkgs.lib.genAttrs
@ -508,7 +508,7 @@
(system:
import ./tests/setuid.nix rec {
inherit nixpkgs system;
inherit (self) overlay;
overlay = self.overlays.default;
});
# Make sure that nix-env still produces the exact same result
@ -553,8 +553,9 @@
dockerImage = self.hydraJobs.dockerImage.${system};
});
packages = forAllSystems (system: {
packages = forAllSystems (system: rec {
inherit (nixpkgsFor.${system}) nix;
default = nix;
} // (nixpkgs.lib.optionalAttrs (builtins.elem system linux64BitSystems) {
nix-static = let
nixpkgs = nixpkgsFor.${system}.pkgsStatic;
@ -610,12 +611,14 @@
ln -s ${image} $image
echo "file binary-dist $image" >> $out/nix-support/hydra-build-products
'';
} // builtins.listToAttrs (map (crossSystem: {
}
// builtins.listToAttrs (map (crossSystem: {
name = "nix-${crossSystem}";
value = let
nixpkgsCross = import nixpkgs {
inherit system crossSystem;
overlays = [ self.overlay ];
overlays = [ self.overlays.default ];
};
in with commonDeps nixpkgsCross; nixpkgsCross.stdenv.mkDerivation {
name = "nix-${version}";
@ -649,44 +652,45 @@
doInstallCheck = true;
installCheckFlags = "sysconfdir=$(out)/etc";
};
}) crossSystems)) // (builtins.listToAttrs (map (stdenvName:
}) (if system == "x86_64-linux" then crossSystems else [])))
// (builtins.listToAttrs (map (stdenvName:
nixpkgsFor.${system}.lib.nameValuePair
"nix-${stdenvName}"
nixpkgsFor.${system}."${stdenvName}Packages".nix
) stdenvs)));
defaultPackage = forAllSystems (system: self.packages.${system}.nix);
devShells = forAllSystems (system:
forAllStdenvs (stdenv:
with nixpkgsFor.${system};
with commonDeps pkgs;
nixpkgsFor.${system}.${stdenv}.mkDerivation {
name = "nix";
devShell = forAllSystems (system: self.devShells.${system}.stdenvPackages);
outputs = [ "out" "dev" "doc" ];
devShells = forAllSystemsAndStdenvs (system: stdenv:
with nixpkgsFor.${system};
with commonDeps pkgs;
nativeBuildInputs = nativeBuildDeps;
buildInputs = buildDeps ++ propagatedDeps ++ awsDeps;
nixpkgsFor.${system}.${stdenv}.mkDerivation {
name = "nix";
inherit configureFlags;
outputs = [ "out" "dev" "doc" ];
enableParallelBuilding = true;
nativeBuildInputs = nativeBuildDeps;
buildInputs = buildDeps ++ propagatedDeps ++ awsDeps;
installFlags = "sysconfdir=$(out)/etc";
inherit configureFlags;
shellHook =
''
PATH=$prefix/bin:$PATH
unset PYTHONPATH
export MANPATH=$out/share/man:$MANPATH
enableParallelBuilding = true;
installFlags = "sysconfdir=$(out)/etc";
shellHook =
''
PATH=$prefix/bin:$PATH
unset PYTHONPATH
export MANPATH=$out/share/man:$MANPATH
# Make bash completion work.
XDG_DATA_DIRS+=:$out/share
'';
});
# Make bash completion work.
XDG_DATA_DIRS+=:$out/share
'';
}
)
// { default = self.devShells.${system}.stdenv; }
);
};
}

View file

@ -442,8 +442,9 @@ add_nix_vol_fstab_line() {
local escaped_mountpoint="${NIX_ROOT/ /'\\\'040}"
shift
# wrap `ex` to work around a problem with vim plugins breaking exit codes
# (see github.com/NixOS/nix/issues/5468)
# wrap `ex` to work around problems w/ vim features breaking exit codes
# - plugins (see github.com/NixOS/nix/issues/5468): -u NONE
# - swap file: -n
#
# the first draft used `--noplugin`, but github.com/NixOS/nix/issues/6462
# suggests we need the less-semantic `-u NONE`
@ -456,7 +457,7 @@ add_nix_vol_fstab_line() {
# minver 10.12.6 seems to have released with vim 7.4
cat > "$SCRATCH/ex_cleanroom_wrapper" <<EOF
#!/bin/sh
/usr/bin/ex -u NONE "\$@"
/usr/bin/ex -u NONE -n "\$@"
EOF
chmod 755 "$SCRATCH/ex_cleanroom_wrapper"
@ -650,9 +651,9 @@ EOF
task "Configuring /etc/synthetic.conf to make a mount-point at $NIX_ROOT" >&2
# technically /etc/synthetic.d/nix is supported in Big Sur+
# but handling both takes even more code...
# Note: `-u NONE` disables vim plugins/rc; see note on --clean earlier
# See earlier note; `-u NONE` disables vim plugins/rc, `-n` skips swapfile
_sudo "to add Nix to /etc/synthetic.conf" \
/usr/bin/ex -u NONE /etc/synthetic.conf <<EOF
/usr/bin/ex -u NONE -n /etc/synthetic.conf <<EOF
:a
${NIX_ROOT:1}
.
@ -820,8 +821,8 @@ setup_volume_daemon() {
local volume_uuid="$2"
if ! test_voldaemon; then
task "Configuring LaunchDaemon to mount '$NIX_VOLUME_LABEL'" >&2
# Note: `-u NONE` disables vim plugins/rc; see note on --clean earlier
_sudo "to install the Nix volume mounter" /usr/bin/ex -u NONE "$NIX_VOLUME_MOUNTD_DEST" <<EOF
# See earlier note; `-u NONE` disables vim plugins/rc, `-n` skips swapfile
_sudo "to install the Nix volume mounter" /usr/bin/ex -u NONE -n "$NIX_VOLUME_MOUNTD_DEST" <<EOF
:a
$(generate_mount_daemon "$cmd_type" "$volume_uuid")
.

View file

@ -638,6 +638,17 @@ place_channel_configuration() {
fi
}
check_selinux() {
if command -v getenforce > /dev/null 2>&1; then
if ! [ "$(getenforce)" = "Disabled" ]; then
failure <<EOF
Nix does not work with selinux enabled yet!
see https://github.com/NixOS/nix/issues/2374
EOF
fi
fi
}
welcome_to_nix() {
ok "Welcome to the Multi-User Nix Installation"
@ -866,6 +877,8 @@ when I need to.
EOF
fi
check_selinux
if [ "$(uname -s)" = "Darwin" ]; then
# shellcheck source=./install-darwin-multi-user.sh
. "$EXTRACTED_NIX_PATH/install-darwin-multi-user.sh"

View file

@ -146,7 +146,8 @@ SourceExprCommand::SourceExprCommand(bool supportReadOnlyMode)
.shortName = 'f',
.description =
"Interpret installables as attribute paths relative to the Nix expression stored in *file*. "
"If *file* is the character -, then a Nix expression will be read from standard input.",
"If *file* is the character -, then a Nix expression will be read from standard input. "
"Implies `--impure`.",
.category = installablesCategory,
.labels = {"file"},
.handler = {&file},

View file

@ -1066,6 +1066,11 @@ struct CmdRepl : InstallablesCommand
return file.has_value() or expr.has_value();
}
bool forceImpureByDefault() override
{
return true;
}
std::string description() override
{
return "start an interactive environment for evaluating Nix expressions";

View file

@ -150,7 +150,7 @@ public:
if (debugRepl)
runDebugRepl(&error, env, expr);
throw error;
throw std::move(error);
}
template<class E>
@ -165,7 +165,7 @@ public:
runDebugRepl(&e, last.env, last.expr);
}
throw e;
throw std::move(e);
}

View file

@ -513,6 +513,15 @@ LockedFlake lockFlake(
if (!lockFlags.allowMutable && !input.ref->input.isLocked())
throw Error("cannot update flake input '%s' in pure mode", inputPathS);
/* Note: in case of an --override-input, we use
the *original* ref (input2.ref) for the
"original" field, rather than the
override. This ensures that the override isn't
nuked the next time we update the lock
file. That is, overrides are sticky unless you
use --no-write-lock-file. */
auto ref = input2.ref ? *input2.ref : *input.ref;
if (input.isFlake) {
Path localPath = parentPath;
FlakeRef localRef = *input.ref;
@ -524,15 +533,7 @@ LockedFlake lockFlake(
auto inputFlake = getFlake(state, localRef, useRegistries, flakeCache, inputPath);
/* Note: in case of an --override-input, we use
the *original* ref (input2.ref) for the
"original" field, rather than the
override. This ensures that the override isn't
nuked the next time we update the lock
file. That is, overrides are sticky unless you
use --no-write-lock-file. */
auto childNode = std::make_shared<LockedNode>(
inputFlake.lockedRef, input2.ref ? *input2.ref : *input.ref);
auto childNode = std::make_shared<LockedNode>(inputFlake.lockedRef, ref);
node->inputs.insert_or_assign(id, childNode);
@ -560,7 +561,7 @@ LockedFlake lockFlake(
auto [sourceInfo, resolvedRef, lockedRef] = fetchOrSubstituteTree(
state, *input.ref, useRegistries, flakeCache);
node->inputs.insert_or_assign(id,
std::make_shared<LockedNode>(lockedRef, *input.ref, false));
std::make_shared<LockedNode>(lockedRef, ref, false));
}
}

View file

@ -150,16 +150,16 @@ struct Expr
};
#define COMMON_METHODS \
void show(const SymbolTable & symbols, std::ostream & str) const; \
void eval(EvalState & state, Env & env, Value & v); \
void bindVars(EvalState & es, const std::shared_ptr<const StaticEnv> & env);
void show(const SymbolTable & symbols, std::ostream & str) const override; \
void eval(EvalState & state, Env & env, Value & v) override; \
void bindVars(EvalState & es, const std::shared_ptr<const StaticEnv> & env) override;
struct ExprInt : Expr
{
NixInt n;
Value v;
ExprInt(NixInt n) : n(n) { v.mkInt(n); };
Value * maybeThunk(EvalState & state, Env & env);
Value * maybeThunk(EvalState & state, Env & env) override;
COMMON_METHODS
};
@ -168,7 +168,7 @@ struct ExprFloat : Expr
NixFloat nf;
Value v;
ExprFloat(NixFloat nf) : nf(nf) { v.mkFloat(nf); };
Value * maybeThunk(EvalState & state, Env & env);
Value * maybeThunk(EvalState & state, Env & env) override;
COMMON_METHODS
};
@ -177,7 +177,7 @@ struct ExprString : Expr
std::string s;
Value v;
ExprString(std::string s) : s(std::move(s)) { v.mkString(this->s.data()); };
Value * maybeThunk(EvalState & state, Env & env);
Value * maybeThunk(EvalState & state, Env & env) override;
COMMON_METHODS
};
@ -186,7 +186,7 @@ struct ExprPath : Expr
std::string s;
Value v;
ExprPath(std::string s) : s(std::move(s)) { v.mkPath(this->s.c_str()); };
Value * maybeThunk(EvalState & state, Env & env);
Value * maybeThunk(EvalState & state, Env & env) override;
COMMON_METHODS
};
@ -213,7 +213,7 @@ struct ExprVar : Expr
ExprVar(Symbol name) : name(name) { };
ExprVar(const PosIdx & pos, Symbol name) : pos(pos), name(name) { };
Value * maybeThunk(EvalState & state, Env & env);
Value * maybeThunk(EvalState & state, Env & env) override;
PosIdx getPos() const override { return pos; }
COMMON_METHODS
};
@ -326,7 +326,7 @@ struct ExprLambda : Expr
: pos(pos), formals(formals), body(body)
{
}
void setName(Symbol name);
void setName(Symbol name) override;
std::string showNamePos(const EvalState & state) const;
inline bool hasFormals() const { return formals != nullptr; }
PosIdx getPos() const override { return pos; }
@ -395,15 +395,15 @@ struct ExprOpNot : Expr
Expr * e1, * e2; \
name(Expr * e1, Expr * e2) : e1(e1), e2(e2) { }; \
name(const PosIdx & pos, Expr * e1, Expr * e2) : pos(pos), e1(e1), e2(e2) { }; \
void show(const SymbolTable & symbols, std::ostream & str) const \
void show(const SymbolTable & symbols, std::ostream & str) const override \
{ \
str << "("; e1->show(symbols, str); str << " " s " "; e2->show(symbols, str); str << ")"; \
} \
void bindVars(EvalState & es, const std::shared_ptr<const StaticEnv> & env) \
void bindVars(EvalState & es, const std::shared_ptr<const StaticEnv> & env) override \
{ \
e1->bindVars(es, env); e2->bindVars(es, env); \
} \
void eval(EvalState & state, Env & env, Value & v); \
void eval(EvalState & state, Env & env, Value & v) override; \
PosIdx getPos() const override { return pos; } \
};

View file

@ -85,8 +85,9 @@ std::optional<std::string> readHead(const Path & path)
bool storeCachedHead(const std::string& actualUrl, const std::string& headRef)
{
Path cacheDir = getCachePath(actualUrl);
auto gitDir = ".";
try {
runProgram("git", true, { "-C", cacheDir, "symbolic-ref", "--", "HEAD", headRef });
runProgram("git", true, { "-C", cacheDir, "--git-dir", gitDir, "symbolic-ref", "--", "HEAD", headRef });
} catch (ExecError &e) {
if (!WIFEXITED(e.status)) throw;
return false;
@ -182,7 +183,7 @@ WorkdirInfo getWorkdirInfo(const Input & input, const Path & workdir)
if (hasHead) {
// Using git diff is preferable over lower-level operations here,
// because it's conceptually simpler and we only need the exit code anyway.
auto gitDiffOpts = Strings({ "-C", workdir, "diff", "HEAD", "--quiet"});
auto gitDiffOpts = Strings({ "-C", workdir, "--git-dir", gitDir, "diff", "HEAD", "--quiet"});
if (!submodules) {
// Changes in submodules should only make the tree dirty
// when those submodules will be copied as well.
@ -203,6 +204,7 @@ WorkdirInfo getWorkdirInfo(const Input & input, const Path & workdir)
std::pair<StorePath, Input> fetchFromWorkdir(ref<Store> store, Input & input, const Path & workdir, const WorkdirInfo & workdirInfo)
{
const bool submodules = maybeGetBoolAttr(input.attrs, "submodules").value_or(false);
auto gitDir = ".git";
if (!fetchSettings.allowDirty)
throw Error("Git tree '%s' is dirty", workdir);
@ -210,7 +212,7 @@ std::pair<StorePath, Input> fetchFromWorkdir(ref<Store> store, Input & input, co
if (fetchSettings.warnDirty)
warn("Git tree '%s' is dirty", workdir);
auto gitOpts = Strings({ "-C", workdir, "ls-files", "-z" });
auto gitOpts = Strings({ "-C", workdir, "--git-dir", gitDir, "ls-files", "-z" });
if (submodules)
gitOpts.emplace_back("--recurse-submodules");
@ -240,7 +242,7 @@ std::pair<StorePath, Input> fetchFromWorkdir(ref<Store> store, Input & input, co
// modified dirty file?
input.attrs.insert_or_assign(
"lastModified",
workdirInfo.hasHead ? std::stoull(runProgram("git", true, { "-C", actualPath, "log", "-1", "--format=%ct", "--no-show-signature", "HEAD" })) : 0);
workdirInfo.hasHead ? std::stoull(runProgram("git", true, { "-C", actualPath, "--git-dir", gitDir, "log", "-1", "--format=%ct", "--no-show-signature", "HEAD" })) : 0);
return {std::move(storePath), input};
}

View file

@ -381,7 +381,7 @@ struct SourceHutInputScheme : GitArchiveInputScheme
Headers headers = makeHeadersWithAuthTokens(host);
std::string ref_uri;
std::string refUri;
if (ref == "HEAD") {
auto file = store->toRealPath(
downloadFile(store, fmt("%s/HEAD", base_url), "source", false, headers).storePath);
@ -393,10 +393,11 @@ struct SourceHutInputScheme : GitArchiveInputScheme
if (!remoteLine) {
throw BadURL("in '%d', couldn't resolve HEAD ref '%d'", input.to_string(), ref);
}
ref_uri = remoteLine->target;
refUri = remoteLine->target;
} else {
ref_uri = fmt("refs/(heads|tags)/%s", ref);
refUri = fmt("refs/(heads|tags)/%s", ref);
}
std::regex refRegex(refUri);
auto file = store->toRealPath(
downloadFile(store, fmt("%s/info/refs", base_url), "source", false, headers).storePath);
@ -406,7 +407,7 @@ struct SourceHutInputScheme : GitArchiveInputScheme
std::optional<std::string> id;
while(!id && getline(is, line)) {
auto parsedLine = git::parseLsRemoteLine(line);
if (parsedLine && parsedLine->reference == ref_uri)
if (parsedLine && parsedLine->reference && std::regex_match(*parsedLine->reference, refRegex))
id = parsedLine->target;
}

View file

@ -135,6 +135,7 @@ void LocalStore::addTempRoot(const StorePath & path)
state->fdRootsSocket.close();
goto restart;
}
throw;
}
}
@ -153,6 +154,7 @@ void LocalStore::addTempRoot(const StorePath & path)
state->fdRootsSocket.close();
goto restart;
}
throw;
} catch (EndOfFile & e) {
debug("GC socket disconnected");
state->fdRootsSocket.close();

View file

@ -802,7 +802,7 @@ public:
)"};
Setting<StringSet> ignoredAcls{
this, {"security.selinux", "system.nfs4_acl"}, "ignored-acls",
this, {"security.selinux", "system.nfs4_acl", "security.csm"}, "ignored-acls",
R"(
A list of ACLs that should be ignored, normally Nix attempts to
remove all ACLs from files and directories in the Nix store, but

View file

@ -69,6 +69,7 @@ protected:
} catch (SysError & e) {
if (e.errNo == ENOENT)
throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache", path);
throw;
}
}

View file

@ -67,13 +67,26 @@ bool UserLock::findFreeUser() {
#if __linux__
/* Get the list of supplementary groups of this build user. This
is usually either empty or contains a group such as "kvm". */
supplementaryGIDs.resize(10);
int ngroups = supplementaryGIDs.size();
int err = getgrouplist(pw->pw_name, pw->pw_gid,
supplementaryGIDs.data(), &ngroups);
if (err == -1)
throw Error("failed to get list of supplementary groups for '%1%'", pw->pw_name);
int ngroups = 32; // arbitrary initial guess
supplementaryGIDs.resize(ngroups);
int err = getgrouplist(pw->pw_name, pw->pw_gid, supplementaryGIDs.data(),
&ngroups);
// Our initial size of 32 wasn't sufficient, the correct size has
// been stored in ngroups, so we try again.
if (err == -1) {
supplementaryGIDs.resize(ngroups);
err = getgrouplist(pw->pw_name, pw->pw_gid, supplementaryGIDs.data(),
&ngroups);
}
// If it failed once more, then something must be broken.
if (err == -1)
throw Error("failed to get list of supplementary groups for '%1%'",
pw->pw_name);
// Finally, trim back the GID list to its real size
supplementaryGIDs.resize(ngroups);
#endif

View file

@ -1,7 +1,7 @@
create table if not exists ValidPaths (
id integer primary key autoincrement not null,
path text unique not null,
hash text not null,
hash text not null, -- base16 representation
registrationTime integer not null,
deriver text,
narSize integer,

View file

@ -25,6 +25,8 @@ public:
/* Return a short one-line description of the command. */
virtual std::string description() { return ""; }
virtual bool forceImpureByDefault() { return false; }
/* Return documentation about this command, in Markdown format. */
virtual std::string doc() { return ""; }

View file

@ -8,9 +8,9 @@ std::string hiliteMatches(
std::string_view prefix,
std::string_view postfix)
{
// Avoid copy on zero matches
// Avoid extra work on zero matches
if (matches.size() == 0)
return (std::string) s;
return std::string(s);
std::sort(matches.begin(), matches.end(), [](const auto & a, const auto & b) {
return a.position() < b.position();

View file

@ -700,4 +700,19 @@ template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;
std::string showBytes(uint64_t bytes);
/* Provide an addition operator between strings and string_views
inexplicably omitted from the standard library. */
inline std::string operator + (const std::string & s1, std::string_view s2)
{
auto s = s1;
s.append(s2);
return s;
}
inline std::string operator + (std::string && s, std::string_view s2)
{
s.append(s2);
return std::move(s);
}
}

View file

@ -543,6 +543,8 @@ static void main_nix_build(int argc, char * * argv)
restoreProcessContext();
logger->stop();
execvp(shell->c_str(), argPtrs.data());
throw SysError("executing shell '%s'", *shell);
@ -601,6 +603,8 @@ static void main_nix_build(int argc, char * * argv)
outPaths.push_back(outputPath);
}
logger->stop();
for (auto & path : outPaths)
std::cout << store->printStorePath(path) << '\n';
}

View file

@ -37,6 +37,7 @@ void removeOldGenerations(std::string dir)
link = readLink(path);
} catch (SysError & e) {
if (e.errNo == ENOENT) continue;
throw;
}
if (link.find("link") != std::string::npos) {
printInfo(format("removing old generations of profile %1%") % path);

View file

@ -1485,7 +1485,7 @@ static int main_nix_env(int argc, char * * argv)
if (globals.profile == "")
globals.profile = getDefaultProfile();
op(globals, opFlags, opArgs);
op(globals, std::move(opFlags), std::move(opArgs));
globals.state->printStats();

View file

@ -1093,7 +1093,7 @@ static int main_nix_store(int argc, char * * argv)
if (op != opDump && op != opRestore) /* !!! hack */
store = openStore();
op(opFlags, opArgs);
op(std::move(opFlags), std::move(opArgs));
return 0;
}

View file

@ -30,7 +30,7 @@ convert-secret-to-public` to get the corresponding public key for
verifying signed store paths.
The mandatory argument `--key-name` specifies a key name (such as
`cache.example.org-1). It is used to look up keys on the client when
`cache.example.org-1`). It is used to look up keys on the client when
it verifies signatures. It can be anything, but it's suggested to use
the host name of your cache (e.g. `cache.example.org`) with a suffix
denoting the number of the key (to be incremented every time you need

View file

@ -380,6 +380,9 @@ void mainWrapped(int argc, char * * argv)
settings.ttlPositiveNarInfoCache = 0;
}
if (args.command->second->forceImpureByDefault() && !evalSettings.pureEval.overridden) {
evalSettings.pureEval = false;
}
args.command->second->prepare();
args.command->second->run();
}

View file

@ -18,16 +18,24 @@ using namespace nix;
std::string wrap(std::string prefix, std::string s)
{
return prefix + s + ANSI_NORMAL;
return concatStrings(prefix, s, ANSI_NORMAL);
}
struct CmdSearch : InstallableCommand, MixJSON
{
std::vector<std::string> res;
std::vector<std::string> excludeRes;
CmdSearch()
{
expectArgs("regex", &res);
addFlag(Flag {
.longName = "exclude",
.shortName = 'e',
.description = "Hide packages whose attribute path, name or description contain *regex*.",
.labels = {"regex"},
.handler = Handler(&excludeRes),
});
}
std::string description() override
@ -62,11 +70,16 @@ struct CmdSearch : InstallableCommand, MixJSON
res.push_back("^");
std::vector<std::regex> regexes;
std::vector<std::regex> excludeRegexes;
regexes.reserve(res.size());
excludeRegexes.reserve(excludeRes.size());
for (auto & re : res)
regexes.push_back(std::regex(re, std::regex::extended | std::regex::icase));
for (auto & re : excludeRes)
excludeRegexes.emplace_back(re, std::regex::extended | std::regex::icase);
auto state = getEvalState();
auto jsonOut = json ? std::make_unique<JSONObject>(std::cout) : nullptr;
@ -106,6 +119,14 @@ struct CmdSearch : InstallableCommand, MixJSON
std::vector<std::smatch> nameMatches;
bool found = false;
for (auto & regex : excludeRegexes) {
if (
std::regex_search(attrPath2, regex)
|| std::regex_search(name.name, regex)
|| std::regex_search(description, regex))
return;
}
for (auto & regex : regexes) {
found = false;
auto addAll = [&found](std::sregex_iterator it, std::vector<std::smatch> & vec) {
@ -133,15 +154,15 @@ struct CmdSearch : InstallableCommand, MixJSON
jsonElem.attr("version", name.version);
jsonElem.attr("description", description);
} else {
auto name2 = hiliteMatches(name.name, std::move(nameMatches), ANSI_GREEN, "\e[0;2m");
auto name2 = hiliteMatches(name.name, nameMatches, ANSI_GREEN, "\e[0;2m");
if (results > 1) logger->cout("");
logger->cout(
"* %s%s",
wrap("\e[0;1m", hiliteMatches(attrPath2, std::move(attrPathMatches), ANSI_GREEN, "\e[0;1m")),
wrap("\e[0;1m", hiliteMatches(attrPath2, attrPathMatches, ANSI_GREEN, "\e[0;1m")),
name.version != "" ? " (" + name.version + ")" : "");
if (description != "")
logger->cout(
" %s", hiliteMatches(description, std::move(descriptionMatches), ANSI_GREEN, ANSI_NORMAL));
" %s", hiliteMatches(description, descriptionMatches, ANSI_GREEN, ANSI_NORMAL));
}
}
}

View file

@ -43,12 +43,23 @@ R""(
# nix search nixpkgs 'firefox|chromium'
```
* Search for packages containing `git'`and either `frontend` or `gui`:
* Search for packages containing `git` and either `frontend` or `gui`:
```console
# nix search nixpkgs git 'frontend|gui'
```
* Search for packages containing `neovim` but hide ones containing either `gui` or `python`:
```console
# nix search nixpkgs neovim -e 'python|gui'
```
or
```console
# nix search nixpkgs neovim -e 'python' -e 'gui'
```
# Description
`nix search` searches *installable* (which must be evaluatable, e.g. a

View file

@ -34,7 +34,7 @@ struct CmdUpgradeNix : MixDryRun, StoreCommand
std::string description() override
{
return "upgrade Nix to the latest stable version";
return "upgrade Nix to the stable version declared in Nixpkgs";
}
std::string doc() override

View file

@ -2,7 +2,7 @@ R""(
# Examples
* Upgrade Nix to the latest stable version:
* Upgrade Nix to the stable version declared in Nixpkgs:
```console
# nix upgrade-nix
@ -16,8 +16,11 @@ R""(
# Description
This command upgrades Nix to the latest version. By default, it
locates the directory containing the `nix` binary in the `$PATH`
This command upgrades Nix to the stable version declared in Nixpkgs.
This stable version is defined in [nix-fallback-paths.nix](https://github.com/NixOS/nixpkgs/raw/master/nixos/modules/installer/tools/nix-fallback-paths.nix)
and updated manually. It may not always be the latest tagged release.
By default, it locates the directory containing the `nix` binary in the `$PATH`
environment variable. If that directory is a Nix profile, it will
upgrade the `nix` package in that profile to the latest stable binary
release.

View file

@ -58,7 +58,7 @@ EOF
nix eval --file - <<EOF
with (builtins.fromJSON (builtins.readFile ./flake.lock));
# Url inputs whose extension doesnt match a know archive format should
# Url inputs whose extension doesnt match a known archive format should
# not be unpacked by default
assert (nodes.no_ext_default_no_unpack.locked.type == "file");
assert (nodes.no_ext_default_no_unpack.locked.unpack or false == false);

View file

@ -32,7 +32,7 @@ for repo in $flake1Dir $flake2Dir $flake3Dir $flake7Dir $templatesDir $nonFlakeD
rm -rf $repo $repo.tmp
mkdir -p $repo
# Give one repo a non-master initial branch.
# Give one repo a non-main initial branch.
extraArgs=
if [[ $repo == $flake2Dir ]]; then
extraArgs="--initial-branch=main"
@ -173,11 +173,11 @@ nix build -o $TEST_ROOT/result $flake2Dir#bar --no-write-lock-file
nix build -o $TEST_ROOT/result $flake2Dir#bar --no-update-lock-file 2>&1 | grep 'requires lock file changes'
nix build -o $TEST_ROOT/result $flake2Dir#bar --commit-lock-file
[[ -e $flake2Dir/flake.lock ]]
[[ -z $(git -C $flake2Dir diff master) ]]
[[ -z $(git -C $flake2Dir diff main || echo failed) ]]
# Rerunning the build should not change the lockfile.
nix build -o $TEST_ROOT/result $flake2Dir#bar
[[ -z $(git -C $flake2Dir diff master) ]]
[[ -z $(git -C $flake2Dir diff main || echo failed) ]]
# Building with a lockfile should not require a fetch of the registry.
nix build -o $TEST_ROOT/result --flake-registry file:///no-registry.json $flake2Dir#bar --refresh
@ -186,7 +186,7 @@ nix build -o $TEST_ROOT/result --no-use-registries $flake2Dir#bar --refresh
# Updating the flake should not change the lockfile.
nix flake lock $flake2Dir
[[ -z $(git -C $flake2Dir diff master) ]]
[[ -z $(git -C $flake2Dir diff main || echo failed) ]]
# Now we should be able to build the flake in pure mode.
nix build -o $TEST_ROOT/result flake2#bar
@ -221,7 +221,7 @@ nix build -o $TEST_ROOT/result $flake3Dir#"sth sth"
nix build -o $TEST_ROOT/result $flake3Dir#"sth%20sth"
# Check whether it saved the lockfile
(! [[ -z $(git -C $flake3Dir diff master) ]])
[[ -n $(git -C $flake3Dir diff master) ]]
git -C $flake3Dir add flake.lock
@ -321,10 +321,10 @@ nix build -o $TEST_ROOT/result flake4#xyzzy
# Test 'nix flake update' and --override-flake.
nix flake lock $flake3Dir
[[ -z $(git -C $flake3Dir diff master) ]]
[[ -z $(git -C $flake3Dir diff master || echo failed) ]]
nix flake update $flake3Dir --override-flake flake2 nixpkgs
[[ ! -z $(git -C $flake3Dir diff master) ]]
[[ ! -z $(git -C $flake3Dir diff master || echo failed) ]]
# Make branch "removeXyzzy" where flake3 doesn't have xyzzy anymore
git -C $flake3Dir checkout -b removeXyzzy

View file

@ -103,7 +103,7 @@ makeTest (
{ config, lib, pkgs, nodes, ... }:
{ virtualisation.writableStore = true;
virtualisation.diskSize = 2048;
virtualisation.pathsInNixDB = [ pkgs.hello pkgs.fuse ];
virtualisation.additionalPaths = [ pkgs.hello pkgs.fuse ];
virtualisation.memorySize = 4096;
nix.binaryCaches = lib.mkForce [ ];
nix.extraOptions = "experimental-features = nix-command flakes";

View file

@ -14,7 +14,7 @@ makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; pkgD = pk
{ client =
{ config, lib, pkgs, ... }:
{ virtualisation.writableStore = true;
virtualisation.pathsInNixDB = [ pkgA pkgD.drvPath ];
virtualisation.additionalPaths = [ pkgA pkgD.drvPath ];
nix.binaryCaches = lib.mkForce [ ];
};
@ -22,7 +22,7 @@ makeTest (let pkgA = pkgs.cowsay; pkgB = pkgs.wget; pkgC = pkgs.hello; pkgD = pk
{ config, pkgs, ... }:
{ services.openssh.enable = true;
virtualisation.writableStore = true;
virtualisation.pathsInNixDB = [ pkgB pkgC ];
virtualisation.additionalPaths = [ pkgB pkgC ];
};
};

View file

@ -5,6 +5,42 @@ with import (nixpkgs + "/nixos/lib/testing-python.nix") {
extraConfigurations = [ { nixpkgs.overlays = [ overlay ]; } ];
};
let
nix-fetch = pkgs.writeText "fetch.nix" ''
derivation {
# This derivation is a copy from what is available over at
# nix.git:corepkgs/fetchurl.nix
builder = "builtin:fetchurl";
# We're going to fetch data from the http_dns instance created before
# we expect the content to be the same as the content available there.
# ```
# $ nix-hash --type sha256 --to-base32 $(echo "hello world" | sha256sum | cut -d " " -f 1)
# 0ix4jahrkll5zg01wandq78jw3ab30q4nscph67rniqg5x7r0j59
# ```
outputHash = "0ix4jahrkll5zg01wandq78jw3ab30q4nscph67rniqg5x7r0j59";
outputHashAlgo = "sha256";
outputHashMode = "flat";
name = "example.com";
url = "http://example.com";
unpack = false;
executable = false;
system = "builtin";
preferLocalBuild = true;
impureEnvVars = [
"http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
];
urls = [ "http://example.com" ];
}
'';
in
makeTest (
rec {
@ -68,40 +104,6 @@ rec {
};
};
nix-fetch = pkgs.writeText "fetch.nix" ''
derivation {
# This derivation is a copy from what is available over at
# nix.git:corepkgs/fetchurl.nix
builder = "builtin:fetchurl";
# We're going to fetch data from the http_dns instance created before
# we expect the content to be the same as the content available there.
# ```
# $ nix-hash --type sha256 --to-base32 $(echo "hello world" | sha256sum | cut -d " " -f 1)
# 0ix4jahrkll5zg01wandq78jw3ab30q4nscph67rniqg5x7r0j59
# ```
outputHash = "0ix4jahrkll5zg01wandq78jw3ab30q4nscph67rniqg5x7r0j59";
outputHashAlgo = "sha256";
outputHashMode = "flat";
name = "example.com";
url = "http://example.com";
unpack = false;
executable = false;
system = "builtin";
preferLocalBuild = true;
impureEnvVars = [
"http_proxy" "https_proxy" "ftp_proxy" "all_proxy" "no_proxy"
];
urls = [ "http://example.com" ];
}
'';
testScript = { nodes, ... }: ''
http_dns.wait_for_unit("nginx")
http_dns.wait_for_open_port(80)

View file

@ -61,7 +61,7 @@ in
}
];
virtualisation.writableStore = true;
virtualisation.pathsInNixDB = [ config.system.build.extraUtils ];
virtualisation.additionalPaths = [ config.system.build.extraUtils ];
nix.binaryCaches = lib.mkForce [ ];
programs.ssh.extraConfig = "ConnectTimeout 30";
};

View file

@ -42,6 +42,11 @@ testRepl () {
echo "$replOutput"
echo "$replOutput" | grep -qs "while evaluating the file" \
|| fail "nix repl --show-trace doesn't show the trace"
nix repl "${nixArgs[@]}" --option pure-eval true 2>&1 <<< "builtins.currentSystem" \
| grep "attribute 'currentSystem' missing"
nix repl "${nixArgs[@]}" 2>&1 <<< "builtins.currentSystem" \
| grep "$(nix-instantiate --eval -E 'builtins.currentSystem')"
}
# Simple test, try building a drv

View file

@ -28,11 +28,18 @@ nix search -f search.nix '' |grep -q hello
e=$'\x1b' # grep doesn't support \e, \033 or even \x1b
# Multiple overlapping regexes
(( $(nix search -f search.nix '' 'oo' 'foo' 'oo' | grep "$e\[32;1mfoo$e\\[0;1m" | wc -l) == 1 ))
(( $(nix search -f search.nix '' 'broken b' 'en bar' | grep "$e\[32;1mbroken bar$e\\[0m" | wc -l) == 1 ))
(( $(nix search -f search.nix '' 'oo' 'foo' 'oo' | grep -c "$e\[32;1mfoo$e\\[0;1m") == 1 ))
(( $(nix search -f search.nix '' 'broken b' 'en bar' | grep -c "$e\[32;1mbroken bar$e\\[0m") == 1 ))
# Multiple matches
# Searching for 'o' should yield the 'o' in 'broken bar', the 'oo' in foo and 'o' in hello
(( $(nix search -f search.nix '' 'o' | grep -Eo "$e\[32;1mo{1,2}$e\[(0|0;1)m" | wc -l) == 3 ))
(( $(nix search -f search.nix '' 'o' | grep -Eoc "$e\[32;1mo{1,2}$e\[(0|0;1)m") == 3 ))
# Searching for 'b' should yield the 'b' in bar and the two 'b's in 'broken bar'
# NOTE: This does not work with `grep -c` because it counts the two 'b's in 'broken bar' as one matched line
(( $(nix search -f search.nix '' 'b' | grep -Eo "$e\[32;1mb$e\[(0|0;1)m" | wc -l) == 3 ))
## Tests for --exclude
(( $(nix search -f search.nix -e hello | grep -c hello) == 0 ))
(( $(nix search -f search.nix foo --exclude 'foo|bar' | grep -Ec 'foo|bar') == 0 ))
(( $(nix search -f search.nix foo -e foo --exclude bar | grep -Ec 'foo|bar') == 0 ))

View file

@ -10,12 +10,12 @@ with import (nixpkgs + "/nixos/lib/testing-python.nix") {
makeTest {
name = "setuid";
machine =
nodes.machine =
{ config, lib, pkgs, ... }:
{ virtualisation.writableStore = true;
nix.binaryCaches = lib.mkForce [ ];
nix.nixPath = [ "nixpkgs=${lib.cleanSource pkgs.path}" ];
virtualisation.pathsInNixDB = [ pkgs.stdenv pkgs.pkgsi686Linux.stdenv ];
virtualisation.additionalPaths = [ pkgs.stdenv pkgs.pkgsi686Linux.stdenv ];
};
testScript = { nodes }: ''

View file

@ -59,7 +59,7 @@ let
echo 'ref: refs/heads/master' > $out/HEAD
mkdir -p $out/info
echo -e '${nixpkgs.rev}\trefs/heads/master' > $out/info/refs
echo -e '${nixpkgs.rev}\trefs/heads/master\n${nixpkgs.rev}\trefs/tags/foo-bar' > $out/info/refs
'';
in
@ -106,7 +106,7 @@ makeTest (
{
virtualisation.writableStore = true;
virtualisation.diskSize = 2048;
virtualisation.pathsInNixDB = [ pkgs.hello pkgs.fuse ];
virtualisation.additionalPaths = [ pkgs.hello pkgs.fuse ];
virtualisation.memorySize = 4096;
nix.binaryCaches = lib.mkForce [ ];
nix.extraOptions = ''
@ -132,6 +132,17 @@ makeTest (
client.succeed("curl -v https://git.sr.ht/ >&2")
client.succeed("nix registry list | grep nixpkgs")
# Test that it resolves HEAD
rev = client.succeed("nix flake info sourcehut:~NixOS/nixpkgs --json | jq -r .revision")
assert rev.strip() == "${nixpkgs.rev}", "revision mismatch"
# Test that it resolves branches
rev = client.succeed("nix flake info sourcehut:~NixOS/nixpkgs/master --json | jq -r .revision")
assert rev.strip() == "${nixpkgs.rev}", "revision mismatch"
# Test that it resolves tags
rev = client.succeed("nix flake info sourcehut:~NixOS/nixpkgs/foo-bar --json | jq -r .revision")
assert rev.strip() == "${nixpkgs.rev}", "revision mismatch"
# Registry and pinning test
rev = client.succeed("nix flake info nixpkgs --json | jq -r .revision")
assert rev.strip() == "${nixpkgs.rev}", "revision mismatch"