Compare commits
90 commits
fix-curl-t
...
master
Author | SHA1 | Date | |
---|---|---|---|
5038e1bec4 | |||
02b4632e77 | |||
3a022d4599 | |||
bd79c1f6f6 | |||
e63c9e73e3 | |||
c6a542f22a | |||
7f9a0033c7 | |||
619cc4af85 | |||
c6a0f4c393 | |||
9533d85ce0 | |||
6b83174fff | |||
cf4c31c872 | |||
ae244af242 | |||
92ede15dd9 | |||
a56b51a0ba | |||
b9d6c6f000 | |||
5dafde28db | |||
d20f814cde | |||
252c78b288 | |||
b774845af7 | |||
5fad9d01c2 | |||
08ee364950 | |||
e07ec8d27e | |||
cec50290bf | |||
f186000367 | |||
7348653ff4 | |||
8c4ea7a451 | |||
918717f3b5 | |||
87c604c1f0 | |||
84de821004 | |||
8478c99d09 | |||
a2c4fcd5e9 | |||
5bdac86be2 | |||
31f5ecfaa5 | |||
ecb0a23d51 | |||
f27e53f77e | |||
b6120d26a8 | |||
c128031492 | |||
7ef2645f45 | |||
693e68e09c | |||
7298a38a07 | |||
ad03159e25 | |||
bd285849ed | |||
5fa8b3f965 | |||
ceefddafe8 | |||
787015fec0 | |||
fdff96501f | |||
e5b397b2c7 | |||
177e5742fa | |||
73728874ab | |||
45b3dc325a | |||
800fba1037 | |||
171d784404 | |||
ee07ce7554 | |||
d459224724 | |||
15ee2bc2fe | |||
057af1dbd8 | |||
6dab42a551 | |||
5c06a8d328 | |||
92ddce4f46 | |||
0463d5e36f | |||
1dbaf11948 | |||
f435634a29 | |||
b226b5cd97 | |||
91b00b145f | |||
b7ea98bf34 | |||
477f82e5a7 | |||
653c407784 | |||
ee9c988a1b | |||
35ebae198f | |||
a02457db71 | |||
05a10dd835 | |||
2053ac7747 | |||
f9021c4c6c | |||
1eeaf99cf8 | |||
56df30cd3f | |||
c3fefd1a6e | |||
363a2f6826 | |||
399b6f3c46 | |||
a2597d5f27 | |||
e349f2c0a3 | |||
ec415d7166 | |||
7c5596734f | |||
320126aeeb | |||
9a0855bbb6 | |||
11d8534629 | |||
d171090530 | |||
5011a52cf3 | |||
4a3e96281d | |||
d854e7dfd6 |
38
contrib/stack-collapse.py
Executable file
38
contrib/stack-collapse.py
Executable file
|
@ -0,0 +1,38 @@
|
||||||
|
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p python3 --pure

# To be used with `--trace-function-calls` and `flamegraph.pl`.
#
# For example:
#
# nix-instantiate --trace-function-calls '<nixpkgs>' -A hello 2> nix-function-calls.trace
# ./contrib/stack-collapse.py nix-function-calls.trace > nix-function-calls.folded
# nix-shell -p flamegraph --run "flamegraph.pl nix-function-calls.folded > nix-function-calls.svg"

import fileinput


def collapse(lines):
    """Fold `function-trace` log lines into flamegraph.pl "folded" format.

    Each relevant input line looks like:

        function-trace entered <location> at <timestamp>
        function-trace exited <location> at <timestamp>

    All other lines are ignored.  For every "exited" event this yields one
    folded line of the form "loc1;loc2;...;locN duration", where the stack
    is the ';'-joined locations currently entered and duration is the
    difference between the exit timestamp and the matching entry timestamp.
    """
    stack = []      # locations of currently-open frames, outermost first
    timestack = []  # entry timestamps, parallel to `stack`

    for line in lines:
        components = line.strip().split(" ", 2)
        if components[0] != "function-trace":
            continue

        direction = components[1]
        # rsplit: the location text may itself contain spaces (e.g.
        # "undefined position"), but the trailing "at <timestamp>" is
        # always the last two words.
        loc, _at, timestamp = components[2].rsplit(" ", 2)
        time = int(timestamp)

        if direction == "entered":
            stack.append(loc)
            timestack.append(time)
        elif direction == "exited":
            dur = time - timestack.pop()
            yield f"{';'.join(stack)} {dur}"
            stack.pop()


if __name__ == "__main__":
    # Guarding the I/O behind __main__ keeps the module importable
    # (e.g. for testing) without consuming stdin or sys.argv files.
    for folded in collapse(fileinput.input()):
        print(folded)
|
|
@ -9,5 +9,6 @@
|
||||||
<xi:include href="distributed-builds.xml" />
|
<xi:include href="distributed-builds.xml" />
|
||||||
<xi:include href="cores-vs-jobs.xml" />
|
<xi:include href="cores-vs-jobs.xml" />
|
||||||
<xi:include href="diff-hook.xml" />
|
<xi:include href="diff-hook.xml" />
|
||||||
|
<xi:include href="post-build-hook.xml" />
|
||||||
|
|
||||||
</part>
|
</part>
|
||||||
|
|
160
doc/manual/advanced-topics/post-build-hook.xml
Normal file
160
doc/manual/advanced-topics/post-build-hook.xml
Normal file
|
@ -0,0 +1,160 @@
|
||||||
|
<chapter xmlns="http://docbook.org/ns/docbook"
|
||||||
|
xmlns:xlink="http://www.w3.org/1999/xlink"
|
||||||
|
xmlns:xi="http://www.w3.org/2001/XInclude"
|
||||||
|
xml:id="chap-post-build-hook"
|
||||||
|
version="5.0"
|
||||||
|
>
|
||||||
|
|
||||||
|
<title>Using the <xref linkend="conf-post-build-hook" /></title>
|
||||||
|
<subtitle>Uploading to an S3-compatible binary cache after each build</subtitle>
|
||||||
|
|
||||||
|
|
||||||
|
<section xml:id="chap-post-build-hook-caveats">
|
||||||
|
<title>Implementation Caveats</title>
|
||||||
|
<para>Here we use the post-build hook to upload to a binary cache.
|
||||||
|
This is a simple and working example, but it is not suitable for all
|
||||||
|
use cases.</para>
|
||||||
|
|
||||||
|
<para>The post build hook program runs after each executed build,
|
||||||
|
and blocks the build loop. The build loop exits if the hook program
|
||||||
|
fails.</para>
|
||||||
|
|
||||||
|
<para>Concretely, this implementation will make Nix slow or unusable
|
||||||
|
when the internet is slow or unreliable.</para>
|
||||||
|
|
||||||
|
<para>A more advanced implementation might pass the store paths to a
|
||||||
|
user-supplied daemon or queue for processing the store paths outside
|
||||||
|
of the build loop.</para>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<section>
|
||||||
|
<title>Prerequisites</title>
|
||||||
|
|
||||||
|
<para>
|
||||||
|
This tutorial assumes you have configured an S3-compatible binary cache
|
||||||
|
according to the instructions at
|
||||||
|
<xref linkend="ssec-s3-substituter-authenticated-writes" />, and
|
||||||
|
that the <literal>root</literal> user's default AWS profile can
|
||||||
|
upload to the bucket.
|
||||||
|
</para>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<section>
|
||||||
|
<title>Set up a Signing Key</title>
|
||||||
|
<para>Use <command>nix-store --generate-binary-cache-key</command> to
|
||||||
|
create our public and private signing keys. We will sign paths
|
||||||
|
with the private key, and distribute the public key for verifying
|
||||||
|
the authenticity of the paths.</para>
|
||||||
|
|
||||||
|
<screen>
|
||||||
|
# nix-store --generate-binary-cache-key example-nix-cache-1 /etc/nix/key.private /etc/nix/key.public
|
||||||
|
# cat /etc/nix/key.public
|
||||||
|
example-nix-cache-1:1/cKDz3QCCOmwcztD2eV6Coggp6rqc9DGjWv7C0G+rM=
|
||||||
|
</screen>
|
||||||
|
|
||||||
|
<para>Then, add the public key and the cache URL to your
|
||||||
|
<filename>nix.conf</filename>'s <xref linkend="conf-trusted-public-keys" />
|
||||||
|
and <xref linkend="conf-substituters" /> like:</para>
|
||||||
|
|
||||||
|
<programlisting>
|
||||||
|
substituters = https://cache.nixos.org/ s3://example-nix-cache
|
||||||
|
trusted-public-keys = cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY= example-nix-cache-1:1/cKDz3QCCOmwcztD2eV6Coggp6rqc9DGjWv7C0G+rM=
|
||||||
|
</programlisting>
|
||||||
|
|
||||||
|
<para>We will restart the Nix daemon in a later step.</para>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<section>
|
||||||
|
<title>Implementing the build hook</title>
|
||||||
|
<para>Write the following script to
|
||||||
|
<filename>/etc/nix/upload-to-cache.sh</filename>:
|
||||||
|
</para>
|
||||||
|
|
||||||
|
<programlisting>
|
||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
set -f # disable globbing
|
||||||
|
export IFS=' '
|
||||||
|
|
||||||
|
echo "Signing paths" $OUT_PATHS
|
||||||
|
nix sign-paths --key-file /etc/nix/key.private $OUT_PATHS
|
||||||
|
echo "Uploading paths" $OUT_PATHS
|
||||||
|
exec nix copy --to 's3://example-nix-cache' $OUT_PATHS
|
||||||
|
</programlisting>
|
||||||
|
|
||||||
|
<note>
|
||||||
|
<title>Should <literal>$OUT_PATHS</literal> be quoted?</title>
|
||||||
|
<para>
|
||||||
|
The <literal>$OUT_PATHS</literal> variable is a space-separated
|
||||||
|
list of Nix store paths. In this case, we expect and want the
|
||||||
|
shell to perform word splitting to make each output path its
|
||||||
|
own argument to <command>nix sign-paths</command>. Nix guarantees
|
||||||
|
the paths will not contain any spaces, however a store path
|
||||||
|
might contain glob characters. The <command>set -f</command>
|
||||||
|
disables globbing in the shell.
|
||||||
|
</para>
|
||||||
|
</note>
|
||||||
|
<para>
|
||||||
|
Then make sure the hook program is executable by the <literal>root</literal> user:
|
||||||
|
<screen>
|
||||||
|
# chmod +x /etc/nix/upload-to-cache.sh
|
||||||
|
</screen></para>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<section>
|
||||||
|
<title>Updating Nix Configuration</title>
|
||||||
|
|
||||||
|
<para>Edit <filename>/etc/nix/nix.conf</filename> to run our hook,
|
||||||
|
by adding the following configuration snippet at the end:</para>
|
||||||
|
|
||||||
|
<programlisting>
|
||||||
|
post-build-hook = /etc/nix/upload-to-cache.sh
|
||||||
|
</programlisting>
|
||||||
|
|
||||||
|
<para>Then, restart the <command>nix-daemon</command>.</para>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<section>
|
||||||
|
<title>Testing</title>
|
||||||
|
|
||||||
|
<para>Build any derivation, for example:</para>
|
||||||
|
|
||||||
|
<screen>
|
||||||
|
$ nix-build -E '(import <nixpkgs> {}).writeText "example" (builtins.toString builtins.currentTime)'
|
||||||
|
these derivations will be built:
|
||||||
|
/nix/store/s4pnfbkalzy5qz57qs6yybna8wylkig6-example.drv
|
||||||
|
building '/nix/store/s4pnfbkalzy5qz57qs6yybna8wylkig6-example.drv'...
|
||||||
|
running post-build-hook '/home/grahamc/projects/github.com/NixOS/nix/post-hook.sh'...
|
||||||
|
post-build-hook: Signing paths /nix/store/ibcyipq5gf91838ldx40mjsp0b8w9n18-example
|
||||||
|
post-build-hook: Uploading paths /nix/store/ibcyipq5gf91838ldx40mjsp0b8w9n18-example
|
||||||
|
/nix/store/ibcyipq5gf91838ldx40mjsp0b8w9n18-example
|
||||||
|
</screen>
|
||||||
|
|
||||||
|
<para>Then delete the path from the store, and try substituting it from the binary cache:</para>
|
||||||
|
<screen>
|
||||||
|
$ rm ./result
|
||||||
|
$ nix-store --delete /nix/store/ibcyipq5gf91838ldx40mjsp0b8w9n18-example
|
||||||
|
</screen>
|
||||||
|
|
||||||
|
<para>Now, copy the path back from the cache:</para>
|
||||||
|
<screen>
|
||||||
|
$ nix-store --realise /nix/store/ibcyipq5gf91838ldx40mjsp0b8w9n18-example
|
||||||
|
copying path '/nix/store/m8bmqwrch6l3h8s0k3d673xpmipcdpsa-example' from 's3://example-nix-cache'...
|
||||||
|
warning: you did not specify '--add-root'; the result might be removed by the garbage collector
|
||||||
|
/nix/store/m8bmqwrch6l3h8s0k3d673xpmipcdpsa-example
|
||||||
|
</screen>
|
||||||
|
</section>
|
||||||
|
<section>
|
||||||
|
<title>Conclusion</title>
|
||||||
|
<para>
|
||||||
|
We now have a Nix installation configured to automatically sign and
|
||||||
|
upload every local build to a remote binary cache.
|
||||||
|
</para>
|
||||||
|
|
||||||
|
<para>
|
||||||
|
Before deploying this to production, be sure to consider the
|
||||||
|
implementation caveats in <xref linkend="chap-post-build-hook-caveats" />.
|
||||||
|
</para>
|
||||||
|
</section>
|
||||||
|
</chapter>
|
|
@ -483,8 +483,10 @@ builtins.fetchurl {
|
||||||
|
|
||||||
<varlistentry xml:id="conf-max-free"><term><literal>max-free</literal></term>
|
<varlistentry xml:id="conf-max-free"><term><literal>max-free</literal></term>
|
||||||
|
|
||||||
<listitem><para>This option defines after how many free bytes to stop collecting
|
<listitem><para>When a garbage collection is triggered by the
|
||||||
garbage once the <literal>min-free</literal> condition gets triggered.</para></listitem>
|
<literal>min-free</literal> option, it stops as soon as
|
||||||
|
<literal>max-free</literal> bytes are available. The default is
|
||||||
|
infinity (i.e. delete all garbage).</para></listitem>
|
||||||
|
|
||||||
</varlistentry>
|
</varlistentry>
|
||||||
|
|
||||||
|
@ -528,9 +530,11 @@ builtins.fetchurl {
|
||||||
<varlistentry xml:id="conf-min-free"><term><literal>min-free</literal></term>
|
<varlistentry xml:id="conf-min-free"><term><literal>min-free</literal></term>
|
||||||
|
|
||||||
<listitem>
|
<listitem>
|
||||||
<para>When the disk reaches <literal>min-free</literal> bytes of free disk space during a build, nix
|
<para>When free disk space in <filename>/nix/store</filename>
|
||||||
will start to garbage-collection until <literal>max-free</literal> bytes are available on the disk.
|
drops below <literal>min-free</literal> during a build, Nix
|
||||||
A value of <literal>0</literal> (the default) means that this feature is disabled.</para>
|
performs a garbage-collection until <literal>max-free</literal>
|
||||||
|
bytes are available or there is no more garbage. A value of
|
||||||
|
<literal>0</literal> (the default) disables this feature.</para>
|
||||||
</listitem>
|
</listitem>
|
||||||
|
|
||||||
</varlistentry>
|
</varlistentry>
|
||||||
|
@ -660,6 +664,62 @@ password <replaceable>my-password</replaceable>
|
||||||
|
|
||||||
</varlistentry>
|
</varlistentry>
|
||||||
|
|
||||||
|
<varlistentry xml:id="conf-post-build-hook">
|
||||||
|
<term><literal>post-build-hook</literal></term>
|
||||||
|
<listitem>
|
||||||
|
<para>Optional. The path to a program to execute after each build.</para>
|
||||||
|
|
||||||
|
<para>This option is only settable in the global
|
||||||
|
<filename>nix.conf</filename>, or on the command line by trusted
|
||||||
|
users.</para>
|
||||||
|
|
||||||
|
<para>When using the nix-daemon, the daemon executes the hook as
|
||||||
|
<literal>root</literal>. If the nix-daemon is not involved, the
|
||||||
|
hook runs as the user executing the nix-build.</para>
|
||||||
|
|
||||||
|
<itemizedlist>
|
||||||
|
<listitem><para>The hook executes after an evaluation-time build.</para></listitem>
|
||||||
|
<listitem><para>The hook does not execute on substituted paths.</para></listitem>
|
||||||
|
<listitem><para>The hook's output always goes to the user's terminal.</para></listitem>
|
||||||
|
<listitem><para>If the hook fails, the build succeeds but no further builds execute.</para></listitem>
|
||||||
|
<listitem><para>The hook executes synchronously, and blocks other builds from progressing while it runs.</para></listitem>
|
||||||
|
</itemizedlist>
|
||||||
|
|
||||||
|
<para>The program executes with no arguments. The program's environment
|
||||||
|
contains the following environment variables:</para>
|
||||||
|
|
||||||
|
<variablelist>
|
||||||
|
<varlistentry>
|
||||||
|
<term><envar>DRV_PATH</envar></term>
|
||||||
|
<listitem>
|
||||||
|
<para>The derivation for the built paths.</para>
|
||||||
|
<para>Example:
|
||||||
|
<literal>/nix/store/5nihn1a7pa8b25l9zafqaqibznlvvp3f-bash-4.4-p23.drv</literal>
|
||||||
|
</para>
|
||||||
|
</listitem>
|
||||||
|
</varlistentry>
|
||||||
|
|
||||||
|
<varlistentry>
|
||||||
|
<term><envar>OUT_PATHS</envar></term>
|
||||||
|
<listitem>
|
||||||
|
<para>Output paths of the built derivation, separated by a space character.</para>
|
||||||
|
<para>Example:
|
||||||
|
<literal>/nix/store/zf5lbh336mnzf1nlswdn11g4n2m8zh3g-bash-4.4-p23-dev
|
||||||
|
/nix/store/rjxwxwv1fpn9wa2x5ssk5phzwlcv4mna-bash-4.4-p23-doc
|
||||||
|
/nix/store/6bqvbzjkcp9695dq0dpl5y43nvy37pq1-bash-4.4-p23-info
|
||||||
|
/nix/store/r7fng3kk3vlpdlh2idnrbn37vh4imlj2-bash-4.4-p23-man
|
||||||
|
/nix/store/xfghy8ixrhz3kyy6p724iv3cxji088dx-bash-4.4-p23</literal>.
|
||||||
|
</para>
|
||||||
|
</listitem>
|
||||||
|
</varlistentry>
|
||||||
|
</variablelist>
|
||||||
|
|
||||||
|
<para>See <xref linkend="chap-post-build-hook" /> for an example
|
||||||
|
implementation.</para>
|
||||||
|
|
||||||
|
</listitem>
|
||||||
|
</varlistentry>
|
||||||
|
|
||||||
<varlistentry xml:id="conf-repeat"><term><literal>repeat</literal></term>
|
<varlistentry xml:id="conf-repeat"><term><literal>repeat</literal></term>
|
||||||
|
|
||||||
<listitem><para>How many times to repeat builds to check whether
|
<listitem><para>How many times to repeat builds to check whether
|
||||||
|
@ -813,6 +873,14 @@ password <replaceable>my-password</replaceable>
|
||||||
|
|
||||||
</varlistentry>
|
</varlistentry>
|
||||||
|
|
||||||
|
<varlistentry xml:id="conf-stalled-download-timeout"><term><literal>stalled-download-timeout</literal></term>
|
||||||
|
<listitem>
|
||||||
|
<para>The timeout (in seconds) for receiving data from servers
|
||||||
|
during download. Nix cancels idle downloads after this timeout's
|
||||||
|
duration.</para>
|
||||||
|
</listitem>
|
||||||
|
</varlistentry>
|
||||||
|
|
||||||
<varlistentry xml:id="conf-substituters"><term><literal>substituters</literal></term>
|
<varlistentry xml:id="conf-substituters"><term><literal>substituters</literal></term>
|
||||||
|
|
||||||
<listitem><para>A list of URLs of substituters, separated by
|
<listitem><para>A list of URLs of substituters, separated by
|
||||||
|
@ -913,6 +981,34 @@ requiredSystemFeatures = [ "kvm" ];
|
||||||
|
|
||||||
</varlistentry>
|
</varlistentry>
|
||||||
|
|
||||||
|
<varlistentry xml:id="conf-trace-function-calls"><term><literal>trace-function-calls</literal></term>
|
||||||
|
|
||||||
|
<listitem>
|
||||||
|
|
||||||
|
<para>Default: <literal>false</literal>.</para>
|
||||||
|
|
||||||
|
<para>If set to <literal>true</literal>, the Nix evaluator will
|
||||||
|
trace every function call. Nix will print a log message at the
|
||||||
|
"vomit" level for every function entrance and function exit.</para>
|
||||||
|
|
||||||
|
<informalexample><screen>
|
||||||
|
function-trace entered undefined position at 1565795816999559622
|
||||||
|
function-trace exited undefined position at 1565795816999581277
|
||||||
|
function-trace entered /nix/store/.../example.nix:226:41 at 1565795253249935150
|
||||||
|
function-trace exited /nix/store/.../example.nix:226:41 at 1565795253249941684
|
||||||
|
</screen></informalexample>
|
||||||
|
|
||||||
|
<para>The <literal>undefined position</literal> means the function
|
||||||
|
call is a builtin.</para>
|
||||||
|
|
||||||
|
<para>Use the <literal>contrib/stack-collapse.py</literal> script
|
||||||
|
distributed with the Nix source code to convert the trace logs
|
||||||
|
in to a format suitable for <command>flamegraph.pl</command>.</para>
|
||||||
|
|
||||||
|
</listitem>
|
||||||
|
|
||||||
|
</varlistentry>
|
||||||
|
|
||||||
<varlistentry xml:id="conf-trusted-public-keys"><term><literal>trusted-public-keys</literal></term>
|
<varlistentry xml:id="conf-trusted-public-keys"><term><literal>trusted-public-keys</literal></term>
|
||||||
|
|
||||||
<listitem><para>A whitespace-separated list of public keys. When
|
<listitem><para>A whitespace-separated list of public keys. When
|
||||||
|
|
|
@ -221,31 +221,53 @@ also <xref linkend="sec-common-options" />.</phrase></para>
|
||||||
|
|
||||||
<varlistentry><term><filename>~/.nix-defexpr</filename></term>
|
<varlistentry><term><filename>~/.nix-defexpr</filename></term>
|
||||||
|
|
||||||
<listitem><para>A directory that contains the default Nix
|
<listitem><para>The source for the default Nix
|
||||||
expressions used by the <option>--install</option>,
|
expressions used by the <option>--install</option>,
|
||||||
<option>--upgrade</option>, and <option>--query
|
<option>--upgrade</option>, and <option>--query
|
||||||
--available</option> operations to obtain derivations. The
|
--available</option> operations to obtain derivations. The
|
||||||
<option>--file</option> option may be used to override this
|
<option>--file</option> option may be used to override this
|
||||||
default.</para>
|
default.</para>
|
||||||
|
|
||||||
<para>The Nix expressions in this directory are combined into a
|
<para>If <filename>~/.nix-defexpr</filename> is a file,
|
||||||
single set, with each file as an attribute that has the name of
|
it is loaded as a Nix expression. If the expression
|
||||||
the file. Thus, if <filename>~/.nix-defexpr</filename> contains
|
is a set, it is used as the default Nix expression.
|
||||||
two files, <filename>foo</filename> and <filename>bar</filename>,
|
If the expression is a function, an empty set is passed
|
||||||
|
as argument and the return value is used as
|
||||||
|
the default Nix expression.</para>
|
||||||
|
|
||||||
|
<para>If <filename>~/.nix-defexpr</filename> is a directory
|
||||||
|
containing a <filename>default.nix</filename> file, that file
|
||||||
|
is loaded as in the above paragraph.</para>
|
||||||
|
|
||||||
|
<para>If <filename>~/.nix-defexpr</filename> is a directory without
|
||||||
|
a <filename>default.nix</filename> file, then its contents
|
||||||
|
(both files and subdirectories) are loaded as Nix expressions.
|
||||||
|
The expressions are combined into a single set, each expression
|
||||||
|
under an attribute with the same name as the original file
|
||||||
|
or subdirectory.
|
||||||
|
</para>
|
||||||
|
|
||||||
|
<para>For example, if <filename>~/.nix-defexpr</filename> contains
|
||||||
|
two files, <filename>foo.nix</filename> and <filename>bar.nix</filename>,
|
||||||
then the default Nix expression will essentially be
|
then the default Nix expression will essentially be
|
||||||
|
|
||||||
<programlisting>
|
<programlisting>
|
||||||
{
|
{
|
||||||
foo = import ~/.nix-defexpr/foo;
|
foo = import ~/.nix-defexpr/foo.nix;
|
||||||
bar = import ~/.nix-defexpr/bar;
|
bar = import ~/.nix-defexpr/bar.nix;
|
||||||
}</programlisting>
|
}</programlisting>
|
||||||
|
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
|
<para>The file <filename>manifest.nix</filename> is always ignored.
|
||||||
|
Subdirectories without a <filename>default.nix</filename> file
|
||||||
|
are traversed recursively in search of more Nix expressions,
|
||||||
|
but the names of these intermediate directories are not
|
||||||
|
added to the attribute paths of the default Nix expression.</para>
|
||||||
|
|
||||||
<para>The command <command>nix-channel</command> places symlinks
|
<para>The command <command>nix-channel</command> places symlinks
|
||||||
to the downloaded Nix expressions from each subscribed channel in
|
to the downloaded Nix expressions from each subscribed channel in
|
||||||
this directory.</para>
|
this directory.</para>
|
||||||
|
|
||||||
</listitem>
|
</listitem>
|
||||||
|
|
||||||
</varlistentry>
|
</varlistentry>
|
||||||
|
@ -1348,10 +1370,13 @@ profile. The generations can be a list of generation numbers, the
|
||||||
special value <literal>old</literal> to delete all non-current
|
special value <literal>old</literal> to delete all non-current
|
||||||
generations, a value such as <literal>30d</literal> to delete all
|
generations, a value such as <literal>30d</literal> to delete all
|
||||||
generations older than the specified number of days (except for the
|
generations older than the specified number of days (except for the
|
||||||
generation that was active at that point in time), or a value such as.
|
generation that was active at that point in time), or a value such as
|
||||||
<literal>+5</literal> to only keep the specified items older than the
|
<literal>+5</literal> to keep the last <literal>5</literal> generations
|
||||||
current generation. Periodically deleting old generations is important
|
ignoring any newer than current, e.g., if <literal>30</literal> is the current
|
||||||
to make garbage collection effective.</para>
|
generation <literal>+5</literal> will delete generation <literal>25</literal>
|
||||||
|
and all older generations.
|
||||||
|
Periodically deleting old generations is important to make garbage collection
|
||||||
|
effective.</para>
|
||||||
|
|
||||||
</refsection>
|
</refsection>
|
||||||
|
|
||||||
|
|
|
@ -170,18 +170,6 @@ if builtins ? getEnv then builtins.getEnv "PATH" else ""</programlisting>
|
||||||
</varlistentry>
|
</varlistentry>
|
||||||
|
|
||||||
|
|
||||||
<varlistentry xml:id='builtin-splitVersion'>
|
|
||||||
<term><function>builtins.splitVersion</function>
|
|
||||||
<replaceable>s</replaceable></term>
|
|
||||||
|
|
||||||
<listitem><para>Split a string representing a version into its
|
|
||||||
components, by the same version splitting logic underlying the
|
|
||||||
version comparison in <link linkend="ssec-version-comparisons">
|
|
||||||
<command>nix-env -u</command></link>.</para></listitem>
|
|
||||||
|
|
||||||
</varlistentry>
|
|
||||||
|
|
||||||
|
|
||||||
<varlistentry xml:id='builtin-concatLists'>
|
<varlistentry xml:id='builtin-concatLists'>
|
||||||
<term><function>builtins.concatLists</function>
|
<term><function>builtins.concatLists</function>
|
||||||
<replaceable>lists</replaceable></term>
|
<replaceable>lists</replaceable></term>
|
||||||
|
@ -448,7 +436,7 @@ stdenv.mkDerivation { … }
|
||||||
<example>
|
<example>
|
||||||
<title>Fetching an arbitrary ref</title>
|
<title>Fetching an arbitrary ref</title>
|
||||||
<programlisting>builtins.fetchGit {
|
<programlisting>builtins.fetchGit {
|
||||||
url = "https://gitub.com/NixOS/nix.git";
|
url = "https://github.com/NixOS/nix.git";
|
||||||
ref = "refs/heads/0.5-release";
|
ref = "refs/heads/0.5-release";
|
||||||
}</programlisting>
|
}</programlisting>
|
||||||
</example>
|
</example>
|
||||||
|
@ -499,11 +487,8 @@ stdenv.mkDerivation { … }
|
||||||
<title>Fetching a tag</title>
|
<title>Fetching a tag</title>
|
||||||
<programlisting>builtins.fetchGit {
|
<programlisting>builtins.fetchGit {
|
||||||
url = "https://github.com/nixos/nix.git";
|
url = "https://github.com/nixos/nix.git";
|
||||||
ref = "tags/1.9";
|
ref = "refs/tags/1.9";
|
||||||
}</programlisting>
|
}</programlisting>
|
||||||
<note><para>Due to a bug (<link
|
|
||||||
xlink:href="https://github.com/NixOS/nix/issues/2385">#2385</link>),
|
|
||||||
only non-annotated tags can be fetched.</para></note>
|
|
||||||
</example>
|
</example>
|
||||||
|
|
||||||
<example>
|
<example>
|
||||||
|
@ -1275,6 +1260,19 @@ Evaluates to <literal>[ " " [ "FOO" ] " " ]</literal>.
|
||||||
</para></listitem>
|
</para></listitem>
|
||||||
</varlistentry>
|
</varlistentry>
|
||||||
|
|
||||||
|
|
||||||
|
<varlistentry xml:id='builtin-splitVersion'>
|
||||||
|
<term><function>builtins.splitVersion</function>
|
||||||
|
<replaceable>s</replaceable></term>
|
||||||
|
|
||||||
|
<listitem><para>Split a string representing a version into its
|
||||||
|
components, by the same version splitting logic underlying the
|
||||||
|
version comparison in <link linkend="ssec-version-comparisons">
|
||||||
|
<command>nix-env -u</command></link>.</para></listitem>
|
||||||
|
|
||||||
|
</varlistentry>
|
||||||
|
|
||||||
|
|
||||||
<varlistentry xml:id='builtin-stringLength'>
|
<varlistentry xml:id='builtin-stringLength'>
|
||||||
<term><function>builtins.stringLength</function>
|
<term><function>builtins.stringLength</function>
|
||||||
<replaceable>e</replaceable></term>
|
<replaceable>e</replaceable></term>
|
||||||
|
|
|
@ -15,13 +15,16 @@ weakest binding).</para>
|
||||||
<tgroup cols='3'>
|
<tgroup cols='3'>
|
||||||
<thead>
|
<thead>
|
||||||
<row>
|
<row>
|
||||||
|
<entry>Name</entry>
|
||||||
<entry>Syntax</entry>
|
<entry>Syntax</entry>
|
||||||
<entry>Associativity</entry>
|
<entry>Associativity</entry>
|
||||||
<entry>Description</entry>
|
<entry>Description</entry>
|
||||||
|
<entry>Precedence</entry>
|
||||||
</row>
|
</row>
|
||||||
</thead>
|
</thead>
|
||||||
<tbody>
|
<tbody>
|
||||||
<row>
|
<row>
|
||||||
|
<entry>Select</entry>
|
||||||
<entry><replaceable>e</replaceable> <literal>.</literal>
|
<entry><replaceable>e</replaceable> <literal>.</literal>
|
||||||
<replaceable>attrpath</replaceable>
|
<replaceable>attrpath</replaceable>
|
||||||
[ <literal>or</literal> <replaceable>def</replaceable> ]
|
[ <literal>or</literal> <replaceable>def</replaceable> ]
|
||||||
|
@ -33,19 +36,25 @@ weakest binding).</para>
|
||||||
dot-separated list of attribute names.) If the attribute
|
dot-separated list of attribute names.) If the attribute
|
||||||
doesn’t exist, return <replaceable>def</replaceable> if
|
doesn’t exist, return <replaceable>def</replaceable> if
|
||||||
provided, otherwise abort evaluation.</entry>
|
provided, otherwise abort evaluation.</entry>
|
||||||
|
<entry>1</entry>
|
||||||
</row>
|
</row>
|
||||||
<row>
|
<row>
|
||||||
|
<entry>Application</entry>
|
||||||
<entry><replaceable>e1</replaceable> <replaceable>e2</replaceable></entry>
|
<entry><replaceable>e1</replaceable> <replaceable>e2</replaceable></entry>
|
||||||
<entry>left</entry>
|
<entry>left</entry>
|
||||||
<entry>Call function <replaceable>e1</replaceable> with
|
<entry>Call function <replaceable>e1</replaceable> with
|
||||||
argument <replaceable>e2</replaceable>.</entry>
|
argument <replaceable>e2</replaceable>.</entry>
|
||||||
|
<entry>2</entry>
|
||||||
</row>
|
</row>
|
||||||
<row>
|
<row>
|
||||||
|
<entry>Arithmetic Negation</entry>
|
||||||
<entry><literal>-</literal> <replaceable>e</replaceable></entry>
|
<entry><literal>-</literal> <replaceable>e</replaceable></entry>
|
||||||
<entry>none</entry>
|
<entry>none</entry>
|
||||||
<entry>Arithmetic negation.</entry>
|
<entry>Arithmetic negation.</entry>
|
||||||
|
<entry>3</entry>
|
||||||
</row>
|
</row>
|
||||||
<row>
|
<row>
|
||||||
|
<entry>Has Attribute</entry>
|
||||||
<entry><replaceable>e</replaceable> <literal>?</literal>
|
<entry><replaceable>e</replaceable> <literal>?</literal>
|
||||||
<replaceable>attrpath</replaceable></entry>
|
<replaceable>attrpath</replaceable></entry>
|
||||||
<entry>none</entry>
|
<entry>none</entry>
|
||||||
|
@ -53,34 +62,69 @@ weakest binding).</para>
|
||||||
the attribute denoted by <replaceable>attrpath</replaceable>;
|
the attribute denoted by <replaceable>attrpath</replaceable>;
|
||||||
return <literal>true</literal> or
|
return <literal>true</literal> or
|
||||||
<literal>false</literal>.</entry>
|
<literal>false</literal>.</entry>
|
||||||
|
<entry>4</entry>
|
||||||
</row>
|
</row>
|
||||||
<row>
|
<row>
|
||||||
|
<entry>List Concatenation</entry>
|
||||||
<entry><replaceable>e1</replaceable> <literal>++</literal> <replaceable>e2</replaceable></entry>
|
<entry><replaceable>e1</replaceable> <literal>++</literal> <replaceable>e2</replaceable></entry>
|
||||||
<entry>right</entry>
|
<entry>right</entry>
|
||||||
<entry>List concatenation.</entry>
|
<entry>List concatenation.</entry>
|
||||||
|
<entry>5</entry>
|
||||||
</row>
|
</row>
|
||||||
<row>
|
<row>
|
||||||
|
<entry>Multiplication</entry>
|
||||||
<entry>
|
<entry>
|
||||||
<replaceable>e1</replaceable> <literal>*</literal> <replaceable>e2</replaceable>,
|
<replaceable>e1</replaceable> <literal>*</literal> <replaceable>e2</replaceable>,
|
||||||
|
</entry>
|
||||||
|
<entry>left</entry>
|
||||||
|
<entry>Arithmetic multiplication.</entry>
|
||||||
|
<entry>6</entry>
|
||||||
|
</row>
|
||||||
|
<row>
|
||||||
|
<entry>Division</entry>
|
||||||
|
<entry>
|
||||||
<replaceable>e1</replaceable> <literal>/</literal> <replaceable>e2</replaceable>
|
<replaceable>e1</replaceable> <literal>/</literal> <replaceable>e2</replaceable>
|
||||||
</entry>
|
</entry>
|
||||||
<entry>left</entry>
|
<entry>left</entry>
|
||||||
<entry>Arithmetic multiplication and division.</entry>
|
<entry>Arithmetic division.</entry>
|
||||||
|
<entry>6</entry>
|
||||||
</row>
|
</row>
|
||||||
<row>
|
<row>
|
||||||
|
<entry>Addition</entry>
|
||||||
|
<entry>
|
||||||
|
<replaceable>e1</replaceable> <literal>+</literal> <replaceable>e2</replaceable>
|
||||||
|
</entry>
|
||||||
|
<entry>left</entry>
|
||||||
|
<entry>Arithmetic addition.</entry>
|
||||||
|
<entry>7</entry>
|
||||||
|
</row>
|
||||||
|
<row>
|
||||||
|
<entry>Subtraction</entry>
|
||||||
<entry>
|
<entry>
|
||||||
<replaceable>e1</replaceable> <literal>+</literal> <replaceable>e2</replaceable>,
|
|
||||||
<replaceable>e1</replaceable> <literal>-</literal> <replaceable>e2</replaceable>
|
<replaceable>e1</replaceable> <literal>-</literal> <replaceable>e2</replaceable>
|
||||||
</entry>
|
</entry>
|
||||||
<entry>left</entry>
|
<entry>left</entry>
|
||||||
<entry>Arithmetic addition and subtraction. String or path concatenation (only by <literal>+</literal>).</entry>
|
<entry>Arithmetic subtraction.</entry>
|
||||||
|
<entry>7</entry>
|
||||||
</row>
|
</row>
|
||||||
<row>
|
<row>
|
||||||
|
<entry>String Concatenation</entry>
|
||||||
|
<entry>
|
||||||
|
<replaceable>string1</replaceable> <literal>+</literal> <replaceable>string2</replaceable>
|
||||||
|
</entry>
|
||||||
|
<entry>left</entry>
|
||||||
|
<entry>String concatenation.</entry>
|
||||||
|
<entry>7</entry>
|
||||||
|
</row>
|
||||||
|
<row>
|
||||||
|
<entry>Not</entry>
|
||||||
<entry><literal>!</literal> <replaceable>e</replaceable></entry>
|
<entry><literal>!</literal> <replaceable>e</replaceable></entry>
|
||||||
<entry>none</entry>
|
<entry>none</entry>
|
||||||
<entry>Boolean negation.</entry>
|
<entry>Boolean negation.</entry>
|
||||||
|
<entry>8</entry>
|
||||||
</row>
|
</row>
|
||||||
<row>
|
<row>
|
||||||
|
<entry>Update</entry>
|
||||||
<entry><replaceable>e1</replaceable> <literal>//</literal>
|
<entry><replaceable>e1</replaceable> <literal>//</literal>
|
||||||
<replaceable>e2</replaceable></entry>
|
<replaceable>e2</replaceable></entry>
|
||||||
<entry>right</entry>
|
<entry>right</entry>
|
||||||
|
@ -89,47 +133,90 @@ weakest binding).</para>
|
||||||
<replaceable>e2</replaceable> (with the latter taking
|
<replaceable>e2</replaceable> (with the latter taking
|
||||||
precedence over the former in case of equally named
|
precedence over the former in case of equally named
|
||||||
attributes).</entry>
|
attributes).</entry>
|
||||||
|
<entry>9</entry>
|
||||||
</row>
|
</row>
|
||||||
<row>
|
<row>
|
||||||
|
<entry>Less Than</entry>
|
||||||
<entry>
|
<entry>
|
||||||
<replaceable>e1</replaceable> <literal><</literal> <replaceable>e2</replaceable>,
|
<replaceable>e1</replaceable> <literal><</literal> <replaceable>e2</replaceable>,
|
||||||
<replaceable>e1</replaceable> <literal>></literal> <replaceable>e2</replaceable>,
|
</entry>
|
||||||
<replaceable>e1</replaceable> <literal><=</literal> <replaceable>e2</replaceable>,
|
<entry>none</entry>
|
||||||
|
<entry>Arithmetic comparison.</entry>
|
||||||
|
<entry>10</entry>
|
||||||
|
</row>
|
||||||
|
<row>
|
||||||
|
<entry>Less Than or Equal To</entry>
|
||||||
|
<entry>
|
||||||
|
<replaceable>e1</replaceable> <literal><=</literal> <replaceable>e2</replaceable>
|
||||||
|
</entry>
|
||||||
|
<entry>none</entry>
|
||||||
|
<entry>Arithmetic comparison.</entry>
|
||||||
|
<entry>10</entry>
|
||||||
|
</row>
|
||||||
|
<row>
|
||||||
|
<entry>Greater Than</entry>
|
||||||
|
<entry>
|
||||||
|
<replaceable>e1</replaceable> <literal>></literal> <replaceable>e2</replaceable>
|
||||||
|
</entry>
|
||||||
|
<entry>none</entry>
|
||||||
|
<entry>Arithmetic comparison.</entry>
|
||||||
|
<entry>10</entry>
|
||||||
|
</row>
|
||||||
|
<row>
|
||||||
|
<entry>Greater Than or Equal To</entry>
|
||||||
|
<entry>
|
||||||
<replaceable>e1</replaceable> <literal>>=</literal> <replaceable>e2</replaceable>
|
<replaceable>e1</replaceable> <literal>>=</literal> <replaceable>e2</replaceable>
|
||||||
</entry>
|
</entry>
|
||||||
<entry>none</entry>
|
<entry>none</entry>
|
||||||
<entry>Arithmetic comparison.</entry>
|
<entry>Arithmetic comparison.</entry>
|
||||||
|
<entry>10</entry>
|
||||||
</row>
|
</row>
|
||||||
<row>
|
<row>
|
||||||
|
<entry>Equality</entry>
|
||||||
|
<entry>
|
||||||
|
<replaceable>e1</replaceable> <literal>==</literal> <replaceable>e2</replaceable>
|
||||||
|
</entry>
|
||||||
|
<entry>none</entry>
|
||||||
|
<entry>Equality.</entry>
|
||||||
|
<entry>11</entry>
|
||||||
|
</row>
|
||||||
|
<row>
|
||||||
|
<entry>Inequality</entry>
|
||||||
<entry>
|
<entry>
|
||||||
<replaceable>e1</replaceable> <literal>==</literal> <replaceable>e2</replaceable>,
|
|
||||||
<replaceable>e1</replaceable> <literal>!=</literal> <replaceable>e2</replaceable>
|
<replaceable>e1</replaceable> <literal>!=</literal> <replaceable>e2</replaceable>
|
||||||
</entry>
|
</entry>
|
||||||
<entry>none</entry>
|
<entry>none</entry>
|
||||||
<entry>Equality and inequality.</entry>
|
<entry>Inequality.</entry>
|
||||||
|
<entry>11</entry>
|
||||||
</row>
|
</row>
|
||||||
<row>
|
<row>
|
||||||
|
<entry>Logical AND</entry>
|
||||||
<entry><replaceable>e1</replaceable> <literal>&&</literal>
|
<entry><replaceable>e1</replaceable> <literal>&&</literal>
|
||||||
<replaceable>e2</replaceable></entry>
|
<replaceable>e2</replaceable></entry>
|
||||||
<entry>left</entry>
|
<entry>left</entry>
|
||||||
<entry>Logical AND.</entry>
|
<entry>Logical AND.</entry>
|
||||||
|
<entry>12</entry>
|
||||||
</row>
|
</row>
|
||||||
<row>
|
<row>
|
||||||
|
<entry>Logical OR</entry>
|
||||||
<entry><replaceable>e1</replaceable> <literal>||</literal>
|
<entry><replaceable>e1</replaceable> <literal>||</literal>
|
||||||
<replaceable>e2</replaceable></entry>
|
<replaceable>e2</replaceable></entry>
|
||||||
<entry>left</entry>
|
<entry>left</entry>
|
||||||
<entry>Logical OR.</entry>
|
<entry>Logical OR.</entry>
|
||||||
|
<entry>13</entry>
|
||||||
</row>
|
</row>
|
||||||
<row>
|
<row>
|
||||||
|
<entry>Logical Implication</entry>
|
||||||
<entry><replaceable>e1</replaceable> <literal>-></literal>
|
<entry><replaceable>e1</replaceable> <literal>-></literal>
|
||||||
<replaceable>e2</replaceable></entry>
|
<replaceable>e2</replaceable></entry>
|
||||||
<entry>none</entry>
|
<entry>none</entry>
|
||||||
<entry>Logical implication (equivalent to
|
<entry>Logical implication (equivalent to
|
||||||
<literal>!<replaceable>e1</replaceable> ||
|
<literal>!<replaceable>e1</replaceable> ||
|
||||||
<replaceable>e2</replaceable></literal>).</entry>
|
<replaceable>e2</replaceable></literal>).</entry>
|
||||||
|
<entry>14</entry>
|
||||||
</row>
|
</row>
|
||||||
</tbody>
|
</tbody>
|
||||||
</tgroup>
|
</tgroup>
|
||||||
</table>
|
</table>
|
||||||
|
|
||||||
</section>
|
</section>
|
||||||
|
|
|
@ -67,5 +67,23 @@ $ sudo launchctl kickstart -k system/org.nixos.nix-daemon
|
||||||
</screen>
|
</screen>
|
||||||
</section>
|
</section>
|
||||||
|
|
||||||
|
<section xml:id="sec-installer-proxy-settings">
|
||||||
|
|
||||||
|
<title>Proxy Environment Variables</title>
|
||||||
|
|
||||||
|
<para>The Nix installer has special handling for these proxy-related
|
||||||
|
environment variables:
|
||||||
|
<varname>http_proxy</varname>, <varname>https_proxy</varname>,
|
||||||
|
<varname>ftp_proxy</varname>, <varname>no_proxy</varname>,
|
||||||
|
<varname>HTTP_PROXY</varname>, <varname>HTTPS_PROXY</varname>,
|
||||||
|
<varname>FTP_PROXY</varname>, <varname>NO_PROXY</varname>.
|
||||||
|
</para>
|
||||||
|
<para>If any of these variables are set when running the Nix installer,
|
||||||
|
then the installer will create an override file at
|
||||||
|
<filename>/etc/systemd/system/nix-daemon.service.d/override.conf</filename>
|
||||||
|
so <command>nix-daemon</command> will use them.
|
||||||
|
</para>
|
||||||
|
</section>
|
||||||
|
|
||||||
</section>
|
</section>
|
||||||
</chapter>
|
</chapter>
|
||||||
|
|
|
@ -4,9 +4,22 @@
|
||||||
version="5.0"
|
version="5.0"
|
||||||
xml:id="ssec-relnotes-2.3">
|
xml:id="ssec-relnotes-2.3">
|
||||||
|
|
||||||
<title>Release 2.3 (????-??-??)</title>
|
<title>Release 2.3 (2019-09-04)</title>
|
||||||
|
|
||||||
<para>This release contains the following changes:</para>
|
<para>This is primarily a bug fix release. However, it makes some
|
||||||
|
incompatible changes:</para>
|
||||||
|
|
||||||
|
<itemizedlist>
|
||||||
|
|
||||||
|
<listitem>
|
||||||
|
<para>Nix now uses BSD file locks instead of POSIX file
|
||||||
|
locks. Because of this, you should not use Nix 2.3 and previous
|
||||||
|
releases at the same time on a Nix store.</para>
|
||||||
|
</listitem>
|
||||||
|
|
||||||
|
</itemizedlist>
|
||||||
|
|
||||||
|
<para>It also has the following changes:</para>
|
||||||
|
|
||||||
<itemizedlist>
|
<itemizedlist>
|
||||||
|
|
||||||
|
@ -18,5 +31,62 @@
|
||||||
already begin with <literal>refs/</literal>.
|
already begin with <literal>refs/</literal>.
|
||||||
</para>
|
</para>
|
||||||
</listitem>
|
</listitem>
|
||||||
|
|
||||||
|
<listitem>
|
||||||
|
<para>The installer now enables sandboxing by default on
|
||||||
|
Linux. The <literal>max-jobs</literal> setting now defaults to
|
||||||
|
1.</para>
|
||||||
|
</listitem>
|
||||||
|
|
||||||
|
<listitem>
|
||||||
|
<para>New builtin functions:
|
||||||
|
<literal>builtins.isPath</literal>,
|
||||||
|
<literal>builtins.hashFile</literal>.
|
||||||
|
</para>
|
||||||
|
</listitem>
|
||||||
|
|
||||||
|
<listitem>
|
||||||
|
<para>The <command>nix</command> command has a new
|
||||||
|
<option>--print-build-logs</option> (<option>-L</option>) flag to
|
||||||
|
print build log output to stderr, rather than showing the last log
|
||||||
|
line in the progress bar. To distinguish between concurrent
|
||||||
|
builds, log lines are prefixed by the name of the package.
|
||||||
|
</para>
|
||||||
|
</listitem>
|
||||||
|
|
||||||
|
<listitem>
|
||||||
|
<para>Builds are now executed in a pseudo-terminal, and the
|
||||||
|
<envar>TERM</envar> environment variable is set to
|
||||||
|
<literal>xterm-256color</literal>. This allows many programs
|
||||||
|
(e.g. <command>gcc</command>, <command>clang</command>,
|
||||||
|
<command>cmake</command>) to print colorized log output.</para>
|
||||||
|
</listitem>
|
||||||
|
|
||||||
|
<listitem>
|
||||||
|
<para>Add <option>--no-net</option> convenience flag. This flag
|
||||||
|
disables substituters; sets the <literal>tarball-ttl</literal>
|
||||||
|
setting to infinity (ensuring that any previously downloaded files
|
||||||
|
are considered current); and disables retrying downloads and sets
|
||||||
|
the connection timeout to the minimum. This flag is enabled
|
||||||
|
automatically if there are no configured non-loopback network
|
||||||
|
interfaces.</para>
|
||||||
|
</listitem>
|
||||||
|
|
||||||
|
<listitem>
|
||||||
|
<para>Add a <literal>post-build-hook</literal> setting to run a
|
||||||
|
program after a build has succeeded.</para>
|
||||||
|
</listitem>
|
||||||
|
|
||||||
|
<listitem>
|
||||||
|
<para>Add a <literal>trace-function-calls</literal> setting to log
|
||||||
|
the duration of Nix function calls to stderr.</para>
|
||||||
|
</listitem>
|
||||||
|
|
||||||
|
<listitem>
|
||||||
|
<para>On Linux, sandboxing is now disabled by default on systems
|
||||||
|
that don’t have the necessary kernel support.</para>
|
||||||
|
</listitem>
|
||||||
|
|
||||||
</itemizedlist>
|
</itemizedlist>
|
||||||
|
|
||||||
</section>
|
</section>
|
||||||
|
|
|
@ -44,7 +44,7 @@ print STDERR "Nix revision is $nixRev, version is $version\n";
|
||||||
|
|
||||||
File::Path::make_path($releasesDir);
|
File::Path::make_path($releasesDir);
|
||||||
if (system("mountpoint -q $releasesDir") != 0) {
|
if (system("mountpoint -q $releasesDir") != 0) {
|
||||||
system("sshfs hydra-mirror:/releases $releasesDir") == 0 or die;
|
system("sshfs hydra-mirror\@nixos.org:/releases $releasesDir") == 0 or die;
|
||||||
}
|
}
|
||||||
|
|
||||||
my $releaseDir = "$releasesDir/nix/$releaseName";
|
my $releaseDir = "$releasesDir/nix/$releaseName";
|
||||||
|
|
|
@ -13,8 +13,12 @@
|
||||||
<true/>
|
<true/>
|
||||||
<key>RunAtLoad</key>
|
<key>RunAtLoad</key>
|
||||||
<true/>
|
<true/>
|
||||||
<key>Program</key>
|
<key>ProgramArguments</key>
|
||||||
<string>@bindir@/nix-daemon</string>
|
<array>
|
||||||
|
<string>/bin/sh</string>
|
||||||
|
<string>-c</string>
|
||||||
|
<string>/bin/wait4path @bindir@/nix-daemon && @bindir@/nix-daemon</string>
|
||||||
|
</array>
|
||||||
<key>StandardErrorPath</key>
|
<key>StandardErrorPath</key>
|
||||||
<string>/var/log/nix-daemon.log</string>
|
<string>/var/log/nix-daemon.log</string>
|
||||||
<key>StandardOutPath</key>
|
<key>StandardOutPath</key>
|
||||||
|
|
|
@ -7,3 +7,6 @@ ConditionPathIsReadWrite=@localstatedir@/nix/daemon-socket
|
||||||
[Service]
|
[Service]
|
||||||
ExecStart=@@bindir@/nix-daemon nix-daemon --daemon
|
ExecStart=@@bindir@/nix-daemon nix-daemon --daemon
|
||||||
KillMode=process
|
KillMode=process
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
|
|
13
release.nix
13
release.nix
|
@ -72,7 +72,12 @@ let
|
||||||
# https://github.com/NixOS/nixpkgs/issues/45462
|
# https://github.com/NixOS/nixpkgs/issues/45462
|
||||||
''
|
''
|
||||||
mkdir -p $out/lib
|
mkdir -p $out/lib
|
||||||
cp ${boost}/lib/libboost_context* $out/lib
|
cp -pd ${boost}/lib/{libboost_context*,libboost_thread*,libboost_system*} $out/lib
|
||||||
|
rm -f $out/lib/*.a
|
||||||
|
${lib.optionalString stdenv.isLinux ''
|
||||||
|
chmod u+w $out/lib/*.so.*
|
||||||
|
patchelf --set-rpath $out/lib:${stdenv.cc.cc.lib}/lib $out/lib/libboost_thread.so.*
|
||||||
|
''}
|
||||||
'';
|
'';
|
||||||
|
|
||||||
configureFlags = configureFlags ++
|
configureFlags = configureFlags ++
|
||||||
|
@ -165,10 +170,10 @@ let
|
||||||
chmod +x $TMPDIR/install-systemd-multi-user.sh
|
chmod +x $TMPDIR/install-systemd-multi-user.sh
|
||||||
chmod +x $TMPDIR/install-multi-user
|
chmod +x $TMPDIR/install-multi-user
|
||||||
dir=nix-${version}-${system}
|
dir=nix-${version}-${system}
|
||||||
fn=$out/$dir.tar.bz2
|
fn=$out/$dir.tar.xz
|
||||||
mkdir -p $out/nix-support
|
mkdir -p $out/nix-support
|
||||||
echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products
|
echo "file binary-dist $fn" >> $out/nix-support/hydra-build-products
|
||||||
tar cvfj $fn \
|
tar cvfJ $fn \
|
||||||
--owner=0 --group=0 --mode=u+rw,uga+r \
|
--owner=0 --group=0 --mode=u+rw,uga+r \
|
||||||
--absolute-names \
|
--absolute-names \
|
||||||
--hard-dereference \
|
--hard-dereference \
|
||||||
|
@ -295,7 +300,7 @@ let
|
||||||
|
|
||||||
substitute ${./scripts/install.in} $out/install \
|
substitute ${./scripts/install.in} $out/install \
|
||||||
${pkgs.lib.concatMapStrings
|
${pkgs.lib.concatMapStrings
|
||||||
(system: "--replace '@binaryTarball_${system}@' $(nix hash-file --base16 --type sha256 ${binaryTarball.${system}}/*.tar.bz2) ")
|
(system: "--replace '@binaryTarball_${system}@' $(nix hash-file --base16 --type sha256 ${binaryTarball.${system}}/*.tar.xz) ")
|
||||||
[ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ]
|
[ "x86_64-linux" "i686-linux" "x86_64-darwin" "aarch64-linux" ]
|
||||||
} \
|
} \
|
||||||
--replace '@nixVersion@' ${build.x86_64-linux.src.version}
|
--replace '@nixVersion@' ${build.x86_64-linux.src.version}
|
||||||
|
|
|
@ -330,7 +330,7 @@ EOF
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
if [ -d /nix ]; then
|
if [ -d /nix/store ] || [ -d /nix/var ]; then
|
||||||
failure <<EOF
|
failure <<EOF
|
||||||
There are some relics of a previous installation of Nix at /nix, and
|
There are some relics of a previous installation of Nix at /nix, and
|
||||||
this scripts assumes Nix is _not_ yet installed. Please delete the old
|
this scripts assumes Nix is _not_ yet installed. Please delete the old
|
||||||
|
@ -758,9 +758,13 @@ main() {
|
||||||
if [ "$(uname -s)" = "Darwin" ]; then
|
if [ "$(uname -s)" = "Darwin" ]; then
|
||||||
# shellcheck source=./install-darwin-multi-user.sh
|
# shellcheck source=./install-darwin-multi-user.sh
|
||||||
. "$EXTRACTED_NIX_PATH/install-darwin-multi-user.sh"
|
. "$EXTRACTED_NIX_PATH/install-darwin-multi-user.sh"
|
||||||
elif [ "$(uname -s)" = "Linux" ] && [ -e /run/systemd/system ]; then
|
elif [ "$(uname -s)" = "Linux" ]; then
|
||||||
# shellcheck source=./install-systemd-multi-user.sh
|
if [ -e /run/systemd/system ]; then
|
||||||
. "$EXTRACTED_NIX_PATH/install-systemd-multi-user.sh"
|
# shellcheck source=./install-systemd-multi-user.sh
|
||||||
|
. "$EXTRACTED_NIX_PATH/install-systemd-multi-user.sh"
|
||||||
|
else
|
||||||
|
failure "Sorry, the multi-user installation requires systemd on Linux (detected using /run/systemd/system)"
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
failure "Sorry, I don't know what to do on $(uname)"
|
failure "Sorry, I don't know what to do on $(uname)"
|
||||||
fi
|
fi
|
||||||
|
|
34
scripts/install-systemd-multi-user.sh
Normal file → Executable file
34
scripts/install-systemd-multi-user.sh
Normal file → Executable file
|
@ -9,6 +9,38 @@ readonly SERVICE_DEST=/etc/systemd/system/nix-daemon.service
|
||||||
readonly SOCKET_SRC=/lib/systemd/system/nix-daemon.socket
|
readonly SOCKET_SRC=/lib/systemd/system/nix-daemon.socket
|
||||||
readonly SOCKET_DEST=/etc/systemd/system/nix-daemon.socket
|
readonly SOCKET_DEST=/etc/systemd/system/nix-daemon.socket
|
||||||
|
|
||||||
|
|
||||||
|
# Path for the systemd override unit file to contain the proxy settings
|
||||||
|
readonly SERVICE_OVERRIDE=${SERVICE_DEST}.d/override.conf
|
||||||
|
|
||||||
|
create_systemd_override() {
|
||||||
|
header "Configuring proxy for the nix-daemon service"
|
||||||
|
_sudo "create directory for systemd unit override" mkdir -p "$(dirname $SERVICE_OVERRIDE)"
|
||||||
|
cat <<EOF | _sudo "create systemd unit override" tee "$SERVICE_OVERRIDE"
|
||||||
|
[Service]
|
||||||
|
$1
|
||||||
|
EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
# Gather all non-empty proxy environment variables into a string
|
||||||
|
create_systemd_proxy_env() {
|
||||||
|
vars="http_proxy https_proxy ftp_proxy no_proxy HTTP_PROXY HTTPS_PROXY FTP_PROXY NO_PROXY"
|
||||||
|
for v in $vars; do
|
||||||
|
if [ "x${!v:-}" != "x" ]; then
|
||||||
|
echo "Environment=${v}=${!v}"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
handle_network_proxy() {
|
||||||
|
# Create a systemd unit override with proxy environment variables
|
||||||
|
# if any proxy environment variables are not empty.
|
||||||
|
PROXY_ENV_STRING=$(create_systemd_proxy_env)
|
||||||
|
if [ -n "${PROXY_ENV_STRING}" ]; then
|
||||||
|
create_systemd_override "${PROXY_ENV_STRING}"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
poly_validate_assumptions() {
|
poly_validate_assumptions() {
|
||||||
if [ "$(uname -s)" != "Linux" ]; then
|
if [ "$(uname -s)" != "Linux" ]; then
|
||||||
failure "This script is for use with Linux!"
|
failure "This script is for use with Linux!"
|
||||||
|
@ -47,6 +79,8 @@ poly_configure_nix_daemon_service() {
|
||||||
_sudo "to set up the nix-daemon socket service" \
|
_sudo "to set up the nix-daemon socket service" \
|
||||||
systemctl enable "/nix/var/nix/profiles/default$SOCKET_SRC"
|
systemctl enable "/nix/var/nix/profiles/default$SOCKET_SRC"
|
||||||
|
|
||||||
|
handle_network_proxy
|
||||||
|
|
||||||
_sudo "to load the systemd unit for nix-daemon" \
|
_sudo "to load the systemd unit for nix-daemon" \
|
||||||
systemctl daemon-reload
|
systemctl daemon-reload
|
||||||
|
|
||||||
|
|
|
@ -30,12 +30,11 @@ case "$(uname -s).$(uname -m)" in
|
||||||
*) oops "sorry, there is no binary distribution of Nix for your platform";;
|
*) oops "sorry, there is no binary distribution of Nix for your platform";;
|
||||||
esac
|
esac
|
||||||
|
|
||||||
url="https://nixos.org/releases/nix/nix-@nixVersion@/nix-@nixVersion@-$system.tar.bz2"
|
url="https://nixos.org/releases/nix/nix-@nixVersion@/nix-@nixVersion@-$system.tar.xz"
|
||||||
|
|
||||||
tarball="$tmpDir/$(basename "$tmpDir/nix-@nixVersion@-$system.tar.bz2")"
|
tarball="$tmpDir/$(basename "$tmpDir/nix-@nixVersion@-$system.tar.xz")"
|
||||||
|
|
||||||
require_util curl "download the binary tarball"
|
require_util curl "download the binary tarball"
|
||||||
require_util bzcat "decompress the binary tarball"
|
|
||||||
require_util tar "unpack the binary tarball"
|
require_util tar "unpack the binary tarball"
|
||||||
|
|
||||||
echo "downloading Nix @nixVersion@ binary tarball for $system from '$url' to '$tmpDir'..."
|
echo "downloading Nix @nixVersion@ binary tarball for $system from '$url' to '$tmpDir'..."
|
||||||
|
@ -57,7 +56,7 @@ fi
|
||||||
|
|
||||||
unpack=$tmpDir/unpack
|
unpack=$tmpDir/unpack
|
||||||
mkdir -p "$unpack"
|
mkdir -p "$unpack"
|
||||||
< "$tarball" bzcat | tar -xf - -C "$unpack" || oops "failed to unpack '$url'"
|
tar -xf "$tarball" -C "$unpack" || oops "failed to unpack '$url'"
|
||||||
|
|
||||||
script=$(echo "$unpack"/*/install)
|
script=$(echo "$unpack"/*/install)
|
||||||
|
|
||||||
|
|
|
@ -9,6 +9,7 @@
|
||||||
#include "json.hh"
|
#include "json.hh"
|
||||||
|
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
|
#include <chrono>
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <unistd.h>
|
#include <unistd.h>
|
||||||
#include <sys/time.h>
|
#include <sys/time.h>
|
||||||
|
@ -16,7 +17,6 @@
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
#include <fstream>
|
#include <fstream>
|
||||||
|
|
||||||
#include <sys/time.h>
|
|
||||||
#include <sys/resource.h>
|
#include <sys/resource.h>
|
||||||
|
|
||||||
#if HAVE_BOEHMGC
|
#if HAVE_BOEHMGC
|
||||||
|
@ -1094,9 +1094,13 @@ void EvalState::callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & pos)
|
void EvalState::callFunction(Value & fun, Value & arg, Value & v, const Pos & pos)
|
||||||
{
|
{
|
||||||
|
std::optional<FunctionCallTrace> trace;
|
||||||
|
if (evalSettings.traceFunctionCalls) {
|
||||||
|
trace.emplace(pos);
|
||||||
|
}
|
||||||
|
|
||||||
forceValue(fun, pos);
|
forceValue(fun, pos);
|
||||||
|
|
||||||
if (fun.type == tPrimOp || fun.type == tPrimOpApp) {
|
if (fun.type == tPrimOp || fun.type == tPrimOpApp) {
|
||||||
|
|
|
@ -6,6 +6,7 @@
|
||||||
#include "symbol-table.hh"
|
#include "symbol-table.hh"
|
||||||
#include "hash.hh"
|
#include "hash.hh"
|
||||||
#include "config.hh"
|
#include "config.hh"
|
||||||
|
#include "function-trace.hh"
|
||||||
|
|
||||||
#include <map>
|
#include <map>
|
||||||
#include <unordered_map>
|
#include <unordered_map>
|
||||||
|
@ -349,6 +350,9 @@ struct EvalSettings : Config
|
||||||
|
|
||||||
Setting<Strings> allowedUris{this, {}, "allowed-uris",
|
Setting<Strings> allowedUris{this, {}, "allowed-uris",
|
||||||
"Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."};
|
"Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."};
|
||||||
|
|
||||||
|
Setting<bool> traceFunctionCalls{this, false, "trace-function-calls",
|
||||||
|
"Emit log messages for each function entry and exit at the 'vomit' log level (-vvvv)"};
|
||||||
};
|
};
|
||||||
|
|
||||||
extern EvalSettings evalSettings;
|
extern EvalSettings evalSettings;
|
||||||
|
|
24
src/libexpr/function-trace.hh
Normal file
24
src/libexpr/function-trace.hh
Normal file
|
@ -0,0 +1,24 @@
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "eval.hh"
|
||||||
|
#include <sys/time.h>
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
struct FunctionCallTrace
|
||||||
|
{
|
||||||
|
const Pos & pos;
|
||||||
|
|
||||||
|
FunctionCallTrace(const Pos & pos) : pos(pos) {
|
||||||
|
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch();
|
||||||
|
auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(duration);
|
||||||
|
printMsg(lvlInfo, "function-trace entered %1% at %2%", pos, ns.count());
|
||||||
|
}
|
||||||
|
|
||||||
|
~FunctionCallTrace() {
|
||||||
|
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch();
|
||||||
|
auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(duration);
|
||||||
|
printMsg(lvlInfo, "function-trace exited %1% at %2%", pos, ns.count());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
|
@ -111,9 +111,9 @@ static void parseJSON(EvalState & state, const char * & s, Value & v)
|
||||||
mkFloat(v, stod(tmp_number));
|
mkFloat(v, stod(tmp_number));
|
||||||
else
|
else
|
||||||
mkInt(v, stol(tmp_number));
|
mkInt(v, stol(tmp_number));
|
||||||
} catch (std::invalid_argument e) {
|
} catch (std::invalid_argument & e) {
|
||||||
throw JSONParseError("invalid JSON number");
|
throw JSONParseError("invalid JSON number");
|
||||||
} catch (std::out_of_range e) {
|
} catch (std::out_of_range & e) {
|
||||||
throw JSONParseError("out-of-range JSON number");
|
throw JSONParseError("out-of-range JSON number");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -38,7 +38,7 @@ GitInfo exportGit(ref<Store> store, const std::string & uri,
|
||||||
|
|
||||||
try {
|
try {
|
||||||
runProgram("git", true, { "-C", uri, "diff-index", "--quiet", "HEAD", "--" });
|
runProgram("git", true, { "-C", uri, "diff-index", "--quiet", "HEAD", "--" });
|
||||||
} catch (ExecError e) {
|
} catch (ExecError & e) {
|
||||||
if (!WIFEXITED(e.status) || WEXITSTATUS(e.status) != 1) throw;
|
if (!WIFEXITED(e.status) || WEXITSTATUS(e.status) != 1) throw;
|
||||||
clean = false;
|
clean = false;
|
||||||
}
|
}
|
||||||
|
|
|
@ -80,6 +80,7 @@ string getArg(const string & opt,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
#if OPENSSL_VERSION_NUMBER < 0x10101000L
|
||||||
/* OpenSSL is not thread-safe by default - it will randomly crash
|
/* OpenSSL is not thread-safe by default - it will randomly crash
|
||||||
unless the user supplies a mutex locking function. So let's do
|
unless the user supplies a mutex locking function. So let's do
|
||||||
that. */
|
that. */
|
||||||
|
@ -92,6 +93,7 @@ static void opensslLockCallback(int mode, int type, const char * file, int line)
|
||||||
else
|
else
|
||||||
opensslLocks[type].unlock();
|
opensslLocks[type].unlock();
|
||||||
}
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
static void sigHandler(int signo) { }
|
static void sigHandler(int signo) { }
|
||||||
|
@ -105,9 +107,11 @@ void initNix()
|
||||||
std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf));
|
std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf));
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#if OPENSSL_VERSION_NUMBER < 0x10101000L
|
||||||
/* Initialise OpenSSL locking. */
|
/* Initialise OpenSSL locking. */
|
||||||
opensslLocks = std::vector<std::mutex>(CRYPTO_num_locks());
|
opensslLocks = std::vector<std::mutex>(CRYPTO_num_locks());
|
||||||
CRYPTO_set_locking_callback(opensslLockCallback);
|
CRYPTO_set_locking_callback(opensslLockCallback);
|
||||||
|
#endif
|
||||||
|
|
||||||
loadConfFile();
|
loadConfFile();
|
||||||
|
|
||||||
|
@ -125,6 +129,15 @@ void initNix()
|
||||||
act.sa_handler = sigHandler;
|
act.sa_handler = sigHandler;
|
||||||
if (sigaction(SIGUSR1, &act, 0)) throw SysError("handling SIGUSR1");
|
if (sigaction(SIGUSR1, &act, 0)) throw SysError("handling SIGUSR1");
|
||||||
|
|
||||||
|
#if __APPLE__
|
||||||
|
/* HACK: on darwin, we need can’t use sigprocmask with SIGWINCH.
|
||||||
|
* Instead, add a dummy sigaction handler, and signalHandlerThread
|
||||||
|
* can handle the rest. */
|
||||||
|
struct sigaction sa;
|
||||||
|
sa.sa_handler = sigHandler;
|
||||||
|
if (sigaction(SIGWINCH, &sa, 0)) throw SysError("handling SIGWINCH");
|
||||||
|
#endif
|
||||||
|
|
||||||
/* Register a SIGSEGV handler to detect stack overflows. */
|
/* Register a SIGSEGV handler to detect stack overflows. */
|
||||||
detectStackOverflow();
|
detectStackOverflow();
|
||||||
|
|
||||||
|
|
|
@ -10,10 +10,13 @@
|
||||||
#include "nar-info-disk-cache.hh"
|
#include "nar-info-disk-cache.hh"
|
||||||
#include "nar-accessor.hh"
|
#include "nar-accessor.hh"
|
||||||
#include "json.hh"
|
#include "json.hh"
|
||||||
|
#include "thread-pool.hh"
|
||||||
|
|
||||||
#include <chrono>
|
#include <chrono>
|
||||||
|
|
||||||
#include <future>
|
#include <future>
|
||||||
|
#include <regex>
|
||||||
|
|
||||||
|
#include <nlohmann/json.hpp>
|
||||||
|
|
||||||
namespace nix {
|
namespace nix {
|
||||||
|
|
||||||
|
@ -55,7 +58,7 @@ void BinaryCacheStore::init()
|
||||||
}
|
}
|
||||||
|
|
||||||
void BinaryCacheStore::getFile(const std::string & path,
|
void BinaryCacheStore::getFile(const std::string & path,
|
||||||
Callback<std::shared_ptr<std::string>> callback)
|
Callback<std::shared_ptr<std::string>> callback) noexcept
|
||||||
{
|
{
|
||||||
try {
|
try {
|
||||||
callback(getFile(path));
|
callback(getFile(path));
|
||||||
|
@ -139,6 +142,11 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str
|
||||||
|
|
||||||
auto accessor_ = std::dynamic_pointer_cast<RemoteFSAccessor>(accessor);
|
auto accessor_ = std::dynamic_pointer_cast<RemoteFSAccessor>(accessor);
|
||||||
|
|
||||||
|
auto narAccessor = makeNarAccessor(nar);
|
||||||
|
|
||||||
|
if (accessor_)
|
||||||
|
accessor_->addToCache(info.path, *nar, narAccessor);
|
||||||
|
|
||||||
/* Optionally write a JSON file containing a listing of the
|
/* Optionally write a JSON file containing a listing of the
|
||||||
contents of the NAR. */
|
contents of the NAR. */
|
||||||
if (writeNARListing) {
|
if (writeNARListing) {
|
||||||
|
@ -148,11 +156,6 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str
|
||||||
JSONObject jsonRoot(jsonOut);
|
JSONObject jsonRoot(jsonOut);
|
||||||
jsonRoot.attr("version", 1);
|
jsonRoot.attr("version", 1);
|
||||||
|
|
||||||
auto narAccessor = makeNarAccessor(nar);
|
|
||||||
|
|
||||||
if (accessor_)
|
|
||||||
accessor_->addToCache(info.path, *nar, narAccessor);
|
|
||||||
|
|
||||||
{
|
{
|
||||||
auto res = jsonRoot.placeholder("root");
|
auto res = jsonRoot.placeholder("root");
|
||||||
listNar(res, narAccessor, "", true);
|
listNar(res, narAccessor, "", true);
|
||||||
|
@ -162,11 +165,6 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str
|
||||||
upsertFile(storePathToHash(info.path) + ".ls", jsonOut.str(), "application/json");
|
upsertFile(storePathToHash(info.path) + ".ls", jsonOut.str(), "application/json");
|
||||||
}
|
}
|
||||||
|
|
||||||
else {
|
|
||||||
if (accessor_)
|
|
||||||
accessor_->addToCache(info.path, *nar, makeNarAccessor(nar));
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Compress the NAR. */
|
/* Compress the NAR. */
|
||||||
narInfo->compression = compression;
|
narInfo->compression = compression;
|
||||||
auto now1 = std::chrono::steady_clock::now();
|
auto now1 = std::chrono::steady_clock::now();
|
||||||
|
@ -181,12 +179,70 @@ void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::str
|
||||||
% ((1.0 - (double) narCompressed->size() / nar->size()) * 100.0)
|
% ((1.0 - (double) narCompressed->size() / nar->size()) * 100.0)
|
||||||
% duration);
|
% duration);
|
||||||
|
|
||||||
/* Atomically write the NAR file. */
|
|
||||||
narInfo->url = "nar/" + narInfo->fileHash.to_string(Base32, false) + ".nar"
|
narInfo->url = "nar/" + narInfo->fileHash.to_string(Base32, false) + ".nar"
|
||||||
+ (compression == "xz" ? ".xz" :
|
+ (compression == "xz" ? ".xz" :
|
||||||
compression == "bzip2" ? ".bz2" :
|
compression == "bzip2" ? ".bz2" :
|
||||||
compression == "br" ? ".br" :
|
compression == "br" ? ".br" :
|
||||||
"");
|
"");
|
||||||
|
|
||||||
|
/* Optionally maintain an index of DWARF debug info files
|
||||||
|
consisting of JSON files named 'debuginfo/<build-id>' that
|
||||||
|
specify the NAR file and member containing the debug info. */
|
||||||
|
if (writeDebugInfo) {
|
||||||
|
|
||||||
|
std::string buildIdDir = "/lib/debug/.build-id";
|
||||||
|
|
||||||
|
if (narAccessor->stat(buildIdDir).type == FSAccessor::tDirectory) {
|
||||||
|
|
||||||
|
ThreadPool threadPool(25);
|
||||||
|
|
||||||
|
auto doFile = [&](std::string member, std::string key, std::string target) {
|
||||||
|
checkInterrupt();
|
||||||
|
|
||||||
|
nlohmann::json json;
|
||||||
|
json["archive"] = target;
|
||||||
|
json["member"] = member;
|
||||||
|
|
||||||
|
// FIXME: or should we overwrite? The previous link may point
|
||||||
|
// to a GC'ed file, so overwriting might be useful...
|
||||||
|
if (fileExists(key)) return;
|
||||||
|
|
||||||
|
printMsg(lvlTalkative, "creating debuginfo link from '%s' to '%s'", key, target);
|
||||||
|
|
||||||
|
upsertFile(key, json.dump(), "application/json");
|
||||||
|
};
|
||||||
|
|
||||||
|
std::regex regex1("^[0-9a-f]{2}$");
|
||||||
|
std::regex regex2("^[0-9a-f]{38}\\.debug$");
|
||||||
|
|
||||||
|
for (auto & s1 : narAccessor->readDirectory(buildIdDir)) {
|
||||||
|
auto dir = buildIdDir + "/" + s1;
|
||||||
|
|
||||||
|
if (narAccessor->stat(dir).type != FSAccessor::tDirectory
|
||||||
|
|| !std::regex_match(s1, regex1))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
for (auto & s2 : narAccessor->readDirectory(dir)) {
|
||||||
|
auto debugPath = dir + "/" + s2;
|
||||||
|
|
||||||
|
if (narAccessor->stat(debugPath).type != FSAccessor::tRegular
|
||||||
|
|| !std::regex_match(s2, regex2))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
auto buildId = s1 + s2;
|
||||||
|
|
||||||
|
std::string key = "debuginfo/" + buildId;
|
||||||
|
std::string target = "../" + narInfo->url;
|
||||||
|
|
||||||
|
threadPool.enqueue(std::bind(doFile, std::string(debugPath, 1), key, target));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
threadPool.process();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Atomically write the NAR file. */
|
||||||
if (repair || !fileExists(narInfo->url)) {
|
if (repair || !fileExists(narInfo->url)) {
|
||||||
stats.narWrite++;
|
stats.narWrite++;
|
||||||
upsertFile(narInfo->url, *narCompressed, "application/x-nix-nar");
|
upsertFile(narInfo->url, *narCompressed, "application/x-nix-nar");
|
||||||
|
@ -240,7 +296,7 @@ void BinaryCacheStore::narFromPath(const Path & storePath, Sink & sink)
|
||||||
}
|
}
|
||||||
|
|
||||||
void BinaryCacheStore::queryPathInfoUncached(const Path & storePath,
|
void BinaryCacheStore::queryPathInfoUncached(const Path & storePath,
|
||||||
Callback<std::shared_ptr<ValidPathInfo>> callback)
|
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept
|
||||||
{
|
{
|
||||||
auto uri = getUri();
|
auto uri = getUri();
|
||||||
auto act = std::make_shared<Activity>(*logger, lvlTalkative, actQueryPathInfo,
|
auto act = std::make_shared<Activity>(*logger, lvlTalkative, actQueryPathInfo,
|
||||||
|
@ -249,21 +305,23 @@ void BinaryCacheStore::queryPathInfoUncached(const Path & storePath,
|
||||||
|
|
||||||
auto narInfoFile = narInfoFileFor(storePath);
|
auto narInfoFile = narInfoFileFor(storePath);
|
||||||
|
|
||||||
|
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
|
||||||
|
|
||||||
getFile(narInfoFile,
|
getFile(narInfoFile,
|
||||||
{[=](std::future<std::shared_ptr<std::string>> fut) {
|
{[=](std::future<std::shared_ptr<std::string>> fut) {
|
||||||
try {
|
try {
|
||||||
auto data = fut.get();
|
auto data = fut.get();
|
||||||
|
|
||||||
if (!data) return callback(nullptr);
|
if (!data) return (*callbackPtr)(nullptr);
|
||||||
|
|
||||||
stats.narInfoRead++;
|
stats.narInfoRead++;
|
||||||
|
|
||||||
callback((std::shared_ptr<ValidPathInfo>)
|
(*callbackPtr)((std::shared_ptr<ValidPathInfo>)
|
||||||
std::make_shared<NarInfo>(*this, *data, narInfoFile));
|
std::make_shared<NarInfo>(*this, *data, narInfoFile));
|
||||||
|
|
||||||
(void) act; // force Activity into this lambda to ensure it stays alive
|
(void) act; // force Activity into this lambda to ensure it stays alive
|
||||||
} catch (...) {
|
} catch (...) {
|
||||||
callback.rethrow();
|
callbackPtr->rethrow();
|
||||||
}
|
}
|
||||||
}});
|
}});
|
||||||
}
|
}
|
||||||
|
|
|
@ -17,6 +17,7 @@ public:
|
||||||
|
|
||||||
const Setting<std::string> compression{this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"};
|
const Setting<std::string> compression{this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"};
|
||||||
const Setting<bool> writeNARListing{this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"};
|
const Setting<bool> writeNARListing{this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"};
|
||||||
|
const Setting<bool> writeDebugInfo{this, false, "index-debug-info", "whether to index DWARF debug info files by build ID"};
|
||||||
const Setting<Path> secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"};
|
const Setting<Path> secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"};
|
||||||
const Setting<Path> localNarCache{this, "", "local-nar-cache", "path to a local cache of NARs"};
|
const Setting<Path> localNarCache{this, "", "local-nar-cache", "path to a local cache of NARs"};
|
||||||
const Setting<bool> parallelCompression{this, false, "parallel-compression",
|
const Setting<bool> parallelCompression{this, false, "parallel-compression",
|
||||||
|
@ -47,7 +48,7 @@ public:
|
||||||
/* Fetch the specified file and call the specified callback with
|
/* Fetch the specified file and call the specified callback with
|
||||||
the result. A subclass may implement this asynchronously. */
|
the result. A subclass may implement this asynchronously. */
|
||||||
virtual void getFile(const std::string & path,
|
virtual void getFile(const std::string & path,
|
||||||
Callback<std::shared_ptr<std::string>> callback);
|
Callback<std::shared_ptr<std::string>> callback) noexcept;
|
||||||
|
|
||||||
std::shared_ptr<std::string> getFile(const std::string & path);
|
std::shared_ptr<std::string> getFile(const std::string & path);
|
||||||
|
|
||||||
|
@ -73,7 +74,7 @@ public:
|
||||||
bool isValidPathUncached(const Path & path) override;
|
bool isValidPathUncached(const Path & path) override;
|
||||||
|
|
||||||
void queryPathInfoUncached(const Path & path,
|
void queryPathInfoUncached(const Path & path,
|
||||||
Callback<std::shared_ptr<ValidPathInfo>> callback) override;
|
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override;
|
||||||
|
|
||||||
Path queryPathFromHashPart(const string & hashPart) override
|
Path queryPathFromHashPart(const string & hashPart) override
|
||||||
{ unsupported("queryPathFromHashPart"); }
|
{ unsupported("queryPathFromHashPart"); }
|
||||||
|
|
|
@ -1197,7 +1197,7 @@ void DerivationGoal::haveDerivation()
|
||||||
/* We are first going to try to create the invalid output paths
|
/* We are first going to try to create the invalid output paths
|
||||||
through substitutes. If that doesn't work, we'll build
|
through substitutes. If that doesn't work, we'll build
|
||||||
them. */
|
them. */
|
||||||
if (settings.useSubstitutes && drv->substitutesAllowed())
|
if (settings.useSubstitutes && parsedDrv->substitutesAllowed())
|
||||||
for (auto & i : invalidOutputs)
|
for (auto & i : invalidOutputs)
|
||||||
addWaitee(worker.makeSubstitutionGoal(i, buildMode == bmRepair ? Repair : NoRepair));
|
addWaitee(worker.makeSubstitutionGoal(i, buildMode == bmRepair ? Repair : NoRepair));
|
||||||
|
|
||||||
|
@ -1629,6 +1629,61 @@ void DerivationGoal::buildDone()
|
||||||
being valid. */
|
being valid. */
|
||||||
registerOutputs();
|
registerOutputs();
|
||||||
|
|
||||||
|
if (settings.postBuildHook != "") {
|
||||||
|
Activity act(*logger, lvlInfo, actPostBuildHook,
|
||||||
|
fmt("running post-build-hook '%s'", settings.postBuildHook),
|
||||||
|
Logger::Fields{drvPath});
|
||||||
|
PushActivity pact(act.id);
|
||||||
|
auto outputPaths = drv->outputPaths();
|
||||||
|
std::map<std::string, std::string> hookEnvironment = getEnv();
|
||||||
|
|
||||||
|
hookEnvironment.emplace("DRV_PATH", drvPath);
|
||||||
|
hookEnvironment.emplace("OUT_PATHS", chomp(concatStringsSep(" ", outputPaths)));
|
||||||
|
|
||||||
|
RunOptions opts(settings.postBuildHook, {});
|
||||||
|
opts.environment = hookEnvironment;
|
||||||
|
|
||||||
|
struct LogSink : Sink {
|
||||||
|
Activity & act;
|
||||||
|
std::string currentLine;
|
||||||
|
|
||||||
|
LogSink(Activity & act) : act(act) { }
|
||||||
|
|
||||||
|
void operator() (const unsigned char * data, size_t len) override {
|
||||||
|
for (size_t i = 0; i < len; i++) {
|
||||||
|
auto c = data[i];
|
||||||
|
|
||||||
|
if (c == '\n') {
|
||||||
|
flushLine();
|
||||||
|
} else {
|
||||||
|
currentLine += c;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void flushLine() {
|
||||||
|
if (settings.verboseBuild) {
|
||||||
|
printError("post-build-hook: " + currentLine);
|
||||||
|
} else {
|
||||||
|
act.result(resPostBuildLogLine, currentLine);
|
||||||
|
}
|
||||||
|
currentLine.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
~LogSink() {
|
||||||
|
if (currentLine != "") {
|
||||||
|
currentLine += '\n';
|
||||||
|
flushLine();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
LogSink sink(act);
|
||||||
|
|
||||||
|
opts.standardOut = &sink;
|
||||||
|
opts.mergeStderrToStdout = true;
|
||||||
|
runProgram2(opts);
|
||||||
|
}
|
||||||
|
|
||||||
if (buildMode == bmCheck) {
|
if (buildMode == bmCheck) {
|
||||||
done(BuildResult::Built);
|
done(BuildResult::Built);
|
||||||
return;
|
return;
|
||||||
|
@ -2302,17 +2357,37 @@ void DerivationGoal::startBuilder()
|
||||||
flags |= CLONE_NEWNET;
|
flags |= CLONE_NEWNET;
|
||||||
|
|
||||||
pid_t child = clone(childEntry, stack + stackSize, flags, this);
|
pid_t child = clone(childEntry, stack + stackSize, flags, this);
|
||||||
if (child == -1 && errno == EINVAL)
|
if (child == -1 && errno == EINVAL) {
|
||||||
/* Fallback for Linux < 2.13 where CLONE_NEWPID and
|
/* Fallback for Linux < 2.13 where CLONE_NEWPID and
|
||||||
CLONE_PARENT are not allowed together. */
|
CLONE_PARENT are not allowed together. */
|
||||||
child = clone(childEntry, stack + stackSize, flags & ~CLONE_NEWPID, this);
|
flags &= ~CLONE_NEWPID;
|
||||||
|
child = clone(childEntry, stack + stackSize, flags, this);
|
||||||
|
}
|
||||||
|
if (child == -1 && (errno == EPERM || errno == EINVAL)) {
|
||||||
|
/* Some distros patch Linux to not allow unprivileged
|
||||||
|
* user namespaces. If we get EPERM or EINVAL, try
|
||||||
|
* without CLONE_NEWUSER and see if that works.
|
||||||
|
*/
|
||||||
|
flags &= ~CLONE_NEWUSER;
|
||||||
|
child = clone(childEntry, stack + stackSize, flags, this);
|
||||||
|
}
|
||||||
|
/* Otherwise exit with EPERM so we can handle this in the
|
||||||
|
parent. This is only done when sandbox-fallback is set
|
||||||
|
to true (the default). */
|
||||||
|
if (child == -1 && (errno == EPERM || errno == EINVAL) && settings.sandboxFallback)
|
||||||
|
_exit(1);
|
||||||
if (child == -1) throw SysError("cloning builder process");
|
if (child == -1) throw SysError("cloning builder process");
|
||||||
|
|
||||||
writeFull(builderOut.writeSide.get(), std::to_string(child) + "\n");
|
writeFull(builderOut.writeSide.get(), std::to_string(child) + "\n");
|
||||||
_exit(0);
|
_exit(0);
|
||||||
}, options);
|
}, options);
|
||||||
|
|
||||||
if (helper.wait() != 0)
|
int res = helper.wait();
|
||||||
|
if (res != 0 && settings.sandboxFallback) {
|
||||||
|
useChroot = false;
|
||||||
|
tmpDirInSandbox = tmpDir;
|
||||||
|
goto fallback;
|
||||||
|
} else if (res != 0)
|
||||||
throw Error("unable to start build process");
|
throw Error("unable to start build process");
|
||||||
|
|
||||||
userNamespaceSync.readSide = -1;
|
userNamespaceSync.readSide = -1;
|
||||||
|
@ -2335,14 +2410,14 @@ void DerivationGoal::startBuilder()
|
||||||
writeFile("/proc/" + std::to_string(pid) + "/gid_map",
|
writeFile("/proc/" + std::to_string(pid) + "/gid_map",
|
||||||
(format("%d %d 1") % sandboxGid % hostGid).str());
|
(format("%d %d 1") % sandboxGid % hostGid).str());
|
||||||
|
|
||||||
/* Signal the builder that we've updated its user
|
/* Signal the builder that we've updated its user namespace. */
|
||||||
namespace. */
|
|
||||||
writeFull(userNamespaceSync.writeSide.get(), "1");
|
writeFull(userNamespaceSync.writeSide.get(), "1");
|
||||||
userNamespaceSync.writeSide = -1;
|
userNamespaceSync.writeSide = -1;
|
||||||
|
|
||||||
} else
|
} else
|
||||||
#endif
|
#endif
|
||||||
{
|
{
|
||||||
|
fallback:
|
||||||
options.allowVfork = !buildUser && !drv->isBuiltin();
|
options.allowVfork = !buildUser && !drv->isBuiltin();
|
||||||
pid = startProcess([&]() {
|
pid = startProcess([&]() {
|
||||||
runChild();
|
runChild();
|
||||||
|
@ -3984,17 +4059,6 @@ void SubstitutionGoal::tryToRun()
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* If the store path is already locked (probably by a
|
|
||||||
DerivationGoal), then put this goal to sleep. Note: we don't
|
|
||||||
acquire a lock here since that breaks addToStore(), so below we
|
|
||||||
handle an AlreadyLocked exception from addToStore(). The check
|
|
||||||
here is just an optimisation to prevent having to redo a
|
|
||||||
download due to a locked path. */
|
|
||||||
if (pathIsLockedByMe(worker.store.toRealPath(storePath))) {
|
|
||||||
worker.waitForAWhile(shared_from_this());
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
maintainRunningSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.runningSubstitutions);
|
maintainRunningSubstitutions = std::make_unique<MaintainCount<uint64_t>>(worker.runningSubstitutions);
|
||||||
worker.updateProgress();
|
worker.updateProgress();
|
||||||
|
|
||||||
|
@ -4034,12 +4098,6 @@ void SubstitutionGoal::finished()
|
||||||
|
|
||||||
try {
|
try {
|
||||||
promise.get_future().get();
|
promise.get_future().get();
|
||||||
} catch (AlreadyLocked & e) {
|
|
||||||
/* Probably a DerivationGoal is already building this store
|
|
||||||
path. Sleep for a while and try again. */
|
|
||||||
state = &SubstitutionGoal::init;
|
|
||||||
worker.waitForAWhile(shared_from_this());
|
|
||||||
return;
|
|
||||||
} catch (std::exception & e) {
|
} catch (std::exception & e) {
|
||||||
printError(e.what());
|
printError(e.what());
|
||||||
|
|
||||||
|
|
|
@ -36,12 +36,6 @@ Path BasicDerivation::findOutput(const string & id) const
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
bool BasicDerivation::substitutesAllowed() const
|
|
||||||
{
|
|
||||||
return get(env, "allowSubstitutes", "1") == "1";
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
bool BasicDerivation::isBuiltin() const
|
bool BasicDerivation::isBuiltin() const
|
||||||
{
|
{
|
||||||
return string(builder, 0, 8) == "builtin:";
|
return string(builder, 0, 8) == "builtin:";
|
||||||
|
|
|
@ -56,8 +56,6 @@ struct BasicDerivation
|
||||||
the given derivation. */
|
the given derivation. */
|
||||||
Path findOutput(const string & id) const;
|
Path findOutput(const string & id) const;
|
||||||
|
|
||||||
bool substitutesAllowed() const;
|
|
||||||
|
|
||||||
bool isBuiltin() const;
|
bool isBuiltin() const;
|
||||||
|
|
||||||
/* Return true iff this is a fixed-output derivation. */
|
/* Return true iff this is a fixed-output derivation. */
|
||||||
|
|
|
@ -77,13 +77,13 @@ struct CurlDownloader : public Downloader
|
||||||
|
|
||||||
DownloadItem(CurlDownloader & downloader,
|
DownloadItem(CurlDownloader & downloader,
|
||||||
const DownloadRequest & request,
|
const DownloadRequest & request,
|
||||||
Callback<DownloadResult> callback)
|
Callback<DownloadResult> && callback)
|
||||||
: downloader(downloader)
|
: downloader(downloader)
|
||||||
, request(request)
|
, request(request)
|
||||||
, act(*logger, lvlTalkative, actDownload,
|
, act(*logger, lvlTalkative, actDownload,
|
||||||
fmt(request.data ? "uploading '%s'" : "downloading '%s'", request.uri),
|
fmt(request.data ? "uploading '%s'" : "downloading '%s'", request.uri),
|
||||||
{request.uri}, request.parentAct)
|
{request.uri}, request.parentAct)
|
||||||
, callback(callback)
|
, callback(std::move(callback))
|
||||||
, finalSink([this](const unsigned char * data, size_t len) {
|
, finalSink([this](const unsigned char * data, size_t len) {
|
||||||
if (this->request.dataCallback) {
|
if (this->request.dataCallback) {
|
||||||
writtenToSink += len;
|
writtenToSink += len;
|
||||||
|
@ -236,8 +236,6 @@ struct CurlDownloader : public Downloader
|
||||||
return ((DownloadItem *) userp)->readCallback(buffer, size, nitems);
|
return ((DownloadItem *) userp)->readCallback(buffer, size, nitems);
|
||||||
}
|
}
|
||||||
|
|
||||||
long lowSpeedTimeout = 300;
|
|
||||||
|
|
||||||
void init()
|
void init()
|
||||||
{
|
{
|
||||||
if (!req) req = curl_easy_init();
|
if (!req) req = curl_easy_init();
|
||||||
|
@ -297,7 +295,7 @@ struct CurlDownloader : public Downloader
|
||||||
curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, downloadSettings.connectTimeout.get());
|
curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, downloadSettings.connectTimeout.get());
|
||||||
|
|
||||||
curl_easy_setopt(req, CURLOPT_LOW_SPEED_LIMIT, 1L);
|
curl_easy_setopt(req, CURLOPT_LOW_SPEED_LIMIT, 1L);
|
||||||
curl_easy_setopt(req, CURLOPT_LOW_SPEED_TIME, lowSpeedTimeout);
|
curl_easy_setopt(req, CURLOPT_LOW_SPEED_TIME, downloadSettings.stalledDownloadTimeout.get());
|
||||||
|
|
||||||
/* If no file exist in the specified path, curl continues to work
|
/* If no file exist in the specified path, curl continues to work
|
||||||
anyway as if netrc support was disabled. */
|
anyway as if netrc support was disabled. */
|
||||||
|
@ -344,15 +342,9 @@ struct CurlDownloader : public Downloader
|
||||||
(httpStatus == 200 || httpStatus == 201 || httpStatus == 204 || httpStatus == 206 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */))
|
(httpStatus == 200 || httpStatus == 201 || httpStatus == 204 || httpStatus == 206 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */))
|
||||||
{
|
{
|
||||||
result.cached = httpStatus == 304;
|
result.cached = httpStatus == 304;
|
||||||
|
act.progress(result.bodySize, result.bodySize);
|
||||||
done = true;
|
done = true;
|
||||||
|
callback(std::move(result));
|
||||||
try {
|
|
||||||
act.progress(result.bodySize, result.bodySize);
|
|
||||||
callback(std::move(result));
|
|
||||||
} catch (...) {
|
|
||||||
done = true;
|
|
||||||
callback.rethrow();
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
else {
|
else {
|
||||||
|
@ -673,7 +665,7 @@ struct CurlDownloader : public Downloader
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
enqueueItem(std::make_shared<DownloadItem>(*this, request, callback));
|
enqueueItem(std::make_shared<DownloadItem>(*this, request, std::move(callback)));
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@ -24,6 +24,9 @@ struct DownloadSettings : Config
|
||||||
Setting<unsigned long> connectTimeout{this, 0, "connect-timeout",
|
Setting<unsigned long> connectTimeout{this, 0, "connect-timeout",
|
||||||
"Timeout for connecting to servers during downloads. 0 means use curl's builtin default."};
|
"Timeout for connecting to servers during downloads. 0 means use curl's builtin default."};
|
||||||
|
|
||||||
|
Setting<unsigned long> stalledDownloadTimeout{this, 300, "stalled-download-timeout",
|
||||||
|
"Timeout (in seconds) for receiving data from servers during download. Nix cancels idle downloads after this timeout's duration."};
|
||||||
|
|
||||||
Setting<unsigned int> tries{this, 5, "download-attempts",
|
Setting<unsigned int> tries{this, 5, "download-attempts",
|
||||||
"How often Nix will attempt to download a file before giving up."};
|
"How often Nix will attempt to download a file before giving up."};
|
||||||
};
|
};
|
||||||
|
@ -88,6 +91,8 @@ class Store;
|
||||||
|
|
||||||
struct Downloader
|
struct Downloader
|
||||||
{
|
{
|
||||||
|
virtual ~Downloader() { }
|
||||||
|
|
||||||
/* Enqueue a download request, returning a future to the result of
|
/* Enqueue a download request, returning a future to the result of
|
||||||
the download. The future may throw a DownloadError
|
the download. The future may throw a DownloadError
|
||||||
exception. */
|
exception. */
|
||||||
|
|
|
@ -19,6 +19,8 @@ public:
|
||||||
uint64_t narOffset = 0; // regular files only
|
uint64_t narOffset = 0; // regular files only
|
||||||
};
|
};
|
||||||
|
|
||||||
|
virtual ~FSAccessor() { }
|
||||||
|
|
||||||
virtual Stat stat(const Path & path) = 0;
|
virtual Stat stat(const Path & path) = 0;
|
||||||
|
|
||||||
virtual StringSet readDirectory(const Path & path) = 0;
|
virtual StringSet readDirectory(const Path & path) = 0;
|
||||||
|
|
|
@ -29,7 +29,7 @@ static string gcRootsDir = "gcroots";
|
||||||
read. To be precise: when they try to create a new temporary root
|
read. To be precise: when they try to create a new temporary root
|
||||||
file, they will block until the garbage collector has finished /
|
file, they will block until the garbage collector has finished /
|
||||||
yielded the GC lock. */
|
yielded the GC lock. */
|
||||||
int LocalStore::openGCLock(LockType lockType)
|
AutoCloseFD LocalStore::openGCLock(LockType lockType)
|
||||||
{
|
{
|
||||||
Path fnGCLock = (format("%1%/%2%")
|
Path fnGCLock = (format("%1%/%2%")
|
||||||
% stateDir % gcLockName).str();
|
% stateDir % gcLockName).str();
|
||||||
|
@ -49,7 +49,7 @@ int LocalStore::openGCLock(LockType lockType)
|
||||||
process that can open the file for reading can DoS the
|
process that can open the file for reading can DoS the
|
||||||
collector. */
|
collector. */
|
||||||
|
|
||||||
return fdGCLock.release();
|
return fdGCLock;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -221,26 +221,22 @@ void LocalStore::findTempRoots(FDs & fds, Roots & tempRoots, bool censor)
|
||||||
//FDPtr fd(new AutoCloseFD(openLockFile(path, false)));
|
//FDPtr fd(new AutoCloseFD(openLockFile(path, false)));
|
||||||
//if (*fd == -1) continue;
|
//if (*fd == -1) continue;
|
||||||
|
|
||||||
if (path != fnTempRoots) {
|
/* Try to acquire a write lock without blocking. This can
|
||||||
|
only succeed if the owning process has died. In that case
|
||||||
/* Try to acquire a write lock without blocking. This can
|
we don't care about its temporary roots. */
|
||||||
only succeed if the owning process has died. In that case
|
if (lockFile(fd->get(), ltWrite, false)) {
|
||||||
we don't care about its temporary roots. */
|
printError(format("removing stale temporary roots file '%1%'") % path);
|
||||||
if (lockFile(fd->get(), ltWrite, false)) {
|
unlink(path.c_str());
|
||||||
printError(format("removing stale temporary roots file '%1%'") % path);
|
writeFull(fd->get(), "d");
|
||||||
unlink(path.c_str());
|
continue;
|
||||||
writeFull(fd->get(), "d");
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Acquire a read lock. This will prevent the owning process
|
|
||||||
from upgrading to a write lock, therefore it will block in
|
|
||||||
addTempRoot(). */
|
|
||||||
debug(format("waiting for read lock on '%1%'") % path);
|
|
||||||
lockFile(fd->get(), ltRead, true);
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Acquire a read lock. This will prevent the owning process
|
||||||
|
from upgrading to a write lock, therefore it will block in
|
||||||
|
addTempRoot(). */
|
||||||
|
debug(format("waiting for read lock on '%1%'") % path);
|
||||||
|
lockFile(fd->get(), ltRead, true);
|
||||||
|
|
||||||
/* Read the entire file. */
|
/* Read the entire file. */
|
||||||
string contents = readFile(fd->get());
|
string contents = readFile(fd->get());
|
||||||
|
|
||||||
|
@ -694,9 +690,8 @@ void LocalStore::removeUnusedLinks(const GCState & state)
|
||||||
throw SysError(format("statting '%1%'") % path);
|
throw SysError(format("statting '%1%'") % path);
|
||||||
|
|
||||||
if (st.st_nlink != 1) {
|
if (st.st_nlink != 1) {
|
||||||
unsigned long long size = st.st_blocks * 512ULL;
|
actualSize += st.st_size;
|
||||||
actualSize += size;
|
unsharedSize += (st.st_nlink - 1) * st.st_size;
|
||||||
unsharedSize += (st.st_nlink - 1) * size;
|
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -705,7 +700,7 @@ void LocalStore::removeUnusedLinks(const GCState & state)
|
||||||
if (unlink(path.c_str()) == -1)
|
if (unlink(path.c_str()) == -1)
|
||||||
throw SysError(format("deleting '%1%'") % path);
|
throw SysError(format("deleting '%1%'") % path);
|
||||||
|
|
||||||
state.results.bytesFreed += st.st_blocks * 512ULL;
|
state.results.bytesFreed += st.st_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct stat st;
|
struct stat st;
|
||||||
|
@ -871,7 +866,12 @@ void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
|
||||||
|
|
||||||
void LocalStore::autoGC(bool sync)
|
void LocalStore::autoGC(bool sync)
|
||||||
{
|
{
|
||||||
auto getAvail = [this]() {
|
static auto fakeFreeSpaceFile = getEnv("_NIX_TEST_FREE_SPACE_FILE", "");
|
||||||
|
|
||||||
|
auto getAvail = [this]() -> uint64_t {
|
||||||
|
if (!fakeFreeSpaceFile.empty())
|
||||||
|
return std::stoll(readFile(fakeFreeSpaceFile));
|
||||||
|
|
||||||
struct statvfs st;
|
struct statvfs st;
|
||||||
if (statvfs(realStoreDir.c_str(), &st))
|
if (statvfs(realStoreDir.c_str(), &st))
|
||||||
throw SysError("getting filesystem info about '%s'", realStoreDir);
|
throw SysError("getting filesystem info about '%s'", realStoreDir);
|
||||||
|
@ -892,7 +892,7 @@ void LocalStore::autoGC(bool sync)
|
||||||
|
|
||||||
auto now = std::chrono::steady_clock::now();
|
auto now = std::chrono::steady_clock::now();
|
||||||
|
|
||||||
if (now < state->lastGCCheck + std::chrono::seconds(5)) return;
|
if (now < state->lastGCCheck + std::chrono::seconds(settings.minFreeCheckInterval)) return;
|
||||||
|
|
||||||
auto avail = getAvail();
|
auto avail = getAvail();
|
||||||
|
|
||||||
|
@ -919,11 +919,11 @@ void LocalStore::autoGC(bool sync)
|
||||||
promise.set_value();
|
promise.set_value();
|
||||||
});
|
});
|
||||||
|
|
||||||
printInfo("running auto-GC to free %d bytes", settings.maxFree - avail);
|
|
||||||
|
|
||||||
GCOptions options;
|
GCOptions options;
|
||||||
options.maxFreed = settings.maxFree - avail;
|
options.maxFreed = settings.maxFree - avail;
|
||||||
|
|
||||||
|
printInfo("running auto-GC to free %d bytes", options.maxFreed);
|
||||||
|
|
||||||
GCResults results;
|
GCResults results;
|
||||||
|
|
||||||
collectGarbage(options, results);
|
collectGarbage(options, results);
|
||||||
|
|
|
@ -209,6 +209,9 @@ public:
|
||||||
"The paths to make available inside the build sandbox.",
|
"The paths to make available inside the build sandbox.",
|
||||||
{"build-chroot-dirs", "build-sandbox-paths"}};
|
{"build-chroot-dirs", "build-sandbox-paths"}};
|
||||||
|
|
||||||
|
Setting<bool> sandboxFallback{this, true, "sandbox-fallback",
|
||||||
|
"Whether to disable sandboxing when the kernel doesn't allow it."};
|
||||||
|
|
||||||
Setting<PathSet> extraSandboxPaths{this, {}, "extra-sandbox-paths",
|
Setting<PathSet> extraSandboxPaths{this, {}, "extra-sandbox-paths",
|
||||||
"Additional paths to make available inside the build sandbox.",
|
"Additional paths to make available inside the build sandbox.",
|
||||||
{"build-extra-chroot-dirs", "build-extra-sandbox-paths"}};
|
{"build-extra-chroot-dirs", "build-extra-sandbox-paths"}};
|
||||||
|
@ -315,6 +318,9 @@ public:
|
||||||
"pre-build-hook",
|
"pre-build-hook",
|
||||||
"A program to run just before a build to set derivation-specific build settings."};
|
"A program to run just before a build to set derivation-specific build settings."};
|
||||||
|
|
||||||
|
Setting<std::string> postBuildHook{this, "", "post-build-hook",
|
||||||
|
"A program to run just after each succesful build."};
|
||||||
|
|
||||||
Setting<std::string> netrcFile{this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file",
|
Setting<std::string> netrcFile{this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file",
|
||||||
"Path to the netrc file used to obtain usernames/passwords for downloads."};
|
"Path to the netrc file used to obtain usernames/passwords for downloads."};
|
||||||
|
|
||||||
|
@ -342,6 +348,9 @@ public:
|
||||||
Setting<uint64_t> maxFree{this, std::numeric_limits<uint64_t>::max(), "max-free",
|
Setting<uint64_t> maxFree{this, std::numeric_limits<uint64_t>::max(), "max-free",
|
||||||
"Stop deleting garbage when free disk space is above the specified amount."};
|
"Stop deleting garbage when free disk space is above the specified amount."};
|
||||||
|
|
||||||
|
Setting<uint64_t> minFreeCheckInterval{this, 5, "min-free-check-interval",
|
||||||
|
"Number of seconds between checking free disk space."};
|
||||||
|
|
||||||
Setting<Paths> pluginFiles{this, {}, "plugin-files",
|
Setting<Paths> pluginFiles{this, {}, "plugin-files",
|
||||||
"Plugins to dynamically load at nix initialization time."};
|
"Plugins to dynamically load at nix initialization time."};
|
||||||
};
|
};
|
||||||
|
|
|
@ -131,23 +131,25 @@ protected:
|
||||||
}
|
}
|
||||||
|
|
||||||
void getFile(const std::string & path,
|
void getFile(const std::string & path,
|
||||||
Callback<std::shared_ptr<std::string>> callback) override
|
Callback<std::shared_ptr<std::string>> callback) noexcept override
|
||||||
{
|
{
|
||||||
checkEnabled();
|
checkEnabled();
|
||||||
|
|
||||||
auto request(makeRequest(path));
|
auto request(makeRequest(path));
|
||||||
|
|
||||||
|
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
|
||||||
|
|
||||||
getDownloader()->enqueueDownload(request,
|
getDownloader()->enqueueDownload(request,
|
||||||
{[callback, this](std::future<DownloadResult> result) {
|
{[callbackPtr, this](std::future<DownloadResult> result) {
|
||||||
try {
|
try {
|
||||||
callback(result.get().data);
|
(*callbackPtr)(result.get().data);
|
||||||
} catch (DownloadError & e) {
|
} catch (DownloadError & e) {
|
||||||
if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
|
if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
|
||||||
return callback(std::shared_ptr<std::string>());
|
return (*callbackPtr)(std::shared_ptr<std::string>());
|
||||||
maybeDisable();
|
maybeDisable();
|
||||||
callback.rethrow();
|
callbackPtr->rethrow();
|
||||||
} catch (...) {
|
} catch (...) {
|
||||||
callback.rethrow();
|
callbackPtr->rethrow();
|
||||||
}
|
}
|
||||||
}});
|
}});
|
||||||
}
|
}
|
||||||
|
|
|
@ -88,7 +88,7 @@ struct LegacySSHStore : public Store
|
||||||
}
|
}
|
||||||
|
|
||||||
void queryPathInfoUncached(const Path & path,
|
void queryPathInfoUncached(const Path & path,
|
||||||
Callback<std::shared_ptr<ValidPathInfo>> callback) override
|
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override
|
||||||
{
|
{
|
||||||
try {
|
try {
|
||||||
auto conn(connections->get());
|
auto conn(connections->get());
|
||||||
|
|
|
@ -63,6 +63,8 @@ protected:
|
||||||
void LocalBinaryCacheStore::init()
|
void LocalBinaryCacheStore::init()
|
||||||
{
|
{
|
||||||
createDirs(binaryCacheDir + "/nar");
|
createDirs(binaryCacheDir + "/nar");
|
||||||
|
if (writeDebugInfo)
|
||||||
|
createDirs(binaryCacheDir + "/debuginfo");
|
||||||
BinaryCacheStore::init();
|
BinaryCacheStore::init();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -629,7 +629,7 @@ uint64_t LocalStore::addValidPath(State & state,
|
||||||
|
|
||||||
|
|
||||||
void LocalStore::queryPathInfoUncached(const Path & path,
|
void LocalStore::queryPathInfoUncached(const Path & path,
|
||||||
Callback<std::shared_ptr<ValidPathInfo>> callback)
|
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept
|
||||||
{
|
{
|
||||||
try {
|
try {
|
||||||
auto info = std::make_shared<ValidPathInfo>();
|
auto info = std::make_shared<ValidPathInfo>();
|
||||||
|
@ -879,8 +879,8 @@ void LocalStore::querySubstitutablePathInfos(const PathSet & paths,
|
||||||
info->references,
|
info->references,
|
||||||
narInfo ? narInfo->fileSize : 0,
|
narInfo ? narInfo->fileSize : 0,
|
||||||
info->narSize};
|
info->narSize};
|
||||||
} catch (InvalidPath) {
|
} catch (InvalidPath &) {
|
||||||
} catch (SubstituterDisabled) {
|
} catch (SubstituterDisabled &) {
|
||||||
} catch (Error & e) {
|
} catch (Error & e) {
|
||||||
if (settings.tryFallback)
|
if (settings.tryFallback)
|
||||||
printError(e.what());
|
printError(e.what());
|
||||||
|
@ -1210,7 +1210,8 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)
|
||||||
|
|
||||||
bool errors = false;
|
bool errors = false;
|
||||||
|
|
||||||
/* Acquire the global GC lock to prevent a garbage collection. */
|
/* Acquire the global GC lock to get a consistent snapshot of
|
||||||
|
existing and valid paths. */
|
||||||
AutoCloseFD fdGCLock = openGCLock(ltWrite);
|
AutoCloseFD fdGCLock = openGCLock(ltWrite);
|
||||||
|
|
||||||
PathSet store;
|
PathSet store;
|
||||||
|
@ -1221,13 +1222,11 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair)
|
||||||
|
|
||||||
PathSet validPaths2 = queryAllValidPaths(), validPaths, done;
|
PathSet validPaths2 = queryAllValidPaths(), validPaths, done;
|
||||||
|
|
||||||
|
fdGCLock = -1;
|
||||||
|
|
||||||
for (auto & i : validPaths2)
|
for (auto & i : validPaths2)
|
||||||
verifyPath(i, store, done, validPaths, repair, errors);
|
verifyPath(i, store, done, validPaths, repair, errors);
|
||||||
|
|
||||||
/* Release the GC lock so that checking content hashes (which can
|
|
||||||
take ages) doesn't block the GC or builds. */
|
|
||||||
fdGCLock = -1;
|
|
||||||
|
|
||||||
/* Optionally, check the content hashes (slow). */
|
/* Optionally, check the content hashes (slow). */
|
||||||
if (checkContents) {
|
if (checkContents) {
|
||||||
printInfo("checking hashes...");
|
printInfo("checking hashes...");
|
||||||
|
|
|
@ -127,7 +127,7 @@ public:
|
||||||
PathSet queryAllValidPaths() override;
|
PathSet queryAllValidPaths() override;
|
||||||
|
|
||||||
void queryPathInfoUncached(const Path & path,
|
void queryPathInfoUncached(const Path & path,
|
||||||
Callback<std::shared_ptr<ValidPathInfo>> callback) override;
|
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override;
|
||||||
|
|
||||||
void queryReferrers(const Path & path, PathSet & referrers) override;
|
void queryReferrers(const Path & path, PathSet & referrers) override;
|
||||||
|
|
||||||
|
@ -263,7 +263,7 @@ private:
|
||||||
bool isActiveTempFile(const GCState & state,
|
bool isActiveTempFile(const GCState & state,
|
||||||
const Path & path, const string & suffix);
|
const Path & path, const string & suffix);
|
||||||
|
|
||||||
int openGCLock(LockType lockType);
|
AutoCloseFD openGCLock(LockType lockType);
|
||||||
|
|
||||||
void findRoots(const Path & path, unsigned char type, Roots & roots);
|
void findRoots(const Path & path, unsigned char type, Roots & roots);
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,5 @@
|
||||||
#include "derivations.hh"
|
#include "derivations.hh"
|
||||||
|
#include "parsed-derivations.hh"
|
||||||
#include "globals.hh"
|
#include "globals.hh"
|
||||||
#include "local-store.hh"
|
#include "local-store.hh"
|
||||||
#include "store-api.hh"
|
#include "store-api.hh"
|
||||||
|
@ -189,6 +190,7 @@ void Store::queryMissing(const PathSet & targets,
|
||||||
}
|
}
|
||||||
|
|
||||||
Derivation drv = derivationFromPath(i2.first);
|
Derivation drv = derivationFromPath(i2.first);
|
||||||
|
ParsedDerivation parsedDrv(i2.first, drv);
|
||||||
|
|
||||||
PathSet invalid;
|
PathSet invalid;
|
||||||
for (auto & j : drv.outputs)
|
for (auto & j : drv.outputs)
|
||||||
|
@ -197,7 +199,7 @@ void Store::queryMissing(const PathSet & targets,
|
||||||
invalid.insert(j.second.path);
|
invalid.insert(j.second.path);
|
||||||
if (invalid.empty()) return;
|
if (invalid.empty()) return;
|
||||||
|
|
||||||
if (settings.useSubstitutes && drv.substitutesAllowed()) {
|
if (settings.useSubstitutes && parsedDrv.substitutesAllowed()) {
|
||||||
auto drvState = make_ref<Sync<DrvState>>(DrvState(invalid.size()));
|
auto drvState = make_ref<Sync<DrvState>>(DrvState(invalid.size()));
|
||||||
for (auto & output : invalid)
|
for (auto & output : invalid)
|
||||||
pool.enqueue(std::bind(checkOutput, i2.first, make_ref<Derivation>(drv), output, drvState));
|
pool.enqueue(std::bind(checkOutput, i2.first, make_ref<Derivation>(drv), output, drvState));
|
||||||
|
|
|
@ -108,4 +108,9 @@ bool ParsedDerivation::willBuildLocally() const
|
||||||
return getBoolAttr("preferLocalBuild") && canBuildLocally();
|
return getBoolAttr("preferLocalBuild") && canBuildLocally();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool ParsedDerivation::substitutesAllowed() const
|
||||||
|
{
|
||||||
|
return getBoolAttr("allowSubstitutes", true);
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -30,6 +30,8 @@ public:
|
||||||
bool canBuildLocally() const;
|
bool canBuildLocally() const;
|
||||||
|
|
||||||
bool willBuildLocally() const;
|
bool willBuildLocally() const;
|
||||||
|
|
||||||
|
bool substitutesAllowed() const;
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -5,9 +5,10 @@
|
||||||
#include <cerrno>
|
#include <cerrno>
|
||||||
#include <cstdlib>
|
#include <cstdlib>
|
||||||
|
|
||||||
|
#include <fcntl.h>
|
||||||
#include <sys/types.h>
|
#include <sys/types.h>
|
||||||
#include <sys/stat.h>
|
#include <sys/stat.h>
|
||||||
#include <fcntl.h>
|
#include <sys/file.h>
|
||||||
|
|
||||||
|
|
||||||
namespace nix {
|
namespace nix {
|
||||||
|
@ -40,17 +41,14 @@ void deleteLockFile(const Path & path, int fd)
|
||||||
|
|
||||||
bool lockFile(int fd, LockType lockType, bool wait)
|
bool lockFile(int fd, LockType lockType, bool wait)
|
||||||
{
|
{
|
||||||
struct flock lock;
|
int type;
|
||||||
if (lockType == ltRead) lock.l_type = F_RDLCK;
|
if (lockType == ltRead) type = LOCK_SH;
|
||||||
else if (lockType == ltWrite) lock.l_type = F_WRLCK;
|
else if (lockType == ltWrite) type = LOCK_EX;
|
||||||
else if (lockType == ltNone) lock.l_type = F_UNLCK;
|
else if (lockType == ltNone) type = LOCK_UN;
|
||||||
else abort();
|
else abort();
|
||||||
lock.l_whence = SEEK_SET;
|
|
||||||
lock.l_start = 0;
|
|
||||||
lock.l_len = 0; /* entire file */
|
|
||||||
|
|
||||||
if (wait) {
|
if (wait) {
|
||||||
while (fcntl(fd, F_SETLKW, &lock) != 0) {
|
while (flock(fd, type) != 0) {
|
||||||
checkInterrupt();
|
checkInterrupt();
|
||||||
if (errno != EINTR)
|
if (errno != EINTR)
|
||||||
throw SysError(format("acquiring/releasing lock"));
|
throw SysError(format("acquiring/releasing lock"));
|
||||||
|
@ -58,9 +56,9 @@ bool lockFile(int fd, LockType lockType, bool wait)
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
while (fcntl(fd, F_SETLK, &lock) != 0) {
|
while (flock(fd, type | LOCK_NB) != 0) {
|
||||||
checkInterrupt();
|
checkInterrupt();
|
||||||
if (errno == EACCES || errno == EAGAIN) return false;
|
if (errno == EWOULDBLOCK) return false;
|
||||||
if (errno != EINTR)
|
if (errno != EINTR)
|
||||||
throw SysError(format("acquiring/releasing lock"));
|
throw SysError(format("acquiring/releasing lock"));
|
||||||
}
|
}
|
||||||
|
@ -70,14 +68,6 @@ bool lockFile(int fd, LockType lockType, bool wait)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
/* This enables us to check whether are not already holding a lock on
|
|
||||||
a file ourselves. POSIX locks (fcntl) suck in this respect: if we
|
|
||||||
close a descriptor, the previous lock will be closed as well. And
|
|
||||||
there is no way to query whether we already have a lock (F_GETLK
|
|
||||||
only works on locks held by other processes). */
|
|
||||||
static Sync<StringSet> lockedPaths_;
|
|
||||||
|
|
||||||
|
|
||||||
PathLocks::PathLocks()
|
PathLocks::PathLocks()
|
||||||
: deletePaths(false)
|
: deletePaths(false)
|
||||||
{
|
{
|
||||||
|
@ -91,7 +81,7 @@ PathLocks::PathLocks(const PathSet & paths, const string & waitMsg)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
bool PathLocks::lockPaths(const PathSet & _paths,
|
bool PathLocks::lockPaths(const PathSet & paths,
|
||||||
const string & waitMsg, bool wait)
|
const string & waitMsg, bool wait)
|
||||||
{
|
{
|
||||||
assert(fds.empty());
|
assert(fds.empty());
|
||||||
|
@ -99,75 +89,54 @@ bool PathLocks::lockPaths(const PathSet & _paths,
|
||||||
/* Note that `fds' is built incrementally so that the destructor
|
/* Note that `fds' is built incrementally so that the destructor
|
||||||
will only release those locks that we have already acquired. */
|
will only release those locks that we have already acquired. */
|
||||||
|
|
||||||
/* Sort the paths. This assures that locks are always acquired in
|
/* Acquire the lock for each path in sorted order. This ensures
|
||||||
the same order, thus preventing deadlocks. */
|
that locks are always acquired in the same order, thus
|
||||||
Paths paths(_paths.begin(), _paths.end());
|
preventing deadlocks. */
|
||||||
paths.sort();
|
|
||||||
|
|
||||||
/* Acquire the lock for each path. */
|
|
||||||
for (auto & path : paths) {
|
for (auto & path : paths) {
|
||||||
checkInterrupt();
|
checkInterrupt();
|
||||||
Path lockPath = path + ".lock";
|
Path lockPath = path + ".lock";
|
||||||
|
|
||||||
debug(format("locking path '%1%'") % path);
|
debug(format("locking path '%1%'") % path);
|
||||||
|
|
||||||
{
|
AutoCloseFD fd;
|
||||||
auto lockedPaths(lockedPaths_.lock());
|
|
||||||
if (lockedPaths->count(lockPath)) {
|
|
||||||
if (!wait) return false;
|
|
||||||
throw AlreadyLocked("deadlock: trying to re-acquire self-held lock '%s'", lockPath);
|
|
||||||
}
|
|
||||||
lockedPaths->insert(lockPath);
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
while (1) {
|
||||||
|
|
||||||
AutoCloseFD fd;
|
/* Open/create the lock file. */
|
||||||
|
fd = openLockFile(lockPath, true);
|
||||||
|
|
||||||
while (1) {
|
/* Acquire an exclusive lock. */
|
||||||
|
if (!lockFile(fd.get(), ltWrite, false)) {
|
||||||
/* Open/create the lock file. */
|
if (wait) {
|
||||||
fd = openLockFile(lockPath, true);
|
if (waitMsg != "") printError(waitMsg);
|
||||||
|
lockFile(fd.get(), ltWrite, true);
|
||||||
/* Acquire an exclusive lock. */
|
} else {
|
||||||
if (!lockFile(fd.get(), ltWrite, false)) {
|
/* Failed to lock this path; release all other
|
||||||
if (wait) {
|
locks. */
|
||||||
if (waitMsg != "") printError(waitMsg);
|
unlock();
|
||||||
lockFile(fd.get(), ltWrite, true);
|
return false;
|
||||||
} else {
|
|
||||||
/* Failed to lock this path; release all other
|
|
||||||
locks. */
|
|
||||||
unlock();
|
|
||||||
lockedPaths_.lock()->erase(lockPath);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
debug(format("lock acquired on '%1%'") % lockPath);
|
|
||||||
|
|
||||||
/* Check that the lock file hasn't become stale (i.e.,
|
|
||||||
hasn't been unlinked). */
|
|
||||||
struct stat st;
|
|
||||||
if (fstat(fd.get(), &st) == -1)
|
|
||||||
throw SysError(format("statting lock file '%1%'") % lockPath);
|
|
||||||
if (st.st_size != 0)
|
|
||||||
/* This lock file has been unlinked, so we're holding
|
|
||||||
a lock on a deleted file. This means that other
|
|
||||||
processes may create and acquire a lock on
|
|
||||||
`lockPath', and proceed. So we must retry. */
|
|
||||||
debug(format("open lock file '%1%' has become stale") % lockPath);
|
|
||||||
else
|
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Use borrow so that the descriptor isn't closed. */
|
debug(format("lock acquired on '%1%'") % lockPath);
|
||||||
fds.push_back(FDPair(fd.release(), lockPath));
|
|
||||||
|
|
||||||
} catch (...) {
|
/* Check that the lock file hasn't become stale (i.e.,
|
||||||
lockedPaths_.lock()->erase(lockPath);
|
hasn't been unlinked). */
|
||||||
throw;
|
struct stat st;
|
||||||
|
if (fstat(fd.get(), &st) == -1)
|
||||||
|
throw SysError(format("statting lock file '%1%'") % lockPath);
|
||||||
|
if (st.st_size != 0)
|
||||||
|
/* This lock file has been unlinked, so we're holding
|
||||||
|
a lock on a deleted file. This means that other
|
||||||
|
processes may create and acquire a lock on
|
||||||
|
`lockPath', and proceed. So we must retry. */
|
||||||
|
debug(format("open lock file '%1%' has become stale") % lockPath);
|
||||||
|
else
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Use borrow so that the descriptor isn't closed. */
|
||||||
|
fds.push_back(FDPair(fd.release(), lockPath));
|
||||||
}
|
}
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
|
@ -189,8 +158,6 @@ void PathLocks::unlock()
|
||||||
for (auto & i : fds) {
|
for (auto & i : fds) {
|
||||||
if (deletePaths) deleteLockFile(i.second, i.first);
|
if (deletePaths) deleteLockFile(i.second, i.first);
|
||||||
|
|
||||||
lockedPaths_.lock()->erase(i.second);
|
|
||||||
|
|
||||||
if (close(i.first) == -1)
|
if (close(i.first) == -1)
|
||||||
printError(
|
printError(
|
||||||
format("error (ignored): cannot close lock file on '%1%'") % i.second);
|
format("error (ignored): cannot close lock file on '%1%'") % i.second);
|
||||||
|
@ -208,11 +175,4 @@ void PathLocks::setDeletion(bool deletePaths)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
bool pathIsLockedByMe(const Path & path)
|
|
||||||
{
|
|
||||||
Path lockPath = path + ".lock";
|
|
||||||
return lockedPaths_.lock()->count(lockPath);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -16,8 +16,6 @@ enum LockType { ltRead, ltWrite, ltNone };
|
||||||
|
|
||||||
bool lockFile(int fd, LockType lockType, bool wait);
|
bool lockFile(int fd, LockType lockType, bool wait);
|
||||||
|
|
||||||
MakeError(AlreadyLocked, Error);
|
|
||||||
|
|
||||||
class PathLocks
|
class PathLocks
|
||||||
{
|
{
|
||||||
private:
|
private:
|
||||||
|
@ -37,6 +35,4 @@ public:
|
||||||
void setDeletion(bool deletePaths);
|
void setDeletion(bool deletePaths);
|
||||||
};
|
};
|
||||||
|
|
||||||
bool pathIsLockedByMe(const Path & path);
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -191,6 +191,14 @@ void RemoteStore::setOptions(Connection & conn)
|
||||||
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 12) {
|
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 12) {
|
||||||
std::map<std::string, Config::SettingInfo> overrides;
|
std::map<std::string, Config::SettingInfo> overrides;
|
||||||
globalConfig.getSettings(overrides, true);
|
globalConfig.getSettings(overrides, true);
|
||||||
|
overrides.erase(settings.keepFailed.name);
|
||||||
|
overrides.erase(settings.keepGoing.name);
|
||||||
|
overrides.erase(settings.tryFallback.name);
|
||||||
|
overrides.erase(settings.maxBuildJobs.name);
|
||||||
|
overrides.erase(settings.maxSilentTime.name);
|
||||||
|
overrides.erase(settings.buildCores.name);
|
||||||
|
overrides.erase(settings.useSubstitutes.name);
|
||||||
|
overrides.erase(settings.showTrace.name);
|
||||||
conn.to << overrides.size();
|
conn.to << overrides.size();
|
||||||
for (auto & i : overrides)
|
for (auto & i : overrides)
|
||||||
conn.to << i.first << i.second.value;
|
conn.to << i.first << i.second.value;
|
||||||
|
@ -221,7 +229,7 @@ struct ConnectionHandle
|
||||||
|
|
||||||
~ConnectionHandle()
|
~ConnectionHandle()
|
||||||
{
|
{
|
||||||
if (!daemonException && std::uncaught_exception()) {
|
if (!daemonException && std::uncaught_exceptions()) {
|
||||||
handle.markBad();
|
handle.markBad();
|
||||||
debug("closing daemon connection because of an exception");
|
debug("closing daemon connection because of an exception");
|
||||||
}
|
}
|
||||||
|
@ -342,7 +350,7 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
|
||||||
|
|
||||||
|
|
||||||
void RemoteStore::queryPathInfoUncached(const Path & path,
|
void RemoteStore::queryPathInfoUncached(const Path & path,
|
||||||
Callback<std::shared_ptr<ValidPathInfo>> callback)
|
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept
|
||||||
{
|
{
|
||||||
try {
|
try {
|
||||||
std::shared_ptr<ValidPathInfo> info;
|
std::shared_ptr<ValidPathInfo> info;
|
||||||
|
|
|
@ -41,7 +41,7 @@ public:
|
||||||
PathSet queryAllValidPaths() override;
|
PathSet queryAllValidPaths() override;
|
||||||
|
|
||||||
void queryPathInfoUncached(const Path & path,
|
void queryPathInfoUncached(const Path & path,
|
||||||
Callback<std::shared_ptr<ValidPathInfo>> callback) override;
|
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override;
|
||||||
|
|
||||||
void queryReferrers(const Path & path, PathSet & referrers) override;
|
void queryReferrers(const Path & path, PathSet & referrers) override;
|
||||||
|
|
||||||
|
|
|
@ -97,6 +97,10 @@ void checkStoreName(const string & name)
|
||||||
reasons (e.g., "." and ".."). */
|
reasons (e.g., "." and ".."). */
|
||||||
if (string(name, 0, 1) == ".")
|
if (string(name, 0, 1) == ".")
|
||||||
throw Error(baseError % "it is illegal to start the name with a period");
|
throw Error(baseError % "it is illegal to start the name with a period");
|
||||||
|
/* Disallow names longer than 211 characters. ext4’s max is 256,
|
||||||
|
but we need extra space for the hash and .chroot extensions. */
|
||||||
|
if (name.length() > 211)
|
||||||
|
throw Error(baseError % "name must be less than 212 characters");
|
||||||
for (auto & i : name)
|
for (auto & i : name)
|
||||||
if (!((i >= 'A' && i <= 'Z') ||
|
if (!((i >= 'A' && i <= 'Z') ||
|
||||||
(i >= 'a' && i <= 'z') ||
|
(i >= 'a' && i <= 'z') ||
|
||||||
|
@ -325,13 +329,14 @@ ref<const ValidPathInfo> Store::queryPathInfo(const Path & storePath)
|
||||||
|
|
||||||
|
|
||||||
void Store::queryPathInfo(const Path & storePath,
|
void Store::queryPathInfo(const Path & storePath,
|
||||||
Callback<ref<ValidPathInfo>> callback)
|
Callback<ref<ValidPathInfo>> callback) noexcept
|
||||||
{
|
{
|
||||||
assertStorePath(storePath);
|
std::string hashPart;
|
||||||
|
|
||||||
auto hashPart = storePathToHash(storePath);
|
|
||||||
|
|
||||||
try {
|
try {
|
||||||
|
assertStorePath(storePath);
|
||||||
|
|
||||||
|
hashPart = storePathToHash(storePath);
|
||||||
|
|
||||||
{
|
{
|
||||||
auto res = state.lock()->pathInfoCache.get(hashPart);
|
auto res = state.lock()->pathInfoCache.get(hashPart);
|
||||||
|
@ -361,8 +366,10 @@ void Store::queryPathInfo(const Path & storePath,
|
||||||
|
|
||||||
} catch (...) { return callback.rethrow(); }
|
} catch (...) { return callback.rethrow(); }
|
||||||
|
|
||||||
|
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
|
||||||
|
|
||||||
queryPathInfoUncached(storePath,
|
queryPathInfoUncached(storePath,
|
||||||
{[this, storePath, hashPart, callback](std::future<std::shared_ptr<ValidPathInfo>> fut) {
|
{[this, storePath, hashPart, callbackPtr](std::future<std::shared_ptr<ValidPathInfo>> fut) {
|
||||||
|
|
||||||
try {
|
try {
|
||||||
auto info = fut.get();
|
auto info = fut.get();
|
||||||
|
@ -382,8 +389,8 @@ void Store::queryPathInfo(const Path & storePath,
|
||||||
throw InvalidPath("path '%s' is not valid", storePath);
|
throw InvalidPath("path '%s' is not valid", storePath);
|
||||||
}
|
}
|
||||||
|
|
||||||
callback(ref<ValidPathInfo>(info));
|
(*callbackPtr)(ref<ValidPathInfo>(info));
|
||||||
} catch (...) { callback.rethrow(); }
|
} catch (...) { callbackPtr->rethrow(); }
|
||||||
}});
|
}});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -360,12 +360,12 @@ public:
|
||||||
|
|
||||||
/* Asynchronous version of queryPathInfo(). */
|
/* Asynchronous version of queryPathInfo(). */
|
||||||
void queryPathInfo(const Path & path,
|
void queryPathInfo(const Path & path,
|
||||||
Callback<ref<ValidPathInfo>> callback);
|
Callback<ref<ValidPathInfo>> callback) noexcept;
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
|
|
||||||
virtual void queryPathInfoUncached(const Path & path,
|
virtual void queryPathInfoUncached(const Path & path,
|
||||||
Callback<std::shared_ptr<ValidPathInfo>> callback) = 0;
|
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept = 0;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
|
|
||||||
|
|
|
@ -170,7 +170,7 @@ public:
|
||||||
|
|
||||||
~JSONPlaceholder()
|
~JSONPlaceholder()
|
||||||
{
|
{
|
||||||
assert(!first || std::uncaught_exception());
|
assert(!first || std::uncaught_exceptions());
|
||||||
}
|
}
|
||||||
|
|
||||||
template<typename T>
|
template<typename T>
|
||||||
|
|
|
@ -26,6 +26,7 @@ typedef enum {
|
||||||
actVerifyPaths = 107,
|
actVerifyPaths = 107,
|
||||||
actSubstitute = 108,
|
actSubstitute = 108,
|
||||||
actQueryPathInfo = 109,
|
actQueryPathInfo = 109,
|
||||||
|
actPostBuildHook = 110,
|
||||||
} ActivityType;
|
} ActivityType;
|
||||||
|
|
||||||
typedef enum {
|
typedef enum {
|
||||||
|
@ -36,6 +37,7 @@ typedef enum {
|
||||||
resSetPhase = 104,
|
resSetPhase = 104,
|
||||||
resProgress = 105,
|
resProgress = 105,
|
||||||
resSetExpected = 106,
|
resSetExpected = 106,
|
||||||
|
resPostBuildLogLine = 107,
|
||||||
} ResultType;
|
} ResultType;
|
||||||
|
|
||||||
typedef uint64_t ActivityId;
|
typedef uint64_t ActivityId;
|
||||||
|
|
|
@ -179,6 +179,36 @@ struct TeeSource : Source
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
/* A reader that consumes the original Source until 'size'. */
|
||||||
|
struct SizedSource : Source
|
||||||
|
{
|
||||||
|
Source & orig;
|
||||||
|
size_t remain;
|
||||||
|
SizedSource(Source & orig, size_t size)
|
||||||
|
: orig(orig), remain(size) { }
|
||||||
|
size_t read(unsigned char * data, size_t len)
|
||||||
|
{
|
||||||
|
if (this->remain <= 0) {
|
||||||
|
throw EndOfFile("sized: unexpected end-of-file");
|
||||||
|
}
|
||||||
|
len = std::min(len, this->remain);
|
||||||
|
size_t n = this->orig.read(data, len);
|
||||||
|
this->remain -= n;
|
||||||
|
return n;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Consume the original source until no remain data is left to consume. */
|
||||||
|
size_t drainAll()
|
||||||
|
{
|
||||||
|
std::vector<unsigned char> buf(8192);
|
||||||
|
size_t sum = 0;
|
||||||
|
while (this->remain > 0) {
|
||||||
|
size_t n = read(buf.data(), buf.size());
|
||||||
|
sum += n;
|
||||||
|
}
|
||||||
|
return sum;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
/* Convert a function into a sink. */
|
/* Convert a function into a sink. */
|
||||||
struct LambdaSink : Sink
|
struct LambdaSink : Sink
|
||||||
|
|
|
@ -84,6 +84,15 @@ void clearEnv()
|
||||||
unsetenv(name.first.c_str());
|
unsetenv(name.first.c_str());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void replaceEnv(std::map<std::string, std::string> newEnv)
|
||||||
|
{
|
||||||
|
clearEnv();
|
||||||
|
for (auto newEnvVar : newEnv)
|
||||||
|
{
|
||||||
|
setenv(newEnvVar.first.c_str(), newEnvVar.second.c_str(), 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
Path absPath(Path path, Path dir)
|
Path absPath(Path path, Path dir)
|
||||||
{
|
{
|
||||||
|
@ -388,7 +397,7 @@ static void _deletePath(const Path & path, unsigned long long & bytesFreed)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!S_ISDIR(st.st_mode) && st.st_nlink == 1)
|
if (!S_ISDIR(st.st_mode) && st.st_nlink == 1)
|
||||||
bytesFreed += st.st_blocks * 512;
|
bytesFreed += st.st_size;
|
||||||
|
|
||||||
if (S_ISDIR(st.st_mode)) {
|
if (S_ISDIR(st.st_mode)) {
|
||||||
/* Make the directory accessible. */
|
/* Make the directory accessible. */
|
||||||
|
@ -1019,10 +1028,22 @@ void runProgram2(const RunOptions & options)
|
||||||
if (options.standardOut) out.create();
|
if (options.standardOut) out.create();
|
||||||
if (source) in.create();
|
if (source) in.create();
|
||||||
|
|
||||||
|
ProcessOptions processOptions;
|
||||||
|
// vfork implies that the environment of the main process and the fork will
|
||||||
|
// be shared (technically this is undefined, but in practice that's the
|
||||||
|
// case), so we can't use it if we alter the environment
|
||||||
|
if (options.environment)
|
||||||
|
processOptions.allowVfork = false;
|
||||||
|
|
||||||
/* Fork. */
|
/* Fork. */
|
||||||
Pid pid = startProcess([&]() {
|
Pid pid = startProcess([&]() {
|
||||||
|
if (options.environment)
|
||||||
|
replaceEnv(*options.environment);
|
||||||
if (options.standardOut && dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
|
if (options.standardOut && dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
|
||||||
throw SysError("dupping stdout");
|
throw SysError("dupping stdout");
|
||||||
|
if (options.mergeStderrToStdout)
|
||||||
|
if (dup2(STDOUT_FILENO, STDERR_FILENO) == -1)
|
||||||
|
throw SysError("cannot dup stdout into stderr");
|
||||||
if (source && dup2(in.readSide.get(), STDIN_FILENO) == -1)
|
if (source && dup2(in.readSide.get(), STDIN_FILENO) == -1)
|
||||||
throw SysError("dupping stdin");
|
throw SysError("dupping stdin");
|
||||||
|
|
||||||
|
@ -1047,7 +1068,7 @@ void runProgram2(const RunOptions & options)
|
||||||
execv(options.program.c_str(), stringsToCharPtrs(args_).data());
|
execv(options.program.c_str(), stringsToCharPtrs(args_).data());
|
||||||
|
|
||||||
throw SysError("executing '%1%'", options.program);
|
throw SysError("executing '%1%'", options.program);
|
||||||
});
|
}, processOptions);
|
||||||
|
|
||||||
out.writeSide = -1;
|
out.writeSide = -1;
|
||||||
|
|
||||||
|
@ -1148,7 +1169,7 @@ void _interrupted()
|
||||||
/* Block user interrupts while an exception is being handled.
|
/* Block user interrupts while an exception is being handled.
|
||||||
Throwing an exception while another exception is being handled
|
Throwing an exception while another exception is being handled
|
||||||
kills the program! */
|
kills the program! */
|
||||||
if (!interruptThrown && !std::uncaught_exception()) {
|
if (!interruptThrown && !std::uncaught_exceptions()) {
|
||||||
interruptThrown = true;
|
interruptThrown = true;
|
||||||
throw Interrupted("interrupted by the user");
|
throw Interrupted("interrupted by the user");
|
||||||
}
|
}
|
||||||
|
|
|
@ -270,12 +270,14 @@ struct RunOptions
|
||||||
std::optional<uid_t> uid;
|
std::optional<uid_t> uid;
|
||||||
std::optional<uid_t> gid;
|
std::optional<uid_t> gid;
|
||||||
std::optional<Path> chdir;
|
std::optional<Path> chdir;
|
||||||
|
std::optional<std::map<std::string, std::string>> environment;
|
||||||
Path program;
|
Path program;
|
||||||
bool searchPath = true;
|
bool searchPath = true;
|
||||||
Strings args;
|
Strings args;
|
||||||
std::optional<std::string> input;
|
std::optional<std::string> input;
|
||||||
Source * standardIn = nullptr;
|
Source * standardIn = nullptr;
|
||||||
Sink * standardOut = nullptr;
|
Sink * standardOut = nullptr;
|
||||||
|
bool mergeStderrToStdout = false;
|
||||||
bool _killStderr = false;
|
bool _killStderr = false;
|
||||||
|
|
||||||
RunOptions(const Path & program, const Strings & args)
|
RunOptions(const Path & program, const Strings & args)
|
||||||
|
@ -443,21 +445,34 @@ string get(const T & map, const string & key, const string & def = "")
|
||||||
type T or an exception. (We abuse std::future<T> to pass the value or
|
type T or an exception. (We abuse std::future<T> to pass the value or
|
||||||
exception.) */
|
exception.) */
|
||||||
template<typename T>
|
template<typename T>
|
||||||
struct Callback
|
class Callback
|
||||||
{
|
{
|
||||||
std::function<void(std::future<T>)> fun;
|
std::function<void(std::future<T>)> fun;
|
||||||
|
std::atomic_flag done = ATOMIC_FLAG_INIT;
|
||||||
|
|
||||||
|
public:
|
||||||
|
|
||||||
Callback(std::function<void(std::future<T>)> fun) : fun(fun) { }
|
Callback(std::function<void(std::future<T>)> fun) : fun(fun) { }
|
||||||
|
|
||||||
void operator()(T && t) const
|
Callback(Callback && callback) : fun(std::move(callback.fun))
|
||||||
{
|
{
|
||||||
|
auto prev = callback.done.test_and_set();
|
||||||
|
if (prev) done.test_and_set();
|
||||||
|
}
|
||||||
|
|
||||||
|
void operator()(T && t) noexcept
|
||||||
|
{
|
||||||
|
auto prev = done.test_and_set();
|
||||||
|
assert(!prev);
|
||||||
std::promise<T> promise;
|
std::promise<T> promise;
|
||||||
promise.set_value(std::move(t));
|
promise.set_value(std::move(t));
|
||||||
fun(promise.get_future());
|
fun(promise.get_future());
|
||||||
}
|
}
|
||||||
|
|
||||||
void rethrow(const std::exception_ptr & exc = std::current_exception()) const
|
void rethrow(const std::exception_ptr & exc = std::current_exception()) noexcept
|
||||||
{
|
{
|
||||||
|
auto prev = done.test_and_set();
|
||||||
|
assert(!prev);
|
||||||
std::promise<T> promise;
|
std::promise<T> promise;
|
||||||
promise.set_exception(exc);
|
promise.set_exception(exc);
|
||||||
fun(promise.get_future());
|
fun(promise.get_future());
|
||||||
|
|
|
@ -280,7 +280,7 @@ static void _main(int argc, char * * argv)
|
||||||
auto absolute = i;
|
auto absolute = i;
|
||||||
try {
|
try {
|
||||||
absolute = canonPath(absPath(i), true);
|
absolute = canonPath(absPath(i), true);
|
||||||
} catch (Error e) {};
|
} catch (Error & e) {};
|
||||||
if (store->isStorePath(absolute) && std::regex_match(absolute, std::regex(".*\\.drv(!.*)?")))
|
if (store->isStorePath(absolute) && std::regex_match(absolute, std::regex(".*\\.drv(!.*)?")))
|
||||||
drvs.push_back(DrvInfo(*state, store, absolute));
|
drvs.push_back(DrvInfo(*state, store, absolute));
|
||||||
else
|
else
|
||||||
|
|
|
@ -950,8 +950,16 @@ static void opServe(Strings opFlags, Strings opArgs)
|
||||||
info.sigs = readStrings<StringSet>(in);
|
info.sigs = readStrings<StringSet>(in);
|
||||||
in >> info.ca;
|
in >> info.ca;
|
||||||
|
|
||||||
// FIXME: race if addToStore doesn't read source?
|
if (info.narSize == 0) {
|
||||||
store->addToStore(info, in, NoRepair, NoCheckSigs);
|
throw Error("narInfo is too old and missing the narSize field");
|
||||||
|
}
|
||||||
|
|
||||||
|
SizedSource sizedSource(in, info.narSize);
|
||||||
|
|
||||||
|
store->addToStore(info, sizedSource, NoRepair, NoCheckSigs);
|
||||||
|
|
||||||
|
// consume all the data that has been sent before continuing.
|
||||||
|
sizedSource.drainAll();
|
||||||
|
|
||||||
out << 1; // indicate success
|
out << 1; // indicate success
|
||||||
|
|
||||||
|
|
|
@ -55,7 +55,7 @@ struct CmdEdit : InstallableCommand
|
||||||
int lineno;
|
int lineno;
|
||||||
try {
|
try {
|
||||||
lineno = std::stoi(std::string(pos, colon + 1));
|
lineno = std::stoi(std::string(pos, colon + 1));
|
||||||
} catch (std::invalid_argument e) {
|
} catch (std::invalid_argument & e) {
|
||||||
throw Error("cannot parse line number '%s'", pos);
|
throw Error("cannot parse line number '%s'", pos);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -170,6 +170,14 @@ public:
|
||||||
name, sub);
|
name, sub);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (type == actPostBuildHook) {
|
||||||
|
auto name = storePathToName(getS(fields, 0));
|
||||||
|
if (hasSuffix(name, ".drv"))
|
||||||
|
name.resize(name.size() - 4);
|
||||||
|
i->s = fmt("post-build " ANSI_BOLD "%s" ANSI_NORMAL, name);
|
||||||
|
i->name = DrvName(name).name;
|
||||||
|
}
|
||||||
|
|
||||||
if (type == actQueryPathInfo) {
|
if (type == actQueryPathInfo) {
|
||||||
auto name = storePathToName(getS(fields, 0));
|
auto name = storePathToName(getS(fields, 0));
|
||||||
i->s = fmt("querying " ANSI_BOLD "%s" ANSI_NORMAL " on %s", name, getS(fields, 1));
|
i->s = fmt("querying " ANSI_BOLD "%s" ANSI_NORMAL " on %s", name, getS(fields, 1));
|
||||||
|
@ -228,14 +236,18 @@ public:
|
||||||
update(*state);
|
update(*state);
|
||||||
}
|
}
|
||||||
|
|
||||||
else if (type == resBuildLogLine) {
|
else if (type == resBuildLogLine || type == resPostBuildLogLine) {
|
||||||
auto lastLine = trim(getS(fields, 0));
|
auto lastLine = trim(getS(fields, 0));
|
||||||
if (!lastLine.empty()) {
|
if (!lastLine.empty()) {
|
||||||
auto i = state->its.find(act);
|
auto i = state->its.find(act);
|
||||||
assert(i != state->its.end());
|
assert(i != state->its.end());
|
||||||
ActInfo info = *i->second;
|
ActInfo info = *i->second;
|
||||||
if (printBuildLogs) {
|
if (printBuildLogs) {
|
||||||
log(*state, lvlInfo, ANSI_FAINT + info.name.value_or("unnamed") + "> " + ANSI_NORMAL + lastLine);
|
auto suffix = "> ";
|
||||||
|
if (type == resPostBuildLogLine) {
|
||||||
|
suffix = " (post)> ";
|
||||||
|
}
|
||||||
|
log(*state, lvlInfo, ANSI_FAINT + info.name.value_or("unnamed") + suffix + ANSI_NORMAL + lastLine);
|
||||||
} else {
|
} else {
|
||||||
state->activities.erase(i->second);
|
state->activities.erase(i->second);
|
||||||
info.lastLine = lastLine;
|
info.lastLine = lastLine;
|
||||||
|
|
|
@ -199,7 +199,10 @@ void chrootHelper(int argc, char * * argv)
|
||||||
uid_t gid = getgid();
|
uid_t gid = getgid();
|
||||||
|
|
||||||
if (unshare(CLONE_NEWUSER | CLONE_NEWNS) == -1)
|
if (unshare(CLONE_NEWUSER | CLONE_NEWNS) == -1)
|
||||||
throw SysError("setting up a private mount namespace");
|
/* Try with just CLONE_NEWNS in case user namespaces are
|
||||||
|
specifically disabled. */
|
||||||
|
if (unshare(CLONE_NEWNS) == -1)
|
||||||
|
throw SysError("setting up a private mount namespace");
|
||||||
|
|
||||||
/* Bind-mount realStoreDir on /nix/store. If the latter mount
|
/* Bind-mount realStoreDir on /nix/store. If the latter mount
|
||||||
point doesn't already exists, we have to create a chroot
|
point doesn't already exists, we have to create a chroot
|
||||||
|
|
|
@ -17,6 +17,7 @@ let {
|
||||||
builder = ./dependencies.builder0.sh + "/FOOBAR/../.";
|
builder = ./dependencies.builder0.sh + "/FOOBAR/../.";
|
||||||
input1 = input1 + "/.";
|
input1 = input1 + "/.";
|
||||||
input2 = "${input2}/.";
|
input2 = "${input2}/.";
|
||||||
|
input1_drv = input1;
|
||||||
meta.description = "Random test package";
|
meta.description = "Random test package";
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
85
tests/function-trace.sh
Executable file
85
tests/function-trace.sh
Executable file
|
@ -0,0 +1,85 @@
|
||||||
|
source common.sh
|
||||||
|
|
||||||
|
set +x
|
||||||
|
|
||||||
|
expect_trace() {
|
||||||
|
expr="$1"
|
||||||
|
expect="$2"
|
||||||
|
actual=$(
|
||||||
|
nix-instantiate \
|
||||||
|
--trace-function-calls \
|
||||||
|
--expr "$expr" 2>&1 \
|
||||||
|
| grep "function-trace" \
|
||||||
|
| sed -e 's/ [0-9]*$//'
|
||||||
|
);
|
||||||
|
|
||||||
|
echo -n "Tracing expression '$expr'"
|
||||||
|
set +e
|
||||||
|
msg=$(diff -swB \
|
||||||
|
<(echo "$expect") \
|
||||||
|
<(echo "$actual")
|
||||||
|
);
|
||||||
|
result=$?
|
||||||
|
set -e
|
||||||
|
if [ $result -eq 0 ]; then
|
||||||
|
echo " ok."
|
||||||
|
else
|
||||||
|
echo " failed. difference:"
|
||||||
|
echo "$msg"
|
||||||
|
return $result
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
# failure inside a tryEval
|
||||||
|
expect_trace 'builtins.tryEval (throw "example")' "
|
||||||
|
function-trace entered undefined position at
|
||||||
|
function-trace exited undefined position at
|
||||||
|
function-trace entered (string):1:1 at
|
||||||
|
function-trace entered (string):1:19 at
|
||||||
|
function-trace exited (string):1:19 at
|
||||||
|
function-trace exited (string):1:1 at
|
||||||
|
"
|
||||||
|
|
||||||
|
# Missing argument to a formal function
|
||||||
|
expect_trace '({ x }: x) { }' "
|
||||||
|
function-trace entered undefined position at
|
||||||
|
function-trace exited undefined position at
|
||||||
|
function-trace entered (string):1:1 at
|
||||||
|
function-trace exited (string):1:1 at
|
||||||
|
"
|
||||||
|
|
||||||
|
# Too many arguments to a formal function
|
||||||
|
expect_trace '({ x }: x) { x = "x"; y = "y"; }' "
|
||||||
|
function-trace entered undefined position at
|
||||||
|
function-trace exited undefined position at
|
||||||
|
function-trace entered (string):1:1 at
|
||||||
|
function-trace exited (string):1:1 at
|
||||||
|
"
|
||||||
|
|
||||||
|
# Not enough arguments to a lambda
|
||||||
|
expect_trace '(x: y: x + y) 1' "
|
||||||
|
function-trace entered undefined position at
|
||||||
|
function-trace exited undefined position at
|
||||||
|
function-trace entered (string):1:1 at
|
||||||
|
function-trace exited (string):1:1 at
|
||||||
|
"
|
||||||
|
|
||||||
|
# Too many arguments to a lambda
|
||||||
|
expect_trace '(x: x) 1 2' "
|
||||||
|
function-trace entered undefined position at
|
||||||
|
function-trace exited undefined position at
|
||||||
|
function-trace entered (string):1:1 at
|
||||||
|
function-trace exited (string):1:1 at
|
||||||
|
function-trace entered (string):1:1 at
|
||||||
|
function-trace exited (string):1:1 at
|
||||||
|
"
|
||||||
|
|
||||||
|
# Not a function
|
||||||
|
expect_trace '1 2' "
|
||||||
|
function-trace entered undefined position at
|
||||||
|
function-trace exited undefined position at
|
||||||
|
function-trace entered (string):1:1 at
|
||||||
|
function-trace exited (string):1:1 at
|
||||||
|
"
|
||||||
|
|
||||||
|
set -e
|
70
tests/gc-auto.sh
Normal file
70
tests/gc-auto.sh
Normal file
|
@ -0,0 +1,70 @@
|
||||||
|
source common.sh
|
||||||
|
|
||||||
|
clearStore
|
||||||
|
|
||||||
|
garbage1=$(nix add-to-store --name garbage1 ./nar-access.sh)
|
||||||
|
garbage2=$(nix add-to-store --name garbage2 ./nar-access.sh)
|
||||||
|
garbage3=$(nix add-to-store --name garbage3 ./nar-access.sh)
|
||||||
|
|
||||||
|
ls -l $garbage3
|
||||||
|
POSIXLY_CORRECT=1 du $garbage3
|
||||||
|
|
||||||
|
fake_free=$TEST_ROOT/fake-free
|
||||||
|
export _NIX_TEST_FREE_SPACE_FILE=$fake_free
|
||||||
|
echo 1100 > $fake_free
|
||||||
|
|
||||||
|
expr=$(cat <<EOF
|
||||||
|
with import ./config.nix; mkDerivation {
|
||||||
|
name = "gc-A";
|
||||||
|
buildCommand = ''
|
||||||
|
set -x
|
||||||
|
[[ \$(ls \$NIX_STORE/*-garbage? | wc -l) = 3 ]]
|
||||||
|
mkdir \$out
|
||||||
|
echo foo > \$out/bar
|
||||||
|
echo 1...
|
||||||
|
sleep 2
|
||||||
|
echo 200 > ${fake_free}.tmp1
|
||||||
|
mv ${fake_free}.tmp1 $fake_free
|
||||||
|
echo 2...
|
||||||
|
sleep 2
|
||||||
|
echo 3...
|
||||||
|
sleep 2
|
||||||
|
echo 4...
|
||||||
|
[[ \$(ls \$NIX_STORE/*-garbage? | wc -l) = 1 ]]
|
||||||
|
'';
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
)
|
||||||
|
|
||||||
|
expr2=$(cat <<EOF
|
||||||
|
with import ./config.nix; mkDerivation {
|
||||||
|
name = "gc-B";
|
||||||
|
buildCommand = ''
|
||||||
|
set -x
|
||||||
|
mkdir \$out
|
||||||
|
echo foo > \$out/bar
|
||||||
|
echo 1...
|
||||||
|
sleep 2
|
||||||
|
echo 200 > ${fake_free}.tmp2
|
||||||
|
mv ${fake_free}.tmp2 $fake_free
|
||||||
|
echo 2...
|
||||||
|
sleep 2
|
||||||
|
echo 3...
|
||||||
|
sleep 2
|
||||||
|
echo 4...
|
||||||
|
'';
|
||||||
|
}
|
||||||
|
EOF
|
||||||
|
)
|
||||||
|
|
||||||
|
nix build -v -o $TEST_ROOT/result-A -L "($expr)" \
|
||||||
|
--min-free 1000 --max-free 2000 --min-free-check-interval 1 &
|
||||||
|
pid=$!
|
||||||
|
|
||||||
|
nix build -v -o $TEST_ROOT/result-B -L "($expr2)" \
|
||||||
|
--min-free 1000 --max-free 2000 --min-free-check-interval 1
|
||||||
|
|
||||||
|
wait "$pid"
|
||||||
|
|
||||||
|
[[ foo = $(cat $TEST_ROOT/result-A/bar) ]]
|
||||||
|
[[ foo = $(cat $TEST_ROOT/result-B/bar) ]]
|
|
@ -3,7 +3,9 @@ check:
|
||||||
|
|
||||||
nix_tests = \
|
nix_tests = \
|
||||||
init.sh hash.sh lang.sh add.sh simple.sh dependencies.sh \
|
init.sh hash.sh lang.sh add.sh simple.sh dependencies.sh \
|
||||||
gc.sh gc-concurrent.sh \
|
gc.sh \
|
||||||
|
gc-concurrent.sh \
|
||||||
|
gc-auto.sh \
|
||||||
referrers.sh user-envs.sh logging.sh nix-build.sh misc.sh fixed.sh \
|
referrers.sh user-envs.sh logging.sh nix-build.sh misc.sh fixed.sh \
|
||||||
gc-runtime.sh check-refs.sh filter-source.sh \
|
gc-runtime.sh check-refs.sh filter-source.sh \
|
||||||
remote-store.sh export.sh export-graph.sh \
|
remote-store.sh export.sh export-graph.sh \
|
||||||
|
@ -26,7 +28,9 @@ nix_tests = \
|
||||||
check.sh \
|
check.sh \
|
||||||
plugins.sh \
|
plugins.sh \
|
||||||
search.sh \
|
search.sh \
|
||||||
nix-copy-ssh.sh
|
nix-copy-ssh.sh \
|
||||||
|
post-hook.sh \
|
||||||
|
function-trace.sh
|
||||||
# parallel.sh
|
# parallel.sh
|
||||||
|
|
||||||
install-tests += $(foreach x, $(nix_tests), tests/$(x))
|
install-tests += $(foreach x, $(nix_tests), tests/$(x))
|
||||||
|
|
15
tests/post-hook.sh
Normal file
15
tests/post-hook.sh
Normal file
|
@ -0,0 +1,15 @@
|
||||||
|
source common.sh
|
||||||
|
|
||||||
|
clearStore
|
||||||
|
|
||||||
|
export REMOTE_STORE=$TEST_ROOT/remote_store
|
||||||
|
|
||||||
|
# Build the dependencies and push them to the remote store
|
||||||
|
nix-build -o $TEST_ROOT/result dependencies.nix --post-build-hook $PWD/push-to-store.sh
|
||||||
|
|
||||||
|
clearStore
|
||||||
|
|
||||||
|
# Ensure that we the remote store contains both the runtime and buildtime
|
||||||
|
# closure of what we've just built
|
||||||
|
nix copy --from "$REMOTE_STORE" --no-require-sigs -f dependencies.nix
|
||||||
|
nix copy --from "$REMOTE_STORE" --no-require-sigs -f dependencies.nix input1_drv
|
4
tests/push-to-store.sh
Executable file
4
tests/push-to-store.sh
Executable file
|
@ -0,0 +1,4 @@
|
||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
echo Pushing "$@" to "$REMOTE_STORE"
|
||||||
|
printf "%s" "$OUT_PATHS" | xargs -d: nix copy --to "$REMOTE_STORE" --no-require-sigs
|
Loading…
Reference in a new issue