S3: c'est oui

This commit is contained in:
Félix Baylac Jacqué 2024-01-23 14:06:45 +01:00
parent ebd390987f
commit 627906e6e6
15 changed files with 342 additions and 66 deletions

189
Cargo.lock generated
View File

@ -322,6 +322,37 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "aws-config"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "004dc45f6b869e6a70725df448004a720b7f52f6607d55d8815cbd5448f86def"
dependencies = [
"aws-credential-types",
"aws-http 0.60.0",
"aws-runtime",
"aws-sdk-sso",
"aws-sdk-ssooidc",
"aws-sdk-sts",
"aws-smithy-async",
"aws-smithy-http 0.60.0",
"aws-smithy-json 0.60.0",
"aws-smithy-runtime",
"aws-smithy-runtime-api",
"aws-smithy-types",
"aws-types",
"bytes",
"fastrand",
"hex",
"http",
"hyper",
"ring",
"time",
"tokio",
"tracing",
"zeroize",
]
[[package]] [[package]]
name = "aws-credential-types" name = "aws-credential-types"
version = "1.1.0" version = "1.1.0"
@ -334,6 +365,22 @@ dependencies = [
"zeroize", "zeroize",
] ]
[[package]]
name = "aws-http"
version = "0.60.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "361c4310fdce94328cc2d1ca0c8a48c13f43009c61d3367585685a50ca8c66b6"
dependencies = [
"aws-smithy-runtime-api",
"aws-smithy-types",
"aws-types",
"bytes",
"http",
"http-body",
"pin-project-lite",
"tracing",
]
[[package]] [[package]]
name = "aws-http" name = "aws-http"
version = "0.61.0" version = "0.61.0"
@ -357,11 +404,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c6d61ac3425f2bd1d69393b96569a7408467f8927a5cfeba597b19f78ebb185" checksum = "5c6d61ac3425f2bd1d69393b96569a7408467f8927a5cfeba597b19f78ebb185"
dependencies = [ dependencies = [
"aws-credential-types", "aws-credential-types",
"aws-http", "aws-http 0.61.0",
"aws-sigv4", "aws-sigv4",
"aws-smithy-async", "aws-smithy-async",
"aws-smithy-eventstream", "aws-smithy-eventstream",
"aws-smithy-http", "aws-smithy-http 0.61.0",
"aws-smithy-runtime-api", "aws-smithy-runtime-api",
"aws-smithy-types", "aws-smithy-types",
"aws-types", "aws-types",
@ -379,18 +426,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "693ff3ba604fa0db18799fb770c7cde5c3e9302a7b7644647c4e46a615b96e23" checksum = "693ff3ba604fa0db18799fb770c7cde5c3e9302a7b7644647c4e46a615b96e23"
dependencies = [ dependencies = [
"aws-credential-types", "aws-credential-types",
"aws-http", "aws-http 0.61.0",
"aws-runtime", "aws-runtime",
"aws-sigv4", "aws-sigv4",
"aws-smithy-async", "aws-smithy-async",
"aws-smithy-checksums", "aws-smithy-checksums",
"aws-smithy-eventstream", "aws-smithy-eventstream",
"aws-smithy-http", "aws-smithy-http 0.61.0",
"aws-smithy-json", "aws-smithy-json 0.61.0",
"aws-smithy-runtime", "aws-smithy-runtime",
"aws-smithy-runtime-api", "aws-smithy-runtime-api",
"aws-smithy-types", "aws-smithy-types",
"aws-smithy-xml", "aws-smithy-xml 0.61.0",
"aws-types", "aws-types",
"bytes", "bytes",
"http", "http",
@ -402,6 +449,73 @@ dependencies = [
"url", "url",
] ]
[[package]]
name = "aws-sdk-sso"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86575c7604dcdb583aba3390200e5333d8e4fe597bad54f57b190aaf4fac9771"
dependencies = [
"aws-credential-types",
"aws-http 0.60.0",
"aws-runtime",
"aws-smithy-async",
"aws-smithy-http 0.60.0",
"aws-smithy-json 0.60.0",
"aws-smithy-runtime",
"aws-smithy-runtime-api",
"aws-smithy-types",
"aws-types",
"bytes",
"http",
"regex",
"tracing",
]
[[package]]
name = "aws-sdk-ssooidc"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef0d7c1d0730adb5e85407174483a579e39576e0f4350ecd0fac69ec1217b1b"
dependencies = [
"aws-credential-types",
"aws-http 0.60.0",
"aws-runtime",
"aws-smithy-async",
"aws-smithy-http 0.60.0",
"aws-smithy-json 0.60.0",
"aws-smithy-runtime",
"aws-smithy-runtime-api",
"aws-smithy-types",
"aws-types",
"bytes",
"http",
"regex",
"tracing",
]
[[package]]
name = "aws-sdk-sts"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f45778089751d5aa8645a02dd60865fa0eea39f00be5db2c7779bc50b83db19a"
dependencies = [
"aws-credential-types",
"aws-http 0.60.0",
"aws-runtime",
"aws-smithy-async",
"aws-smithy-http 0.60.0",
"aws-smithy-json 0.60.0",
"aws-smithy-query",
"aws-smithy-runtime",
"aws-smithy-runtime-api",
"aws-smithy-types",
"aws-smithy-xml 0.60.3",
"aws-types",
"http",
"regex",
"tracing",
]
[[package]] [[package]]
name = "aws-sigv4" name = "aws-sigv4"
version = "1.1.0" version = "1.1.0"
@ -410,7 +524,7 @@ checksum = "82f39bf5bfa061fd1487a7ba274927dd6d70feed5cecaf3367932bcc83148d8f"
dependencies = [ dependencies = [
"aws-credential-types", "aws-credential-types",
"aws-smithy-eventstream", "aws-smithy-eventstream",
"aws-smithy-http", "aws-smithy-http 0.61.0",
"aws-smithy-runtime-api", "aws-smithy-runtime-api",
"aws-smithy-types", "aws-smithy-types",
"bytes", "bytes",
@ -447,7 +561,7 @@ version = "0.61.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3c4fd37c10269ad70de25cfbe29f52c1ae6fc48606a2b1ed2c4bdeb624d5da9" checksum = "d3c4fd37c10269ad70de25cfbe29f52c1ae6fc48606a2b1ed2c4bdeb624d5da9"
dependencies = [ dependencies = [
"aws-smithy-http", "aws-smithy-http 0.61.0",
"aws-smithy-types", "aws-smithy-types",
"bytes", "bytes",
"crc32c", "crc32c",
@ -473,6 +587,26 @@ dependencies = [
"crc32fast", "crc32fast",
] ]
[[package]]
name = "aws-smithy-http"
version = "0.60.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b1de8aee22f67de467b2e3d0dd0fb30859dc53f579a63bd5381766b987db644"
dependencies = [
"aws-smithy-runtime-api",
"aws-smithy-types",
"bytes",
"bytes-utils",
"futures-core",
"http",
"http-body",
"once_cell",
"percent-encoding",
"pin-project-lite",
"pin-utils",
"tracing",
]
[[package]] [[package]]
name = "aws-smithy-http" name = "aws-smithy-http"
version = "0.61.0" version = "0.61.0"
@ -494,6 +628,15 @@ dependencies = [
"tracing", "tracing",
] ]
[[package]]
name = "aws-smithy-json"
version = "0.60.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a46dd338dc9576d6a6a5b5a19bd678dcad018ececee11cf28ecd7588bd1a55c"
dependencies = [
"aws-smithy-types",
]
[[package]] [[package]]
name = "aws-smithy-json" name = "aws-smithy-json"
version = "0.61.0" version = "0.61.0"
@ -503,6 +646,16 @@ dependencies = [
"aws-smithy-types", "aws-smithy-types",
] ]
[[package]]
name = "aws-smithy-query"
version = "0.60.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "feb5b8c7a86d4b6399169670723b7e6f21a39fc833a30f5c5a2f997608178129"
dependencies = [
"aws-smithy-types",
"urlencoding",
]
[[package]] [[package]]
name = "aws-smithy-runtime" name = "aws-smithy-runtime"
version = "1.1.0" version = "1.1.0"
@ -510,7 +663,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0de8c54dd9c5a159013f1e6885cb7c1ae8fc98dc286d2aebe71737effef28e37" checksum = "0de8c54dd9c5a159013f1e6885cb7c1ae8fc98dc286d2aebe71737effef28e37"
dependencies = [ dependencies = [
"aws-smithy-async", "aws-smithy-async",
"aws-smithy-http", "aws-smithy-http 0.61.0",
"aws-smithy-runtime-api", "aws-smithy-runtime-api",
"aws-smithy-types", "aws-smithy-types",
"bytes", "bytes",
@ -541,6 +694,7 @@ dependencies = [
"pin-project-lite", "pin-project-lite",
"tokio", "tokio",
"tracing", "tracing",
"zeroize",
] ]
[[package]] [[package]]
@ -566,6 +720,15 @@ dependencies = [
"tokio-util", "tokio-util",
] ]
[[package]]
name = "aws-smithy-xml"
version = "0.60.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef796feaf894d7fd03869235237aeffe73ed1b29a3927cceeee2eecadf876eba"
dependencies = [
"xmlparser",
]
[[package]] [[package]]
name = "aws-smithy-xml" name = "aws-smithy-xml"
version = "0.61.0" version = "0.61.0"
@ -1674,7 +1837,9 @@ version = "0.1.0"
dependencies = [ dependencies = [
"actix-web", "actix-web",
"anyhow", "anyhow",
"aws-config",
"aws-sdk-s3", "aws-sdk-s3",
"aws-types",
"chrono", "chrono",
"clap", "clap",
"deadpool-postgres", "deadpool-postgres",
@ -2873,6 +3038,12 @@ dependencies = [
"serde", "serde",
] ]
[[package]]
name = "urlencoding"
version = "2.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da"
[[package]] [[package]]
name = "uuid" name = "uuid"
version = "1.4.1" version = "1.4.1"

View File

@ -22,6 +22,8 @@ refinery = { version = "0.8.11", features = ["tokio-postgres"] }
uuid = { version = "1.4.1", features = ["v4"] } uuid = { version = "1.4.1", features = ["v4"] }
chrono = { version = "*", features = ["serde"] } chrono = { version = "*", features = ["serde"] }
aws-sdk-s3 = "1.6.0" aws-sdk-s3 = "1.6.0"
aws-config = "1.0.3"
aws-types = "*"
[dependencies.heck] [dependencies.heck]
version = "0.4.1" version = "0.4.1"

View File

@ -30,6 +30,7 @@ CREATE TABLE Projects (
id SERIAL PRIMARY KEY NOT NULL, id SERIAL PRIMARY KEY NOT NULL,
name text NOT NULL UNIQUE, name text NOT NULL UNIQUE,
binary_cache_id integer NOT NULL, binary_cache_id integer NOT NULL,
closure_generation_nb integer NOT NULL,
-- TODO: figure out rules -- TODO: figure out rules
CONSTRAINT fk_project_binary_cache FOREIGN KEY (binary_cache_id) REFERENCES BinaryCaches(id) CONSTRAINT fk_project_binary_cache FOREIGN KEY (binary_cache_id) REFERENCES BinaryCaches(id)
); );
@ -43,13 +44,28 @@ CREATE TABLE ProjectTokens (
CREATE TABLE Closures ( CREATE TABLE Closures (
id SERIAL PRIMARY KEY NOT NULL, id SERIAL PRIMARY KEY NOT NULL,
project_id integer NOT NULL, project_id INTEGER NOT NULL,
objects text[] NOT NULL, objects text[] NOT NULL,
date timestamp NOT NULL, date timestamp NOT NULL,
CONSTRAINT fk_project_closure FOREIGN KEY (project_id) REFERENCES Projects(id) CONSTRAINT fk_project_closure FOREIGN KEY (project_id) REFERENCES Projects(id)
); );
CREATE TABLE Objects (
id SERIAL PRIMARY KEY NOT NULL,
key text NOT NULL
);
CREATE TABLE ObjectClosure (
object_id INTEGER NOT NULL,
closure_id INTEGER NOT NULL,
CONSTRAINT fk_objectclosure_object FOREIGN KEY (object_id) REFERENCES Objects(id),
CONSTRAINT fk_objectclosure_closure FOREIGN KEY (closure_id) REFERENCES Closures(id),
PRIMARY KEY (object_id, closure_id)
);
-- We'll mostly be querying the Keys using the associated uid. -- We'll mostly be querying the Keys using the associated uid.
CREATE INDEX idx_keys_uid ON Keys USING HASH (user_id); CREATE INDEX idx_keys_uid ON Keys USING HASH (user_id);
-- We'll often be sorting Closures by their datetime. -- We'll often be sorting Closures by their datetime.
CREATE INDEX idx_date_closures ON Closures USING HASH (date); CREATE INDEX idx_date_closures ON Closures USING HASH (date);
-- We'll be querying objects by their names.
CREATE INDEX idx_objects_key ON Objects USING HASH (key);

View File

@ -6,11 +6,14 @@ dbname="nomnomdev"
port="12345" port="12345"
dbdir="$(mktemp -d)" dbdir="$(mktemp -d)"
garagedir="$(mktemp -d)"
garageaddr="[::1]:3900"
garagebucket="nix-cache"
cfgfile="${dbdir}/config.json" cfgfile="${dbdir}/config.json"
trap 'rm -rf ${dbdir}' EXIT trap 'rm -rf ${dbdir}' EXIT
initdb "$dbdir" initdb "$dbdir"
postgres -D "${dbdir}" -c unix_socket_directories="${dbdir}" -c listen_addresses= -c port="${port}" & postgres -D "${dbdir}" -c unix_socket_directories="${dbdir}" -c listen_addresses= -c port="${port}" > /dev/null &
pgpid=$! pgpid=$!
# Trick to help the "./psql" script to find the DB dir & co # Trick to help the "./psql" script to find the DB dir & co
@ -21,19 +24,79 @@ export dbname="$dbname"
export cfgfile="$cfgfile" export cfgfile="$cfgfile"
EOF EOF
trap 'rm -rf ${dbdir} && rm /tmp/nom-nom-dev-args && kill ${pgpid}' EXIT # Garage Directory
cat <<EOF > "$garagedir/config.toml"
metadata_dir = "$garagedir/meta"
data_dir = "$garagedir/data"
block_size = 1048576
replication_mode = "1"
compression_level = 1
rpc_secret = "4425f5c26c5e11581d3223904324dcb5b5d5dfb14e5e7f35e38c595424f5f1e6"
rpc_bind_addr = "[::]:3901"
rpc_public_addr = "[::]:3901"
bootstrap_peers = [
]
consul_host = "consul.service"
consul_service_name = "garage-daemon"
sled_cache_capacity = 134217728
sled_flush_every_ms = 2000
[s3_api]
api_bind_addr = "${garageaddr}"
s3_region = "garage"
root_domain = ".s3.garage"
[s3_web]
bind_addr = "[::]:3902"
root_domain = ".web.garage"
index = "index.html"
EOF
garage -c "$garagedir/config.toml" server &
garagepid=$!
trap 'rm -rf ${dbdir} && rm -rf ${garagedir} && rm /tmp/nom-nom-dev-args && kill ${pgpid} && kill ${garagepid}' EXIT
# Yeah, this is very meh. We need to wait for the server to be ready # Yeah, this is very meh. We need to wait for the server to be ready
# to receive requests to create the DB. # to receive requests to create the DB.
sleep 2 sleep 2
createdb -h "${dbdir}" -p "${port}" "${dbname}" createdb -h "${dbdir}" -p "${port}" "${dbname}"
garage -c "$garagedir/config.toml" status
garagenodeid=$(garage -c "$garagedir/config.toml" node id | cut -f 1 -d '@')
garage -c "$garagedir/config.toml" layout assign "$garagenodeid" -c 500MB -z zone
garage -c "$garagedir/config.toml" layout show
garage -c "$garagedir/config.toml" layout apply --version 1
garage -c "$garagedir/config.toml" status
garage -c "$garagedir/config.toml" bucket create "${garagebucket}"
garage -c "$garagedir/config.toml" key create nomnom-key
garage -c "$garagedir/config.toml" bucket allow --read --write --owner "${garagebucket}" --key nomnom-key
garagekeyid=$(garage -c "$garagedir/config.toml" key info nomnom-key | grep "Key ID" | cut -f3 -d " ")
garagekeysecret=$(garage -c "$garagedir/config.toml" key info --show-secret nomnom-key | grep "Secret key" | cut -f3 -d " ")
access_key_filepath=$(mktemp)
echo "${garagekeyid}" > "${access_key_filepath}"
secret_key_filepath=$(mktemp)
echo "${garagekeysecret}" > "${secret_key_filepath}"
cat <<EOF > "${cfgfile}" cat <<EOF > "${cfgfile}"
{ {
"url": "http://localhost:8001", "url": "http://localhost:8001",
"db_host": "${dbdir}", "db_host": "${dbdir}",
"db_port": ${port}, "db_port": ${port},
"db_name": "${dbname}" "db_name": "${dbname}",
"s3_endpoint": "http://${garageaddr}",
"s3_region": "garage",
"s3_bucket": "${garagebucket}",
"s3_access_key_filepath": "${access_key_filepath}",
"s3_secret_key_filepath": "${secret_key_filepath}"
} }
EOF EOF
@ -45,4 +108,4 @@ if [ -f dump.sql ]; then
./psql -f dump.sql ./psql -f dump.sql
fi fi
cargo run --bin nom-nom-gc-server -- --bind "[::1]:8001" --config "${cfgfile}" RUST_BACKTRACE=1 cargo run --bin nom-nom-gc-server -- --bind "[::1]:8001" --config "${cfgfile}"

View File

@ -3,10 +3,12 @@
pkgs.mkShell { pkgs.mkShell {
nativeBuildInputs = [ nativeBuildInputs = [
pkgs.rustc pkgs.rustc
pkgs.rustfmt
pkgs.cargo pkgs.cargo
pkgs.rust-analyzer pkgs.rust-analyzer
pkgs.pkg-config pkgs.pkg-config
pkgs.postgresql pkgs.postgresql
pkgs.garage
]; ];
buildInputs = [ buildInputs = [
pkgs.openssl pkgs.openssl

View File

@ -29,7 +29,7 @@ async fn main() -> Result<()> {
let config = read_config(&args.config.unwrap_or("/etc/nom-nom-gc/config.json".to_owned())) let config = read_config(&args.config.unwrap_or("/etc/nom-nom-gc/config.json".to_owned()))
.unwrap_or_else(|e| panic!("Cannot read config file: {}", e)); .unwrap_or_else(|e| panic!("Cannot read config file: {}", e));
// todo: don't consume config in appstate new // todo: don't consume config in appstate new
let state = models::AppState::new(config.clone()); let state = models::AppState::new(config.clone()).await;
match args.command { match args.command {
Command::RegisterUser(args) => register_user(args.username, state, config).await, Command::RegisterUser(args) => register_user(args.username, state, config).await,
} }

View File

@ -31,3 +31,7 @@ pub async fn new_binary_cache_post(app_state: web::Data<AppState<'_>>, req: Http
.finish() .finish()
} }
} }
pub async fn get_binary_cache(app_state: web::Data<AppState<'_>>, req: HttpRequest, path: web::Path<String>) -> impl Responder {
HttpResponse::NotImplemented().finish()
}

View File

@ -2,7 +2,7 @@ use actix_web::{HttpResponse, http::header::{ContentType, self}, web, HttpReques
use chrono::Local; use chrono::Local;
use uuid::Uuid; use uuid::Uuid;
use crate::{models::{AppState, SessionUuid, User, ProjectSummary}, templates}; use crate::{models::{AppState, SessionUuid, User}, templates};
pub mod authentication; pub mod authentication;
pub mod binary_cache; pub mod binary_cache;
@ -11,14 +11,14 @@ pub use authentication::*;
pub use binary_cache::*; pub use binary_cache::*;
pub async fn landing_page (app_state: web::Data<AppState<'_>>) -> HttpResponse { pub async fn landing_page (app_state: web::Data<AppState<'_>>) -> HttpResponse {
let summaries: Vec<ProjectSummary> = vec![ /* let summaries: Vec<ProjectSummary> = vec![
ProjectSummary { ProjectSummary {
name: "Test Project".to_string(), name: "Test Project".to_string(),
latest_closure: "/nix/store/blabla".to_string(), latest_closure: "/nix/store/blabla".to_string(),
latest_closure_datetime: Local::now(), latest_closure_datetime: Local::now(),
} }
]; ];*/
let content: String = templates::landing_page(app_state.hbs.clone(), true, summaries).unwrap(); let content: String = templates::landing_page(app_state.hbs.clone(), true).unwrap();
HttpResponse::Ok() HttpResponse::Ok()
.content_type(ContentType::html()) .content_type(ContentType::html())
.body(content) .body(content)

View File

@ -2,3 +2,4 @@ pub mod app;
pub mod handlers; pub mod handlers;
pub mod models; pub mod models;
pub mod templates; pub mod templates;
pub mod s3;

View File

@ -2,6 +2,9 @@ use std::collections::HashMap;
use std::fs; use std::fs;
use std::ops::DerefMut; use std::ops::DerefMut;
use std::sync::Arc; use std::sync::Arc;
use aws_config::{BehaviorVersion, Region};
use aws_sdk_s3::Client;
use aws_sdk_s3::config::{Credentials, SharedCredentialsProvider};
use chrono::{DateTime, Local}; use chrono::{DateTime, Local};
use postgres_types::{FromSql, ToSql}; use postgres_types::{FromSql, ToSql};
use url::Url; use url::Url;
@ -23,7 +26,12 @@ pub struct Configuration {
pub url: String, pub url: String,
pub db_host: Option<String>, pub db_host: Option<String>,
pub db_port: Option<u16>, pub db_port: Option<u16>,
pub db_name: String pub db_name: String,
pub s3_endpoint: String,
pub s3_region: String,
pub s3_bucket: String,
pub s3_access_key_filepath: String,
pub s3_secret_key_filepath: String
} }
@ -71,7 +79,8 @@ pub struct AppState<'a>{
pub webauthn: Arc<Webauthn>, pub webauthn: Arc<Webauthn>,
pub db: Pool, pub db: Pool,
pub hbs: Arc<Handlebars<'a>>, pub hbs: Arc<Handlebars<'a>>,
pub session: TempSession pub session: TempSession,
pub s3_client: aws_sdk_s3::Client
} }
mod embedded { mod embedded {
@ -91,23 +100,17 @@ pub struct BinaryCache {
pub access_key: String, pub access_key: String,
pub secret_key: String, pub secret_key: String,
pub region: String, pub region: String,
pub endpoint: String pub endpoint_url: String
} }
#[derive(Clone, Debug, Eq, PartialEq)] #[derive(Clone, Debug, Eq, PartialEq)]
pub struct Project { pub struct Project {
pub name: String, pub name: String,
} pub latest_closure_generation: u32
#[derive(Serialize, Clone, Debug, Eq, PartialEq)]
pub struct ProjectSummary {
pub name: String,
pub latest_closure: String,
pub latest_closure_datetime: DateTime<Local>
} }
impl AppState<'_> { impl AppState<'_> {
pub fn new(conf: Configuration) -> Self { pub async fn new(conf: Configuration) -> Self {
let rp = "localhost"; let rp = "localhost";
let rp_origin = Url::parse(&conf.url).expect("Invalid URL"); let rp_origin = Url::parse(&conf.url).expect("Invalid URL");
let builder = WebauthnBuilder::new(rp, &rp_origin).expect("Invalid configuration"); let builder = WebauthnBuilder::new(rp, &rp_origin).expect("Invalid configuration");
@ -128,12 +131,26 @@ impl AppState<'_> {
}; };
let mgr = Manager::from_config(pg_config, NoTls, mgr_config); let mgr = Manager::from_config(pg_config, NoTls, mgr_config);
let pool = Pool::builder(mgr).max_size(16).build().unwrap(); let pool = Pool::builder(mgr).max_size(16).build().unwrap();
let access_key = fs::read_to_string(&conf.s3_access_key_filepath)
.unwrap_or_else(|_| format!("Cannot read the S3 access key from {}", &conf.s3_access_key_filepath.clone()));
let secret_key = fs::read_to_string(&conf.s3_secret_key_filepath)
.unwrap_or_else(|_| format!("Cannot read the S3 secret key from {}", &conf.s3_secret_key_filepath.clone()));
let access_key = access_key.strip_suffix("\n").unwrap_or(&access_key);
let secret_key = secret_key.strip_suffix("\n").unwrap_or(&secret_key);
let credentials = Credentials::new(access_key, secret_key, None, None, "nom-nom-provider");
let s3_client_config = aws_config::SdkConfig::builder()
.endpoint_url(conf.s3_endpoint)
.region(Some(Region::new(conf.s3_region)))
.credentials_provider(SharedCredentialsProvider::new(credentials))
.behavior_version(BehaviorVersion::latest())
.build();
let s3_client = aws_sdk_s3::Client::new(&s3_client_config);
AppState { AppState {
webauthn, webauthn,
db: pool, db: pool,
hbs, hbs,
session session,
s3_client
} }
} }
@ -198,13 +215,13 @@ impl AppState<'_> {
pub async fn create_binary_cache(&self, binary_cache: &BinaryCache) -> Result<()> { pub async fn create_binary_cache(&self, binary_cache: &BinaryCache) -> Result<()> {
let conn = self.db.get().await?; let conn = self.db.get().await?;
let stmt = conn.prepare_cached("INSERT INTO BinaryCaches (name, access_key, secret_key, region, endpoint) VALUES ($1, $2, $3, $4, $5)").await?; let stmt = conn.prepare_cached("INSERT INTO BinaryCaches (name, access_key, secret_key, region, endpoint) VALUES ($1, $2, $3, $4, $5)").await?;
let _ = conn.execute(&stmt, &[&binary_cache.name, &binary_cache.access_key, &binary_cache.secret_key, &binary_cache.region, &binary_cache.endpoint]).await?; let _ = conn.execute(&stmt, &[&binary_cache.name, &binary_cache.access_key, &binary_cache.secret_key, &binary_cache.region, &binary_cache.endpoint_url]).await?;
Ok(()) Ok(())
} }
pub async fn create_project(&self, binary_cache: &BinaryCache, project: &Project) -> Result<()> { pub async fn create_project(&self, binary_cache: &BinaryCache, project: &Project) -> Result<()> {
let conn = self.db.get().await?; let conn = self.db.get().await?;
let stmt = conn.prepare_cached("INSERT INTO Projects (name, binary_cache_id) \ let stmt = conn.prepare_cached("INSERT INTO Projects (name, binary_cache_id, 0) \
SELECT $1, b.id FROM BinaryCaches b \ SELECT $1, b.id FROM BinaryCaches b \
WHERE b.name = $2").await?; WHERE b.name = $2").await?;
let _ = conn.execute(&stmt, &[&project.name, &binary_cache.name]).await?; let _ = conn.execute(&stmt, &[&project.name, &binary_cache.name]).await?;
@ -223,16 +240,17 @@ impl AppState<'_> {
pub async fn get_project(&self, token: &ProjectUuid) -> Result<Project> { pub async fn get_project(&self, token: &ProjectUuid) -> Result<Project> {
let conn = self.db.get().await?; let conn = self.db.get().await?;
let stmt = conn.prepare_cached("SELECT name FROM Projects p \ let stmt = conn.prepare_cached("SELECT name, closure_generation FROM Projects p \
INNER JOIN ProjectTokens t ON p.id = t.project_id \ INNER JOIN ProjectTokens t ON p.id = t.project_id \
WHERE t.token = $1").await?; WHERE t.token = $1").await?;
let row = conn.query_one(&stmt, &[&token.0]).await?; let row = conn.query_one(&stmt, &[&token.0]).await?;
Ok(Project { Ok(Project {
name: row.get(0) name: row.get(0),
latest_closure_generation: row.get(1)
}) })
} }
pub async fn get_project_summaries(&self) -> Result<Vec<ProjectSummary>> { /* pub async fn get_project_summaries(&self) -> Result<Vec<ProjectSummary>> {
let conn = self.db.get().await?; let conn = self.db.get().await?;
let stmt = conn.prepare_cached("SELECT p.name, p FROM Projects p \ let stmt = conn.prepare_cached("SELECT p.name, p FROM Projects p \
INNER JOIN Closures c ON c.project_id = p.id").await?; INNER JOIN Closures c ON c.project_id = p.id").await?;
@ -244,5 +262,5 @@ impl AppState<'_> {
latest_closure_datetime: r.get(2) latest_closure_datetime: r.get(2)
}).collect() }).collect()
) )
} }*/
} }

10
src/s3/mod.rs Normal file
View File

@ -0,0 +1,10 @@
use anyhow::{anyhow, Result};
use aws_sdk_s3::{operation::head_bucket::HeadBucketOutput, Client, error::ProvideErrorMetadata};
use crate::models::Configuration;
pub async fn check_bucket(client: &Client, config: &Configuration) -> Result<HeadBucketOutput> {
println!("{}",&config.s3_bucket);
let res = client.head_bucket().bucket(&config.s3_bucket).send().await;
res.map_err(|e| anyhow!("Cannot access the binary cache bucket: {}", e))
}

View File

@ -6,6 +6,7 @@ use clap::Parser;
use nom_nom_gc::handlers; use nom_nom_gc::handlers;
use nom_nom_gc::models::read_config; use nom_nom_gc::models::read_config;
use nom_nom_gc::models; use nom_nom_gc::models;
use nom_nom_gc::s3::check_bucket;
#[derive(Parser, Debug)] #[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)] #[command(author, version, about, long_about = None)]
@ -23,10 +24,15 @@ async fn main() -> std::io::Result<()> {
let config_path = args.config.unwrap_or("/etc/nom-nom-gc/config.json".to_owned()); let config_path = args.config.unwrap_or("/etc/nom-nom-gc/config.json".to_owned());
let config = read_config(&config_path) let config = read_config(&config_path)
.unwrap_or_else(|e| panic!("Cannot read config file: {}", e)); .unwrap_or_else(|e| panic!("Cannot read config file: {}", e));
let state = models::AppState::new(config); let state = models::AppState::new(config.clone()).await;
println!("Running DB migrations"); println!("Running DB migrations");
state.run_migrations().await.unwrap_or_else(|e| panic!("Db migration error: {}", e)); state.run_migrations().await.unwrap_or_else(|e| panic!("Db migration error: {}", e));
println!("Checking binary cache bucket");
let bucket = check_bucket(&state.s3_client, &config).await;
match bucket {
Ok(_) => println!("Connection to the bucket successful"),
Err(e) => panic!("Cannot connect to the binary cache bucket: {}", e)
}
println!("Server listening to {}", &args.bind); println!("Server listening to {}", &args.bind);
HttpServer::new( HttpServer::new(
move || { move || {
@ -40,6 +46,7 @@ async fn main() -> std::io::Result<()> {
.route("/login/finish", web::post().to(handlers::webauthn_login_finish)) .route("/login/finish", web::post().to(handlers::webauthn_login_finish))
.route("/binary-cache/new", web::get().to(handlers::new_binary_cache)) .route("/binary-cache/new", web::get().to(handlers::new_binary_cache))
.route("/binary-cache/new", web::post().to(handlers::new_binary_cache_post)) .route("/binary-cache/new", web::post().to(handlers::new_binary_cache_post))
.route("/binary-cache/{id}", web::get().to(handlers::get_binary_cache))
}) })
.bind(addr) .bind(addr)
.unwrap() .unwrap()

View File

@ -3,21 +3,7 @@
<a href="/binary-cache/new">New Binary Cache</a> <a href="/binary-cache/new">New Binary Cache</a>
{{#each binaryCaches}} {{#each binaryCaches}}
<div class="binary-cache"> <div class="binary-cache">
<h3>{{this.name}}</h3> <h3><a href="/binary-cache/{{this.id}}">{{this.name}}</a></h3>
<table>
<tr>
<th>Project Name</th>
<th>Latest Closure</th>
<th>Datetime</th>
</tr>
{{#each this.projects}}
<tr>
<td>{{this.name}}</td>
<td>{{this.latestClosure}}</td>
<td>{{this.datetime}}</td>
</tr>
{{/each}}
</table>
</div> </div>
{{/each}} {{/each}}
{{ /template }} {{ /template }}

View File

@ -4,7 +4,7 @@ use handlebars::Handlebars;
use std::{path::PathBuf, sync::Arc}; use std::{path::PathBuf, sync::Arc};
use crate::models::{RegistrationUuid, ProjectSummary}; use crate::models::RegistrationUuid;
pub fn new<'a>() -> Result<Handlebars<'a>, RenderError> { pub fn new<'a>() -> Result<Handlebars<'a>, RenderError> {
let rootpath = PathBuf::from(env!("CARGO_MANIFEST_DIR")); let rootpath = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
@ -30,17 +30,11 @@ pub fn new<'a>() -> Result<Handlebars<'a>, RenderError> {
Ok(hbs) Ok(hbs)
} }
pub fn landing_page(hb: Arc<Handlebars<'_>>, logged: bool, project_summaries: Vec<ProjectSummary>) -> Result<String, RenderError> { pub fn landing_page(hb: Arc<Handlebars<'_>>, logged: bool) -> Result<String, RenderError> {
let data = json!({ let data = json!({
"binaryCaches": [{ "binaryCaches": [{
"name": "NixOS Binary Cache", }]
"projects": project_summaries.into_iter().map(|p| json!({ });
"name": p.name,
"latestClosure": p.latest_closure,
"datetime": p.latest_closure_datetime.to_string()
}
)).collect::<Vec<_>>()
}]});
hb.render("landing", &data) hb.render("landing", &data)
} }

View File

@ -74,10 +74,12 @@ async fn run_test_db(db: &TestDB) -> Result<()> {
access_key: "access key".to_string(), access_key: "access key".to_string(),
secret_key: "secret key".to_string(), secret_key: "secret key".to_string(),
region: "reg-01".to_string(), region: "reg-01".to_string(),
endpoint_url: "localhost:"
}; };
state.create_binary_cache(&binary_cache).await?; state.create_binary_cache(&binary_cache).await?;
let project = Project { let project = Project {
name: "super-duper-project".to_string() name: "super-duper-project".to_string(),
latest_closure_generation: 0
}; };
state.create_project(&binary_cache, &project).await?; state.create_project(&binary_cache, &project).await?;
let token = state.create_project_token(&project).await?; let token = state.create_project_token(&project).await?;