From 6f72e7c2c4efcc91d7136ae5aa99311770807371 Mon Sep 17 00:00:00 2001 From: ibaryshnikov Date: Thu, 28 Oct 2021 23:21:12 +0300 Subject: [PATCH] removed memmap for verifier and contributor commands --- Cargo.lock | 7 - phase1-cli/src/bin/phase1.rs | 32 ++-- phase1-cli/src/contribute.rs | 101 +++-------- phase1-cli/src/new_challenge.rs | 35 +--- .../src/transform_pok_and_correctness.rs | 170 +++++------------- setup1-contributor/Cargo.toml | 1 - setup1-contributor/src/commands/contribute.rs | 82 ++------- setup1-contributor/src/errors.rs | 2 - setup1-contributor/src/utils.rs | 21 +-- setup1-verifier/src/main.rs | 2 +- setup1-verifier/src/utils/mod.rs | 58 ------ setup1-verifier/src/verifier.rs | 152 +++------------- 12 files changed, 147 insertions(+), 516 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e078930a..8c810e29 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1776,12 +1776,6 @@ dependencies = [ "syn", ] -[[package]] -name = "panic-control" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "159973aebc43b4640619042b3bf160e3b6348000a949e37c0806ae6acc79d6c7" - [[package]] name = "parking_lot" version = "0.11.2" @@ -2776,7 +2770,6 @@ dependencies = [ "http", "i18n-embed 0.9.4", "indicatif", - "panic-control", "phase1", "phase1-cli", "phase1-coordinator", diff --git a/phase1-cli/src/bin/phase1.rs b/phase1-cli/src/bin/phase1.rs index dc5b6930..df0f3521 100644 --- a/phase1-cli/src/bin/phase1.rs +++ b/phase1-cli/src/bin/phase1.rs @@ -44,49 +44,59 @@ fn execute_cmd(opts: Phase1Opts) { let now = Instant::now(); match command { Command::New(opt) => { - new_challenge(CHALLENGE_IS_COMPRESSED, &opt.challenge_fname, ¶meters); + let challenge = new_challenge(CHALLENGE_IS_COMPRESSED, ¶meters); + std::fs::write(&opt.challenge_fname, challenge).expect("Unable to create a challenge file"); } Command::Contribute(opt) => { + let challenge = std::fs::read(opt.challenge_fname).expect("unable read challenge file"); // contribute 
to the randomness let seed = hex::decode(&read_to_string(&opts.seed).expect("should have read seed").trim()) .expect("seed should be a hex string"); let rng = derive_rng_from_seed(&seed); - contribute( + let contribution = contribute( CHALLENGE_IS_COMPRESSED, - &opt.challenge_fname, + &challenge, CONTRIBUTION_IS_COMPRESSED, - &opt.response_fname, CHECK_CONTRIBUTION_INPUT_FOR_CORRECTNESS, ¶meters, rng, ); + std::fs::write(opt.response_fname, contribution).expect("unable to create response file"); } Command::Beacon(opt) => { + let challenge = std::fs::read(opt.challenge_fname).expect("unable read challenge file"); // use the beacon's randomness // Place block hash here (block number #564321) let beacon_hash = hex::decode(&opt.beacon_hash).expect("could not hex decode beacon hash"); let rng = derive_rng_from_seed(&beacon_randomness(from_slice(&beacon_hash))); - contribute( + let contribution = contribute( CHALLENGE_IS_COMPRESSED, - &opt.challenge_fname, + &challenge, CONTRIBUTION_IS_COMPRESSED, - &opt.response_fname, CHECK_CONTRIBUTION_INPUT_FOR_CORRECTNESS, ¶meters, rng, ); + std::fs::write(opt.response_fname, contribution).expect("unable to create response file"); } Command::VerifyAndTransformPokAndCorrectness(opt) => { // we receive a previous participation, verify it, and generate a new challenge from it - transform_pok_and_correctness( + let challenge = + std::fs::read(opt.challenge_fname).expect("Unable to read the challenge file in this directory"); + let response = + std::fs::read(opt.response_fname).expect("Unable to read the response file in this directory"); + + let new_challenge = transform_pok_and_correctness( CHALLENGE_IS_COMPRESSED, - &opt.challenge_fname, + &challenge, CONTRIBUTION_IS_COMPRESSED, - &opt.response_fname, + &response, CHALLENGE_IS_COMPRESSED, - &opt.new_challenge_fname, ¶meters, ); + + std::fs::write(opt.new_challenge_fname, new_challenge) + .expect("Unable to create new challenge file in this directory"); } 
Command::VerifyAndTransformRatios(opt) => { // we receive a previous participation, verify it, and generate a new challenge from it diff --git a/phase1-cli/src/contribute.rs b/phase1-cli/src/contribute.rs index aed18c5b..f300667d 100644 --- a/phase1-cli/src/contribute.rs +++ b/phase1-cli/src/contribute.rs @@ -3,97 +3,56 @@ use setup_utils::{calculate_hash, CheckForCorrectness, UseCompression}; use snarkvm_curves::PairingEngine as Engine; -use memmap::*; use rand::{CryptoRng, Rng}; -use std::{ - fs::OpenOptions, - io::{Read, Write}, -}; +use std::io::{Read, Write}; pub fn contribute( compressed_input: UseCompression, - challenge_filename: &str, + challenge: &[u8], compressed_output: UseCompression, - response_filename: &str, check_input_correctness: CheckForCorrectness, parameters: &Phase1Parameters, mut rng: impl Rng + CryptoRng, -) { - // Try to load challenge file from disk. - let reader = OpenOptions::new() - .read(true) - .open(challenge_filename) - .expect("unable open challenge file"); - { - let metadata = reader - .metadata() - .expect("unable to get filesystem metadata for challenge file"); - let expected_challenge_length = match compressed_input { - UseCompression::Yes => parameters.contribution_size - parameters.public_key_size, - UseCompression::No => parameters.accumulator_size, - }; - - if metadata.len() != (expected_challenge_length as u64) { - panic!( - "The size of challenge file should be {}, but it's {}, so something isn't right.", - expected_challenge_length, - metadata.len() - ); - } - } - - let readable_map = unsafe { - MmapOptions::new() - .map(&reader) - .expect("unable to create a memory map for input") +) -> Vec { + let expected_challenge_length = match compressed_input { + UseCompression::Yes => parameters.contribution_size - parameters.public_key_size, + UseCompression::No => parameters.accumulator_size, }; - // Create response file in this directory - let writer = OpenOptions::new() - .read(true) - .write(true) - .create_new(true) - 
.open(response_filename) - .expect("unable to create response file"); + if challenge.len() != expected_challenge_length { + panic!( + "The size of challenge file should be {}, but it's {}, so something isn't right.", + expected_challenge_length, + challenge.len() + ); + } let required_output_length = match compressed_output { UseCompression::Yes => parameters.contribution_size, UseCompression::No => parameters.accumulator_size + parameters.public_key_size, }; - writer - .set_len(required_output_length as u64) - .expect("must make output file large enough"); - - let mut writable_map = unsafe { - MmapOptions::new() - .map_mut(&writer) - .expect("unable to create a memory map for output") - }; + let mut response = vec![0; required_output_length]; tracing::info!("Calculating previous contribution hash..."); - assert!( - UseCompression::No == compressed_input, + assert_eq!( + UseCompression::No, + compressed_input, "Hashing the compressed file in not yet defined" ); - let current_accumulator_hash = calculate_hash(&readable_map); - - { - tracing::info!("`challenge` file contains decompressed points and has a hash:"); - log_hash(¤t_accumulator_hash); + let current_accumulator_hash = calculate_hash(&challenge); - (&mut writable_map[0..]) - .write_all(¤t_accumulator_hash) - .expect("unable to write a challenge hash to mmap"); + tracing::info!("`challenge` file contains decompressed points and has a hash:"); + log_hash(¤t_accumulator_hash); - writable_map.flush().expect("unable to write hash to response file"); - } + (&mut response[0..]) + .write_all(¤t_accumulator_hash) + .expect("unable to write a challenge hash to mmap"); { let mut challenge_hash = [0; 64]; - let mut memory_slice = readable_map.get(0..64).expect("must read point data from file"); - memory_slice + (&challenge[..]) .read_exact(&mut challenge_hash) .expect("couldn't read hash of challenge file from response file"); @@ -112,8 +71,8 @@ pub fn contribute( // this computes a transformation and writes it 
Phase1::computation( - &readable_map, - &mut writable_map, + &challenge, + &mut response, compressed_input, compressed_output, check_input_correctness, @@ -126,14 +85,11 @@ pub fn contribute( // Write the public key public_key - .write(&mut writable_map, compressed_output, ¶meters) + .write(&mut response, compressed_output, ¶meters) .expect("unable to write public key"); - writable_map.flush().expect("must flush a memory map"); - // Get the hash of the contribution, so the user can compare later - let output_readonly = writable_map.make_read_only().expect("must make a map readonly"); - let contribution_hash = calculate_hash(&output_readonly); + let contribution_hash = calculate_hash(&response); tracing::info!( "Done!\n\n\ @@ -142,6 +98,7 @@ pub fn contribute( ); log_hash(&contribution_hash); tracing::info!("Thank you for your participation, much appreciated! :)"); + response } fn log_hash(hash: &[u8]) { diff --git a/phase1-cli/src/new_challenge.rs b/phase1-cli/src/new_challenge.rs index 46bc2878..116c1f87 100644 --- a/phase1-cli/src/new_challenge.rs +++ b/phase1-cli/src/new_challenge.rs @@ -3,62 +3,43 @@ use setup_utils::{blank_hash, calculate_hash, print_hash, UseCompression}; use snarkvm_curves::PairingEngine as Engine; -use memmap::*; -use std::{fs::OpenOptions, io::Write}; +use std::io::Write; pub fn new_challenge( compress_new_challenge: UseCompression, - challenge_filename: &str, parameters: &Phase1Parameters, -) { +) -> Vec { println!( "Will generate an empty accumulator for 2^{} powers of tau", parameters.total_size_in_log2 ); println!("In total will generate up to {} powers", parameters.powers_g1_length); - let file = OpenOptions::new() - .read(true) - .write(true) - .create_new(true) - .open(challenge_filename) - .expect("unable to create challenge file"); - let expected_challenge_length = match compress_new_challenge { UseCompression::Yes => parameters.contribution_size - parameters.public_key_size, UseCompression::No => parameters.accumulator_size, }; - 
file.set_len(expected_challenge_length as u64) - .expect("unable to allocate large enough file"); - - let mut writable_map = unsafe { - MmapOptions::new() - .map_mut(&file) - .expect("unable to create a memory map") - }; + let mut challenge = vec![0; expected_challenge_length]; // Write a blank BLAKE2b hash: let hash = blank_hash(); - (&mut writable_map[0..]) + (&mut challenge[0..]) .write_all(&hash) .expect("unable to write a default hash to mmap"); - writable_map - .flush() - .expect("unable to write blank hash to challenge file"); println!("Blank hash for an empty challenge:"); print_hash(&hash); - Phase1::initialization(&mut writable_map, compress_new_challenge, ¶meters) + Phase1::initialization(&mut challenge, compress_new_challenge, ¶meters) .expect("generation of initial accumulator is successful"); - writable_map.flush().expect("unable to flush memmap to disk"); // Get the hash of the contribution, so the user can compare later - let output_readonly = writable_map.make_read_only().expect("must make a map readonly"); - let contribution_hash = calculate_hash(&output_readonly); + let contribution_hash = calculate_hash(&challenge); println!("Empty contribution is formed with a hash:"); print_hash(&contribution_hash); println!("Wrote a fresh accumulator to challenge file"); + + challenge } diff --git a/phase1-cli/src/transform_pok_and_correctness.rs b/phase1-cli/src/transform_pok_and_correctness.rs index 3f6710c4..19c7880c 100644 --- a/phase1-cli/src/transform_pok_and_correctness.rs +++ b/phase1-cli/src/transform_pok_and_correctness.rs @@ -3,89 +3,50 @@ use setup_utils::{calculate_hash, print_hash, CheckForCorrectness, UseCompressio use snarkvm_curves::PairingEngine as Engine; -use memmap::*; -use std::{ - fs::{self, OpenOptions}, - io::{Read, Write}, -}; +use std::io::{Read, Write}; pub fn transform_pok_and_correctness( challenge_is_compressed: UseCompression, - challenge_filename: &str, + challenge: &[u8], contribution_is_compressed: UseCompression, - 
response_filename: &str, + response: &[u8], compress_new_challenge: UseCompression, - new_challenge_filename: &str, parameters: &Phase1Parameters, -) { +) -> Vec { println!( "Will verify and decompress a contribution to accumulator for 2^{} powers of tau", parameters.total_size_in_log2 ); - // Try to load challenge file from disk. - let challenge_reader = OpenOptions::new() - .read(true) - .open(challenge_filename) - .expect("unable open challenge file in this directory"); - - { - let metadata = challenge_reader - .metadata() - .expect("unable to get filesystem metadata for challenge file"); - let expected_challenge_length = match challenge_is_compressed { - UseCompression::Yes => parameters.contribution_size - parameters.public_key_size, - UseCompression::No => parameters.accumulator_size, - }; - if metadata.len() != (expected_challenge_length as u64) { - panic!( - "The size of challenge file should be {}, but it's {}, so something isn't right.", - expected_challenge_length, - metadata.len() - ); - } - } - - let challenge_readable_map = unsafe { - MmapOptions::new() - .map(&challenge_reader) - .expect("unable to create a memory map for input") + let expected_challenge_length = match challenge_is_compressed { + UseCompression::Yes => parameters.contribution_size - parameters.public_key_size, + UseCompression::No => parameters.accumulator_size, }; - - // Try to load response file from disk. 
- let response_reader = OpenOptions::new() - .read(true) - .open(response_filename) - .expect("unable open response file in this directory"); - - { - let metadata = response_reader - .metadata() - .expect("unable to get filesystem metadata for response file"); - let expected_response_length = match contribution_is_compressed { - UseCompression::Yes => parameters.contribution_size, - UseCompression::No => parameters.accumulator_size + parameters.public_key_size, - }; - if metadata.len() != (expected_response_length as u64) { - panic!( - "The size of response file should be {}, but it's {}, so something isn't right.", - expected_response_length, - metadata.len() - ); - } + if challenge.len() != expected_challenge_length { + panic!( + "The size of challenge file should be {}, but it's {}, so something isn't right.", + expected_challenge_length, + challenge.len() + ); } - let response_readable_map = unsafe { - MmapOptions::new() - .map(&response_reader) - .expect("unable to create a memory map for input") + let expected_response_length = match contribution_is_compressed { + UseCompression::Yes => parameters.contribution_size, + UseCompression::No => parameters.accumulator_size + parameters.public_key_size, }; + if response.len() != expected_response_length { + panic!( + "The size of response file should be {}, but it's {}, so something isn't right.", + expected_response_length, + response.len() + ); + } println!("Calculating previous challenge hash..."); // Check that contribution is correct - let current_accumulator_hash = calculate_hash(&challenge_readable_map); + let current_accumulator_hash = calculate_hash(&challenge); println!("Hash of the `challenge` file for verification:"); print_hash(¤t_accumulator_hash); @@ -93,10 +54,7 @@ pub fn transform_pok_and_correctness( // Check the hash chain - a new response must be based on the previous challenge! 
{ let mut response_challenge_hash = [0; 64]; - let mut memory_slice = response_readable_map - .get(0..64) - .expect("must read point data from file"); - memory_slice + (&response[..]) .read_exact(&mut response_challenge_hash) .expect("couldn't read hash of challenge file from response file"); @@ -108,13 +66,13 @@ pub fn transform_pok_and_correctness( } } - let response_hash = calculate_hash(&response_readable_map); + let response_hash = calculate_hash(&response); println!("Hash of the response file for verification:"); print_hash(&response_hash); // get the contributor's public key - let public_key = PublicKey::read(&response_readable_map, contribution_is_compressed, ¶meters) + let public_key = PublicKey::read(&response, contribution_is_compressed, ¶meters) .expect("wasn't able to deserialize the response file's public key"); // check that it follows the protocol @@ -122,8 +80,8 @@ pub fn transform_pok_and_correctness( println!("Verifying a contribution to contain proper powers and correspond to the public key..."); let res = Phase1::verification( - &challenge_readable_map, - &response_readable_map, + &challenge, + &response, &public_key, ¤t_accumulator_hash, challenge_is_compressed, @@ -142,78 +100,36 @@ pub fn transform_pok_and_correctness( if compress_new_challenge == contribution_is_compressed { println!("Don't need to recompress the contribution, copying the file without the public key..."); - fs::copy(challenge_filename, new_challenge_filename) - .expect("Should have been able to copy the new challenge file"); - let f = fs::File::open(new_challenge_filename).expect("Should have been able to open the new challenge file"); - f.set_len((parameters.accumulator_size + parameters.public_key_size) as u64) - .expect("Should have been able to truncate the new challenge file"); - - let new_challenge_reader = OpenOptions::new() - .read(true) - .open(new_challenge_filename) - .expect("unable open new challenge file in this directory"); - - let new_challenge_readable_map 
= unsafe { - MmapOptions::new() - .map(&new_challenge_reader) - .expect("unable to create a memory map for new input") - }; + let mut new_challenge = challenge.to_vec(); + new_challenge.resize(parameters.accumulator_size + parameters.public_key_size, 0); - let hash = calculate_hash(&new_challenge_readable_map); + let hash = calculate_hash(&new_challenge); println!("Here's the BLAKE2b hash of the decompressed participant's response as new_challenge file:"); print_hash(&hash); println!("Done! new challenge file contains the new challenge file. The other files"); println!("were left alone."); + new_challenge } else { println!("Verification succeeded! Writing to new challenge file..."); - // Create new challenge file in this directory - let writer = OpenOptions::new() - .read(true) - .write(true) - .create_new(true) - .open(new_challenge_filename) - .expect("unable to create new challenge file in this directory"); - - // Recomputation strips the public key and uses hashing to link with the previous contribution after decompression - writer - .set_len(parameters.accumulator_size as u64) - .expect("must make output file large enough"); - - let mut writable_map = unsafe { - MmapOptions::new() - .map_mut(&writer) - .expect("unable to create a memory map for output") - }; - - { - (&mut writable_map[0..]) - .write_all(&response_hash) - .expect("unable to write a default hash to mmap"); - - writable_map - .flush() - .expect("unable to write hash to new challenge file"); - } - - Phase1::decompress( - &response_readable_map, - &mut writable_map, - CheckForCorrectness::No, - ¶meters, - ) - .expect("must decompress a response for a new challenge"); + // Create new challenge + let mut new_challenge = vec![0; parameters.accumulator_size]; - writable_map.flush().expect("must flush the memory map"); + (&mut new_challenge[0..]) + .write_all(&response_hash) + .expect("unable to write a default hash to new challenge"); - let new_challenge_readable_map = 
writable_map.make_read_only().expect("must make a map readonly"); + Phase1::decompress(&response, &mut new_challenge, CheckForCorrectness::No, ¶meters) + .expect("must decompress a response for a new challenge"); - let recompressed_hash = calculate_hash(&new_challenge_readable_map); + let recompressed_hash = calculate_hash(&new_challenge); println!("Here's the BLAKE2b hash of the decompressed participant's response as new_challenge file:"); print_hash(&recompressed_hash); println!("Done! new challenge file contains the new challenge file. The other files"); println!("were left alone."); + + new_challenge } } diff --git a/setup1-contributor/Cargo.toml b/setup1-contributor/Cargo.toml index c0f40fcc..b1fce5e1 100644 --- a/setup1-contributor/Cargo.toml +++ b/setup1-contributor/Cargo.toml @@ -31,7 +31,6 @@ hex = { version = "0.4" } http = "0.2" i18n-embed = { version = "0.9", features = ["desktop-requester"] } indicatif = { version = "0.15.0" } -panic-control = {version = "0.1.4" } rand = { version = "0.8" } regex = "1" reqwest = "0.11" diff --git a/setup1-contributor/src/commands/contribute.rs b/setup1-contributor/src/commands/contribute.rs index bdd67420..123c971c 100644 --- a/setup1-contributor/src/commands/contribute.rs +++ b/setup1-contributor/src/commands/contribute.rs @@ -6,13 +6,7 @@ use crate::{ confirmation_key::{print_key_and_remove_the_file, ConfirmationKey}, AleoSetupKeys, }, - utils::{ - create_parameters_for_chunk, - get_authorization_value, - read_from_file, - remove_file_if_exists, - sign_contribution_state, - }, + utils::{create_parameters_for_chunk, get_authorization_value, sign_contribution_state}, }; use phase1::helpers::converters::CurveKind; @@ -31,27 +25,15 @@ use anyhow::{Context, Result}; use dialoguer::{theme::ColorfulTheme, Confirm, Input}; use fs_err::File; use indicatif::{ProgressBar, ProgressStyle}; -use panic_control::{spawn_quiet, ThreadResultExt}; use rand::{CryptoRng, Rng}; use regex::Regex; use secrecy::{ExposeSecret, SecretString, 
SecretVec}; use setup_utils::derive_rng_from_seed; -use std::{ - collections::HashSet, - convert::TryFrom, - io::{Read, Write}, - path::PathBuf, - str::FromStr, - sync::Arc, - time::Duration, -}; +use std::{collections::HashSet, convert::TryFrom, io::Read, path::PathBuf, str::FromStr, sync::Arc, time::Duration}; use tokio::time::{sleep, Instant}; use tracing::{error, info}; use url::Url; -const CHALLENGE_FILENAME: &str = "challenge"; -const RESPONSE_FILENAME: &str = "response"; - const DELAY_AFTER_ERROR: Duration = Duration::from_secs(60); const DELAY_POLL_CEREMONY: Duration = Duration::from_secs(5); const HEARTBEAT_POLL_DELAY: Duration = Duration::from_secs(30); @@ -192,9 +174,6 @@ impl Contribute { let incomplete_chunks = self.get_non_contributed_and_available_chunks(&ceremony); if incomplete_chunks.is_empty() { if non_contributed_chunks.is_empty() { - remove_file_if_exists(CHALLENGE_FILENAME)?; - remove_file_if_exists(RESPONSE_FILENAME)?; - let completed_message = "Finished!"; progress_bar.finish_with_message(completed_message); info!(completed_message); @@ -215,13 +194,13 @@ impl Contribute { progress_bar.set_message(&format!("Contributing to chunk {}...", chunk_id)); - self.download_challenge(chunk_id, lock_response.contribution_id, CHALLENGE_FILENAME, auth_rng) + let challenge_file = self + .download_challenge(chunk_id, lock_response.contribution_id, auth_rng) .await?; let exposed_seed = self.seed.expose_secret(); let seeded_rng = derive_rng_from_seed(&exposed_seed[..]); let start = Instant::now(); - remove_file_if_exists(RESPONSE_FILENAME)?; // Fetch parameters required for contribution. let parameters = create_parameters_for_chunk::(&self.environment, chunk_id as usize)?; @@ -230,32 +209,18 @@ impl Contribute { let check_input_correctness = self.environment.check_input_for_correctness(); // Run the contribution. 
- let h = spawn_quiet(move || { - contribute( - compressed_input, - CHALLENGE_FILENAME, - compressed_output, - RESPONSE_FILENAME, - check_input_correctness, - ¶meters, - seeded_rng, - ); - }); - let result = h.join(); - if result.is_err() { - if let Some(panic_value) = result.panic_value_as_str() { - error!("Contribute failed: {}", panic_value); - } - return Err(ContributeError::FailedRunningContributeError.into()); - } + let response_file = contribute( + compressed_input, + &challenge_file, + compressed_output, + check_input_correctness, + ¶meters, + seeded_rng, + ); let duration = start.elapsed(); info!("Completed chunk {} in {} seconds", chunk_id, duration.as_secs()); - // Read the challenge and response files. - let challenge_file = read_from_file(CHALLENGE_FILENAME)?; - let response_file = read_from_file(RESPONSE_FILENAME)?; - // Hash the challenge and response files. let challenge_hash = calculate_hash(&challenge_file); let response_hash = calculate_hash(&response_file); @@ -265,11 +230,6 @@ impl Contribute { let signed_contribution_state = sign_contribution_state(&view_key.to_string(), &challenge_hash, &response_hash, None, auth_rng)?; - // Construct the serialized response - let mut file = File::open(RESPONSE_FILENAME)?; - let mut response_file = Vec::new(); - file.read_to_end(&mut response_file)?; - // Concatenate the signed contribution data and next challenge file. 
let verifier_flag = [0]; let signature_bytes = hex::decode(signed_contribution_state.get_signature())?; @@ -378,27 +338,23 @@ impl Contribute { &self, chunk_id: u64, contribution_id: u64, - file_path: &str, auth_rng: &mut R, - ) -> Result<()> { + ) -> Result> { let download_path = format!("/v1/download/challenge/{}/{}", chunk_id, contribution_id); let download_path_url = self.server_url.join(&download_path)?; let client = reqwest::Client::new(); let authorization = get_authorization_value(&self.private_key, "GET", &download_path, auth_rng)?; - let mut response = client + let challenge = client .get(download_path_url.as_str()) .header(http::header::AUTHORIZATION, authorization) .send() .await? - .error_for_status()?; - - remove_file_if_exists(file_path)?; - let mut out = File::create(file_path)?; - while let Some(chunk) = response.chunk().await? { - out.write_all(&chunk)?; - } + .error_for_status()? + .bytes() + .await? + .to_vec(); - Ok(()) + Ok(challenge) } async fn upload_response( diff --git a/setup1-contributor/src/errors.rs b/setup1-contributor/src/errors.rs index bf721600..5b328484 100644 --- a/setup1-contributor/src/errors.rs +++ b/setup1-contributor/src/errors.rs @@ -4,8 +4,6 @@ use thiserror::Error; pub enum ContributeError { #[error("Could not read passphrase")] CouldNotReadPassphraseError, - #[error("Failed running contribute")] - FailedRunningContributeError, #[error("Unsupported decryptor")] UnsupportedDecryptorError, } diff --git a/setup1-contributor/src/utils.rs b/setup1-contributor/src/utils.rs index e1226aef..47526d22 100644 --- a/setup1-contributor/src/utils.rs +++ b/setup1-contributor/src/utils.rs @@ -12,29 +12,10 @@ use anyhow::Result; #[cfg(test)] use fs_err::{create_dir_all, write}; use rand::{CryptoRng, Rng}; -use std::{ - convert::TryFrom, - fs::{remove_file, File}, - io::Read, - path::Path, - str::FromStr, -}; +use std::{convert::TryFrom, str::FromStr}; #[cfg(test)] use tracing::error; -pub fn remove_file_if_exists(file_path: &str) -> 
Result<()> { - if Path::new(file_path).exists() { - remove_file(file_path)?; - } - Ok(()) -} - -pub fn read_from_file(file_name: &str) -> Result> { - let mut file = vec![]; - File::open(file_name)?.read_to_end(&mut file)?; - Ok(file) -} - /// /// This function creates the `file_path`'s parent directories if it /// does not already exists. diff --git a/setup1-verifier/src/main.rs b/setup1-verifier/src/main.rs index c32d685d..a92b7461 100644 --- a/setup1-verifier/src/main.rs +++ b/setup1-verifier/src/main.rs @@ -62,7 +62,7 @@ async fn request_coordinator_public_settings(coordinator_url: &Url) -> anyhow::R async fn main() { let options = Options::from_args(); - crate::utils::init_logger(); + crate::utils::logger::init_logger(); let public_settings = request_coordinator_public_settings(&options.api_url) .await diff --git a/setup1-verifier/src/utils/mod.rs b/setup1-verifier/src/utils/mod.rs index 96c233f3..8a05b5dc 100644 --- a/setup1-verifier/src/utils/mod.rs +++ b/setup1-verifier/src/utils/mod.rs @@ -2,61 +2,3 @@ pub mod authentication; pub use authentication::*; pub mod logger; -pub use logger::*; - -use std::{fs, path::Path}; -use tracing::{error, trace}; - -/// -/// This function writes a `file_bytes` to the `locator` path. -/// -/// If a parent directory doesn't already exists, this function will -/// automatically generate one. -/// -pub fn write_to_file(locator: &str, file_bytes: Vec) { - create_parent_directory(locator); - remove_file_if_exists(locator); - - if let Err(err) = fs::write(locator, file_bytes) { - error!("Error writing file to path {} {}", &locator, err); - } -} - -// /// -// /// This function reads the bytes from a file at a given path. 
-// /// -// pub fn read_from_file(locator: &str) -> anyhow::Result> { -// let mut buffer = Vec::new(); -// let mut file = fs::File::open(locator)?; -// file.read_to_end(&mut buffer)?; -// -// Ok(buffer) -// } - -/// -/// This function creates the `locator` path's parent directories if it -/// does not already exists. -/// -pub fn create_parent_directory(locator: &str) { - let locator_path = Path::new(&locator); - if let Some(locator_path_parent) = locator_path.parent() { - if let Err(err) = fs::create_dir_all(locator_path_parent) { - error!( - "Error initializing locator parent directory {:?} {}", - &locator_path_parent, err - ); - } - } -} - -/// -/// This function removes a file if it exists in the filesystem. -/// -pub fn remove_file_if_exists(file_path: &str) { - if Path::new(file_path).exists() { - trace!("Removing file {}", file_path); - if let Err(err) = fs::remove_file(file_path) { - error!("Error removing file {} {}", &file_path, err); - } - } -} diff --git a/setup1-verifier/src/verifier.rs b/setup1-verifier/src/verifier.rs index 88765361..2bc62746 100644 --- a/setup1-verifier/src/verifier.rs +++ b/setup1-verifier/src/verifier.rs @@ -1,5 +1,4 @@ use std::{ - fs, str::FromStr, time::{Duration, Instant}, }; @@ -20,10 +19,7 @@ use serde::{Deserialize, Serialize}; use tracing::{debug, error, info}; use url::Url; -use crate::{ - errors::VerifierError, - utils::{authentication::AleoAuthentication, create_parent_directory, remove_file_if_exists, write_to_file}, -}; +use crate::{errors::VerifierError, utils::authentication::AleoAuthentication}; const NO_TASKS_DELAY: Duration = Duration::from_secs(5); const UPLOAD_TASK_ERROR_DELAY: Duration = Duration::from_secs(5); @@ -103,130 +99,34 @@ impl Verifier { }) } - /// - /// Downloads the challenge file from the coordinator and stores it to the verifier filesystem. - /// Returns the hash of the downloaded response file. 
Otherwise, returns a `VerifierError` - /// - pub async fn process_challenge_file( - &self, - chunk_id: u64, - contribution_id: u64, - challenge_locator: &str, - ) -> Result<[u8; 64], VerifierError> { - // Download the challenge file from the coordinator. - let challenge_file = self.download_challenge_file(chunk_id, contribution_id).await?; - - // Compute the challenge hash using the challenge file. - let challenge_hash = calculate_hash(&challenge_file); - - debug!( - "Writing the challenge file (size: {}) {} to disk", - challenge_file.len(), - &challenge_locator - ); - - // Write the challenge file to disk. - fs::write(&challenge_locator, challenge_file)?; - - debug!("The challenge hash is {}", pretty_hash(&challenge_hash)); - - Ok(challenge_hash) - } - - /// - /// Downloads the response file from the coordinator and stores it to the verifier filesystem. - /// Returns the hash of the downloaded response file. Otherwise, returns a `VerifierError` - /// - pub async fn process_response_file( - &self, - chunk_id: u64, - contribution_id: u64, - response_locator: &str, - ) -> Result<[u8; 64], VerifierError> { - // Download the response file from the coordinator. - let response_file = self.download_response_file(chunk_id, contribution_id).await?; - - // Compute the response hash using the response file. - let response_hash = calculate_hash(&response_file); - - debug!( - "Writing the response file (size: {}) {} to disk", - response_file.len(), - &response_locator - ); - - // Write the response to a local file - write_to_file(&response_locator, response_file); - - debug!("The response hash is {}", pretty_hash(&response_hash)); - - Ok(response_hash) - } - - /// - /// Returns the next challenge file and the hash of the next challenge file at the given locator. 
- /// Otherwise, returns a `VerifierError` - /// - pub async fn read_next_challenge_file( - &self, - next_challenge_locator: &str, - ) -> Result<(Vec, [u8; 64]), VerifierError> { - info!("Reading the next challenge locator at {}", &next_challenge_locator); - - let next_challenge_file = fs::read(&next_challenge_locator)?; - - // Compute the next challenge hash using the next challenge file. - let next_challenge_hash = calculate_hash(&next_challenge_file); - - debug!("The next challenge hash is {}", pretty_hash(&next_challenge_hash)); - - Ok((next_challenge_file, next_challenge_hash)) - } - /// /// Performs verification on a contribution with the given chunk id and file locators. /// Returns the time (in milliseconds) it took for verification to execute. /// - pub fn run_verification( - &self, - chunk_id: u64, - challenge_file_locator: &str, - response_locator: &str, - next_challenge_locator: &str, - ) -> u128 { - // Create the parent directory for the `next_challenge_locator` if it doesn't already exist. - create_parent_directory(&next_challenge_locator); - // Remove the `next_challenge_locator` if it already exists. 
-        remove_file_if_exists(&next_challenge_locator);
-
+    pub fn run_verification(&self, chunk_id: u64, challenge: &[u8], response: &[u8]) -> Vec<u8> {
         let settings = self.environment.parameters();
         let compressed_challenge = self.environment.compressed_inputs();
         let compressed_response = self.environment.compressed_outputs();
-        let start = Instant::now();
         match settings.curve() {
             CurveKind::Bls12_377 => transform_pok_and_correctness(
                 compressed_challenge,
-                &challenge_file_locator,
+                &challenge,
                 compressed_response,
-                &response_locator,
+                &response,
                 compressed_challenge,
-                &next_challenge_locator,
                 &phase1_chunked_parameters!(Bls12_377, settings, chunk_id),
             ),
             CurveKind::BW6 => transform_pok_and_correctness(
                 compressed_challenge,
-                &challenge_file_locator,
+                &challenge,
                 compressed_response,
-                &response_locator,
+                &response,
                 compressed_challenge,
-                &next_challenge_locator,
                 &phase1_chunked_parameters!(BW6_761, settings, chunk_id),
             ),
-        };
-
-        start.elapsed().as_millis()
+        }
     }

     ///
@@ -326,7 +226,7 @@ impl Verifier {
             info!("Got a task: {:?}", task);

             // Run the verification operations.
-            if let Err(error) = self.try_verify(&task).await {
+            if let Err(error) = self.verify(&task).await {
                 error!("Error while verifying {}", error);
                 tokio::time::sleep(UPLOAD_TASK_ERROR_DELAY).await;
             }
@@ -347,47 +247,45 @@ impl Verifier {
     /// 9. Attempts to apply the verification in the ceremony
     ///     - Request to the coordinator to run `try_verify`
     ///
-    pub async fn try_verify(&self, task: &AssignedTask) -> Result<(), VerifierError> {
+    pub async fn verify(&self, task: &AssignedTask) -> Result<(), VerifierError> {
         let chunk_id = task.chunk_id;
         let contribution_id = task.contribution_id;

-        let challenge_locator = "challenge";
-        let response_locator = "response";
-        let next_challenge_locator = "next_challenge";
-
-        // Download and process the challenge file.
-        let challenge_hash = self
-            .process_challenge_file(chunk_id, contribution_id, &challenge_locator)
-            .await?;
+        // Download the challenge file from the coordinator
+        let challenge = self.download_challenge_file(chunk_id, contribution_id).await?;
+        let challenge_hash = calculate_hash(&challenge);
+        debug!("The challenge hash is {}", pretty_hash(&challenge_hash));

-        // Download and process the response file.
-        let response_hash = self
-            .process_response_file(chunk_id, contribution_id, &response_locator)
-            .await?;
+        // Download the response file from the coordinator
+        let response = self.download_response_file(chunk_id, contribution_id).await?;
+        let response_hash = calculate_hash(&response);
+        debug!("The response hash is {}", pretty_hash(&response_hash));

-        // Run verification on a chunk with the given locators.
         info!(
             "Running verification on chunk {} contribution {}",
             chunk_id, contribution_id
         );
-        let duration = self.run_verification(chunk_id, &challenge_locator, &response_locator, &next_challenge_locator);
+        let start = Instant::now();
+        let next_challenge = self.run_verification(chunk_id, &challenge, &response);
+        let duration = start.elapsed().as_millis();
         info!(
             "Verification on chunk {} contribution {} completed in {} ms",
             chunk_id, contribution_id, duration,
         );

-        // Fetch the next challenge file from the filesystem.
-        let (next_challenge_file, next_challenge_hash) = self.read_next_challenge_file(&next_challenge_locator).await?;
+        let next_challenge_hash = calculate_hash(&next_challenge);
+        debug!("The next challenge hash is {}", pretty_hash(&next_challenge_hash));

-        // Verify that the next challenge file stores the correct response hash.
-        self.verify_response_hash(&next_challenge_file, &response_hash)?;
+        // Verify that the next challenge stores the correct response hash.
+        self.verify_response_hash(&next_challenge, &response_hash)?;

         // Construct a signature and serialize the contribution.
         let signature_and_next_challenge_bytes = self.serialize_contribution_and_signature(
             &challenge_hash,
             &response_hash,
             &next_challenge_hash,
-            next_challenge_file,
+            next_challenge,
         )?;

         // Upload the signature and new challenge file