Skip to content
This repository was archived by the owner on Oct 4, 2019. It is now read-only.
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 3 additions & 6 deletions jsontests/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
name = "jsontests"
version = "0.0.0"
license = "Apache-2.0"
authors = ["Stewart Mackenzie <setori88@gmail.com>", "Wei Tang <hi@that.world>"]
authors = ["Stewart Mackenzie <setori88@gmail.com>", "Wei Tang <hi@that.world>", "Mike Lubinets <public@mersinvald.me>"]
edition = "2018"

[[bench]]
Expand All @@ -20,10 +20,7 @@ env_logger = "0.5.11"
sha3 = "0.6"
ethereum-rlp = { version = "0.2", default-features = false }
criterion = "0.2.5"

[features]
default = []
bench = []
failure = "0.1.5"

[target.'cfg(unix)'.dependencies]
gag = "0.1.10"
gag = "0.1.10"
8 changes: 5 additions & 3 deletions jsontests/benches/performance.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,10 @@ use std::time::Duration;

#[derive(JsonTests)]
#[directory = "jsontests/res/files/eth/VMTests/vmPerformance"]
#[test_with = "jsontests::util::run_test"]
#[bench_with = "jsontests::util::run_bench"]
#[test_with = "jsontests::vmtests::run_test"]
#[bench_with = "jsontests::vmtests::run_bench"]
#[criterion_config = "criterion_cfg"]
struct _Performance;
struct Performance;

pub fn criterion_cfg() -> Criterion {
// Due to poor SputnikVM performance, there's no chance to get a lot of measurements
Expand All @@ -25,3 +25,5 @@ pub fn criterion_cfg() -> Criterion {
.measurement_time(Duration::from_secs(10))
.noise_threshold(0.07)
}

criterion_main!(Performance_bench_main);
1 change: 1 addition & 0 deletions jsontests/jsontests-derive/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -18,4 +18,5 @@ serde_json = "1.0"
failure = "0.1.2"
itertools = "0.7.8"
criterion = "0.2.5"
rayon = "1.0.3"

26 changes: 22 additions & 4 deletions jsontests/jsontests-derive/src/attr.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,26 @@ use syn::Ident;
use syn::Lit::Str;
use syn::MetaItem;

/// How the generated tests obtain their JSON fixture data.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Runtime {
/// The JSON test data is serialized into the generated code at compile time.
Static,
/// The generated code loads the JSON file from disk when the test runs.
Dynamic,
}

impl Default for Runtime {
fn default() -> Self {
Runtime::Dynamic
}
}

/// Parsed contents of the `#[derive(JsonTests)]` helper attributes.
#[derive(Default)]
pub struct Config {
/// Directory holding the JSON test files (`#[directory = "..."]`).
pub directory: String,
/// Function invoked by each generated test (`#[test_with = "..."]`).
pub test_with: ExternalRef,
/// Function invoked by each generated bench, if benches were requested
/// via `#[bench_with = "..."]`.
pub bench_with: Option<ExternalRef>,
/// Factory for a custom `Criterion` config (`#[criterion_config = "..."]`).
pub criterion_config: Option<ExternalRef>,
/// Custom test patch type (`#[patch = "..."]`).
pub patch: Option<ExternalRef>,
/// Whether tests embed data at compile time (`static`) or read the JSON
/// file when the test runs (`dynamic`, the default).
pub runtime: Runtime,
// Set by the bare `#[skip]` attribute; effect not visible in this view — TODO confirm.
pub skip: bool,
/// Set by `#[should_panic]`; generated tests get a `#[should_panic]` attribute.
pub should_panic: bool,
}
Expand Down Expand Up @@ -46,16 +59,13 @@ pub fn extract_attrs(ast: &syn::DeriveInput) -> Result<Config, Error> {
#[derive(JsonTests)]\n\
#[directory = \"../tests/testset\"]\n\
#[test_with = \"test::test_function\"]\n\
#[runtime = \"static|dynamic\"] (Optional, default = dynamic)\n\
#[bench_with = \"test::bench_function\"] (Optional)\n\
#[patch = \"CustomTestPatch\"] (Optional)\n\
#[skip] (Optional)\n\
#[should_panic] (Optional)\n\
struct TestSet;";

if ast.attrs.len() < 2 || ast.attrs.len() > 6 {
return Err(failure::err_msg(ERROR_MSG));
}

let config = ast
.attrs
.iter()
Expand All @@ -81,6 +91,14 @@ pub fn extract_attrs(ast: &syn::DeriveInput) -> Result<Config, Error> {
patch: Some(ExternalRef::from(value.clone())),
..config
},
"runtime" => Config {
runtime: match value.as_ref() {
"static" => Runtime::Static,
"dynamic" => Runtime::Dynamic,
_ => panic!("{}", ERROR_MSG),
},
..config
},
_ => panic!("{}", ERROR_MSG),
},
MetaItem::Word(ref ident) => match ident.as_ref() {
Expand Down
213 changes: 144 additions & 69 deletions jsontests/jsontests-derive/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,16 +14,30 @@ use proc_macro::TokenStream;
use syn::Ident;

use self::{
attr::{extract_attrs, Config},
attr::{extract_attrs, Config, Runtime},
tests::read_tests_from_dir,
util::*,
};
use crate::tests::{Test, TestAST, TestASTRunner};
use std::ffi::OsStr;
use std::path::Path;

#[proc_macro_derive(
JsonTests,
attributes(directory, test_with, bench_with, criterion_config, skip, should_panic, patch)
attributes(
directory,
test_with,
bench_with,
criterion_config,
skip,
should_panic,
patch,
runtime
)
)]
pub fn json_tests(input: TokenStream) -> TokenStream {
let timer = Timer::new("JsonTests proc-macro");

// Construct a string representation of the type definition
let s = input.to_string();

Expand All @@ -42,70 +56,93 @@ pub fn json_tests(input: TokenStream) -> TokenStream {

fn impl_json_tests(ast: &syn::DeriveInput) -> Result<quote::Tokens, Error> {
let config = extract_attrs(&ast)?;
let tests = read_tests_from_dir(&config.directory)?;
let tests = read_tests_from_dir(&config, &config.directory)?;
let mut tokens = quote::Tokens::new();
let mut bench_idents = Vec::new();

// split tests into groups by filepath
let tests = tests.group_by(|test| test.path.clone());

let dir_mod_name = open_directory_module(&config, &mut tokens);
let struct_ident = &ast.ident;

// If benchmarking support is requested, import Criterion
if config.bench_with.is_some() {
tokens.append(quote! {
use criterion::Criterion;
use criterion::Criterion as _;
})
}

for (filepath, tests) in &tests {
// If tests count in this file is 1, we don't need submodule
let tests = tests.collect::<Vec<_>>();
let need_file_submodule = tests.len() > 1;
let mut file_mod_name = None;
if need_file_submodule {
file_mod_name = Some(open_file_module(&filepath, &mut tokens));
// If benchmarking support is requested, import Criterion
if config.bench_with.is_some() {
tokens.append(quote! {
use criterion::Criterion;
})
}
}
let mut ast_runner = AstRunner {
config: &config,
tokens,
bench_idents: Vec::new(),
modules: Vec::new(),
};

// Generate test function
for test in tests {
let name = sanitize_ident(&test.name);
let name_ident = Ident::from(name.as_ref());
let data = json::to_string(&test.data)?;

generate_test(&config, &name_ident, &data, &mut tokens);
generate_bench(&config, &name_ident, &data, &mut tokens).map(|mut ident| {
// prepend dir submodule
ident = Ident::from(format!("{}::{}", dir_mod_name, ident.as_ref()));
// prepend file submodule
if need_file_submodule {
ident = Ident::from(format!("{}::{}", file_mod_name.as_ref().unwrap(), ident.as_ref()));
}
bench_idents.push(ident);
});
}
let traverse_timer = Timer::new("TestAST traverse");
tests.traverse(&mut ast_runner);
drop(traverse_timer);

if need_file_submodule {
// Close file module
close_brace(&mut tokens)
}
let mut tokens = ast_runner.tokens;

generate_criterion_macros(&config, &struct_ident, &ast_runner.bench_idents, &mut tokens);

Ok(tokens)
}

/// Traverses the test AST, appending generated test/bench functions to `tokens`.
struct AstRunner<'a> {
// Attribute configuration extracted from the derive input.
config: &'a Config,
// Output token stream that generated items are appended to.
tokens: quote::Tokens,
// Stack of currently open module names, used to qualify bench idents.
modules: Vec<String>,
// Fully qualified idents of generated bench functions, later passed to
// the criterion macro generation.
bench_idents: Vec<Ident>,
}

impl AstRunner<'_> {
/// Open a nested `mod` in the generated output and remember its sanitized
/// name so bench idents can later be fully qualified.
fn push_module(&mut self, name: String) {
    let sanitized = sanitize_ident(&name);
    self.modules.push(sanitized.clone());
    open_module(sanitized, &mut self.tokens);
}

// Close directory module
close_brace(&mut tokens);
/// Close the innermost open module: drop its name from the stack and
/// emit the matching closing brace.
fn pop_module(&mut self) {
self.modules.pop();
close_brace(&mut self.tokens)
}
}

generate_criterion_macros(&config, &bench_idents, &mut tokens);
impl TestASTRunner for AstRunner<'_> {
/// Emit a test function (and, when benching is enabled, a bench function)
/// for a single JSON test case.
fn handle_test(&mut self, test: Test) {
    // Serialize embedded test data; expected to be `Some` for
    // static-runtime tests (the static code path unwraps it downstream).
    let data = test.data.map(|d| json::to_string(&d).expect("failed to serialize test data to JSON"));
    let name = sanitize_ident(&test.name);
    let name_ident = Ident::from(name.as_ref());
    generate_test(&self.config, &test.path, &name_ident, &data, &mut self.tokens);
    // `generate_bench` returns the bench fn ident only when `bench_with` was given.
    // `if let` instead of `Option::map` — we only want the side effect.
    if let Some(ident) = generate_bench(&self.config, &test.path, &name_ident, &data, &mut self.tokens) {
        // Prepend the currently open submodule path so the bench fn can be
        // referenced from the crate root.
        let modules_chain = self.modules.join("::");
        let bench_ident = format!("{}::{}", modules_chain, ident);
        self.bench_idents.push(bench_ident.into());
    }
}

Ok(tokens)
/// Opening a logical module in the test tree opens a nested Rust module.
fn handle_open_module(&mut self, name: String, _nodes: &[TestAST]) {
self.push_module(name);
}

/// Closing a logical module closes the corresponding generated Rust module.
fn handle_close_module(&mut self) {
self.pop_module()
}

/// Each test file opens its own nested module, same as a directory module.
fn handle_open_test_file(&mut self, name: String, _nodes: &[TestAST]) {
self.push_module(name);
}

/// Closing a test file closes its generated module.
fn handle_close_test_file(&mut self) {
self.pop_module()
}
}

fn generate_test(config: &Config, test_name: &Ident, data: &str, tokens: &mut quote::Tokens) {
fn generate_test(
config: &Config,
path: impl AsRef<Path>,
test_name: &Ident,
data: &Option<String>,
tokens: &mut quote::Tokens,
) {
let test_func_path = &config.test_with.path;
let test_func_name = &config.test_with.name;
let test_name_str = test_name.as_ref();
Expand All @@ -116,17 +153,38 @@ fn generate_test(config: &Config, test_name: &Ident, data: &str, tokens: &mut qu
tokens.append(quote! {#[should_panic]});
}

tokens.append(quote! {
fn #test_name() {
use #test_func_path;
use #patch_path;
let data = #data;
#test_func_name::<#patch_name>(#test_name_str, data);
match config.runtime {
Runtime::Static => {
let data = data.as_ref().unwrap();
tokens.append(quote! {
pub(crate) fn #test_name() {
use #test_func_path;
use #patch_path;
let data = #data;
#test_func_name::<#patch_name>(#test_name_str, data);
}
});
}
Runtime::Dynamic => {
let path = path.as_ref().to_str().unwrap();
tokens.append(quote! {
pub(crate) fn #test_name() {
use #test_func_path;
use #patch_path;
jsontests::run_tests_from_file(#path, #test_func_name::<#patch_name>)
}
});
}
});
}
}

fn generate_bench(config: &Config, test_name: &Ident, data: &str, tokens: &mut quote::Tokens) -> Option<Ident> {
fn generate_bench(
config: &Config,
path: impl AsRef<Path>,
test_name: &Ident,
data: &Option<String>,
tokens: &mut quote::Tokens,
) -> Option<Ident> {
if config.bench_with.is_none() {
return None;
}
Expand All @@ -140,33 +198,50 @@ fn generate_bench(config: &Config, test_name: &Ident, data: &str, tokens: &mut q

let (patch_name, patch_path) = derive_patch(config);

tokens.append(quote! {
pub fn #bench_ident(c: &mut Criterion) {
use #bench_func_path;
use #patch_path;
let data = #data;
#bench_func_name::<#patch_name>(c, #bench_name, data);
match config.runtime {
Runtime::Static => {
let data = data.as_ref().unwrap();
tokens.append(quote! {
pub(crate) fn #bench_ident(c: &mut criterion::Criterion) {
use #bench_func_path;
use #patch_path;
let data = #data;
#bench_func_name::<#patch_name>(c, #bench_name, data);
}
});
}
});
Runtime::Dynamic => {
let path = path.as_ref().to_str().unwrap();
tokens.append(quote! {
pub(crate) fn #bench_ident(c: &mut criterion::Criterion) {
use #bench_func_path;
use #patch_path;
jsontests::run_bench_from_file(c, #path, #bench_func_name::<#patch_name>)
}
});
}
}

Some(bench_ident)
}

fn generate_criterion_macros(config: &Config, benches: &[Ident], tokens: &mut quote::Tokens) {
fn generate_criterion_macros(config: &Config, group_name: &Ident, benches: &[Ident], tokens: &mut quote::Tokens) {
let group_name = format!("{}_bench_main", group_name);
let group_name = Ident::from(sanitize_ident(&group_name));
// Generate criterion macros
if config.bench_with.is_some() {
let benches = benches.iter().map(AsRef::as_ref).join(" , ");
let config = config
.criterion_config
.as_ref()
.map(|cfg| cfg.path.clone())
.unwrap_or_else(|| Ident::from("Criterion::default"));
.unwrap_or_else(|| Ident::from("criterion::Criterion::default"));
let template = quote! {
criterion_group! {
name = main;
name = #group_name;
config = #config();
targets = TARGETS
};
}
};
tokens.append(template.as_ref().replace("TARGETS", &benches));
}
Expand Down
Loading