diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml
index 27218d79aafe..ea0e3e53ef64 100644
--- a/core/tests/vm-benchmark/Cargo.toml
+++ b/core/tests/vm-benchmark/Cargo.toml
@@ -13,6 +13,7 @@ zksync_vm_benchmark_harness.workspace = true
 rand.workspace = true
 vise.workspace = true
 tokio.workspace = true
+hex.workspace = true
 
 [dev-dependencies]
 criterion.workspace = true
diff --git a/core/tests/vm-benchmark/benches/criterion.rs b/core/tests/vm-benchmark/benches/criterion.rs
index c1b660823b84..8ff771987b2d 100644
--- a/core/tests/vm-benchmark/benches/criterion.rs
+++ b/core/tests/vm-benchmark/benches/criterion.rs
@@ -7,45 +7,101 @@ use criterion::{
 use zksync_types::Transaction;
 use zksync_vm_benchmark_harness::{
     cut_to_allowed_bytecode_size, get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx,
-    get_load_test_tx, get_realistic_load_test_tx, BenchmarkingVm, BenchmarkingVmFactory, Fast, Lambda,
-    Legacy, LoadTestParams,
+    get_load_test_tx, get_realistic_load_test_tx, BenchmarkingVm, BenchmarkingVmFactory, Fast,
+    Lambda, Legacy, LoadTestParams,
 };
 
 const SAMPLE_SIZE: usize = 20;
 const ZKSYNC_HOME: &str = std::env!("ZKSYNC_HOME");
 
+// fn benches_in_folder<VM: BenchmarkingVmFactory, const FULL: bool>(c: &mut Criterion) {
+//     let mut group = c.benchmark_group(VM::LABEL.as_str());
+//     group
+//         .sample_size(SAMPLE_SIZE)
+//         .measurement_time(Duration::from_secs(10));
+//
+//     let benches = format!(
+//         "{}/core/tests/vm-benchmark/deployment_benchmarks",
+//         ZKSYNC_HOME
+//     );
+//
+//     for path in std::fs::read_dir(&benches).unwrap() {
+//         let path = path.unwrap().path();
+//
+//         let test_contract = std::fs::read(&path).expect("failed to read file");
+//
+//         let code = cut_to_allowed_bytecode_size(&test_contract).unwrap();
+//         let tx = get_deploy_tx(code);
+//         let file_name = path.file_name().unwrap().to_str().unwrap();
+//         let full_suffix = if FULL { "/full" } else { "" };
+//         let bench_name = format!("{file_name}{full_suffix}");
+//         group.bench_function(bench_name, |bencher| {
+//             if FULL {
+//                 // Include VM initialization / drop into the measured time
+//                 bencher.iter(|| BenchmarkingVm::<VM>::default().run_transaction(black_box(&tx)));
+//             } else {
+//                 bencher.iter_batched(
+//                     BenchmarkingVm::<VM>::default,
+//                     |mut vm| {
+//                         let result = vm.run_transaction(black_box(&tx));
+//                         (vm, result)
+//                     },
+//                     BatchSize::LargeInput, // VM can consume significant amount of RAM, especially the new one
+//                 );
+//             }
+//         });
+//     }
+// }
+//
+pub fn program_from_file(bin_path: &str) -> Vec<u8> {
+    let program = std::fs::read(bin_path).unwrap();
+    let encoded = String::from_utf8(program).unwrap();
+
+    if &encoded[..2] != "0x" {
+        panic!("Wrong hex");
+    }
+
+    let bin = hex::decode(&encoded[2..]).unwrap();
+
+    bin
+}
+// Simpler version
 fn benches_in_folder<VM: BenchmarkingVmFactory, const FULL: bool>(c: &mut Criterion) {
     let mut group = c.benchmark_group(VM::LABEL.as_str());
+
     group
         .sample_size(SAMPLE_SIZE)
         .measurement_time(Duration::from_secs(10));
 
+    let send_bench_tag = "send";
+    let send_bench = format!(
+        "{}/core/tests/vm-benchmark/deployment_benchmarks/{}",
+        ZKSYNC_HOME, send_bench_tag
+    );
-    let benches = format!("{}/core/tests/vm-benchmark/deployment_benchmarks", ZKSYNC_HOME);
-
-    for path in std::fs::read_dir(&benches).unwrap() {
-        let path = path.unwrap().path();
-
-        let test_contract = std::fs::read(&path).expect("failed to read file");
+    let fibonacci_bench_tag = "fibonacci_rec";
+    let fibonacci_bench = format!(
+        "{}/core/tests/vm-benchmark/deployment_benchmarks/{}",
+        ZKSYNC_HOME, fibonacci_bench_tag
+    );
 
-        let code = cut_to_allowed_bytecode_size(&test_contract).unwrap();
-        let tx = get_deploy_tx(code);
-        let file_name = path.file_name().unwrap().to_str().unwrap();
-        let full_suffix = if FULL { "/full" } else { "" };
-        let bench_name = format!("{file_name}{full_suffix}");
+    let benches: Vec<(&str, String)> = vec![
+        (send_bench_tag, send_bench),
+        (fibonacci_bench_tag, fibonacci_bench),
+    ];
+    for (bench_tag, bench_path) in benches {
+        let bench_name = format!("{bench_tag}/full");
+        // Only benchmark the tx execution itself
+        let code = program_from_file(&bench_path);
+        let tx = get_deploy_tx(&code[..]);
         group.bench_function(bench_name, |bencher| {
-            if FULL {
-                // Include VM initialization / drop into the measured time
-                bencher.iter(|| BenchmarkingVm::<VM>::default().run_transaction(black_box(&tx)));
-            } else {
-                bencher.iter_batched(
-                    BenchmarkingVm::<VM>::default,
-                    |mut vm| {
-                        let result = vm.run_transaction(black_box(&tx));
-                        (vm, result)
-                    },
-                    BatchSize::LargeInput, // VM can consume significant amount of RAM, especially the new one
-                );
-            }
+            bencher.iter_batched(
+                BenchmarkingVm::<VM>::default,
+                |mut vm| {
+                    let result = vm.run_transaction(black_box(&tx));
+                    (vm, result)
+                },
+                BatchSize::LargeInput,
+            );
         });
     }
 }
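Note: the new program_from_file helper above assumes each file under deployment_benchmarks holds the contract bytecode as a single 0x-prefixed hex string. A minimal sketch (illustrative only, not part of the diff) of producing a file in that format with the same hex crate; write_benchmark_file is a hypothetical name:

    // Hypothetical helper: hex-encodes raw bytecode with a "0x" prefix so that
    // program_from_file() can read it back for benchmarking.
    fn write_benchmark_file(path: &str, bytecode: &[u8]) -> std::io::Result<()> {
        let encoded = format!("0x{}", hex::encode(bytecode));
        std::fs::write(path, encoded)
    }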