From 09bfb573c36895689bbed157c7e08e09df58c60f Mon Sep 17 00:00:00 2001 From: Yash Atreya <44857776+yash-atreya@users.noreply.github.com> Date: Wed, 18 Jun 2025 17:18:49 +0100 Subject: [PATCH 1/9] feat: criterion benches --- Cargo.lock | 110 +++++++++++++++++++ Cargo.toml | 1 + benches/Cargo.toml | 34 ++++++ benches/forge_build_no_cache.rs | 33 ++++++ benches/forge_build_with_cache.rs | 36 +++++++ benches/forge_test.rs | 36 +++++++ benches/src/lib.rs | 171 ++++++++++++++++++++++++++++++ 7 files changed, 421 insertions(+) create mode 100644 benches/Cargo.toml create mode 100644 benches/forge_build_no_cache.rs create mode 100644 benches/forge_build_with_cache.rs create mode 100644 benches/forge_test.rs create mode 100644 benches/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 4ebb5c3ed4097..812228968ba4b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -929,6 +929,12 @@ dependencies = [ "libc", ] +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "annotate-snippets" version = "0.11.5" @@ -2344,6 +2350,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53" +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + [[package]] name = "cast" version = "1.2.3" @@ -2486,8 +2498,10 @@ checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" dependencies = [ "android-tzdata", "iana-time-zone", + "js-sys", "num-traits", "serde", + "wasm-bindgen", "windows-link", ] @@ -2949,6 +2963,42 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast 0.3.0", + "ciborium", + "clap", + "criterion-plot", + "is-terminal", + "itertools 0.10.5", + "num-traits", + "once_cell", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast 0.3.0", + "itertools 0.10.5", +] + [[package]] name = "crossbeam-channel" version = "0.5.15" @@ -4030,6 +4080,22 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "foundry-bench" +version = "0.1.0" +dependencies = [ + "chrono", + "criterion", + "eyre", + "foundry-compilers", + "foundry-config", + "foundry-test-utils", + "serde", + "serde_json", + "tempfile", + "tokio", +] + [[package]] name = "foundry-block-explorers" version = "0.18.0" @@ -6467,6 +6533,12 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" + [[package]] name = "op-alloy-consensus" version = "0.17.2" @@ -6930,6 +7002,34 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = 
"plotters" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" + +[[package]] +name = "plotters-svg" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] + [[package]] name = "portable-atomic" version = "1.11.1" @@ -9292,6 +9392,16 @@ dependencies = [ "crunchy", ] +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tinyvec" version = "1.9.0" diff --git a/Cargo.toml b/Cargo.toml index d635ea33ac0fc..d3d5d99f25178 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,6 @@ [workspace] members = [ + "benches/", "crates/anvil/", "crates/anvil/core/", "crates/anvil/rpc/", diff --git a/benches/Cargo.toml b/benches/Cargo.toml new file mode 100644 index 0000000000000..4d13048ad860b --- /dev/null +++ b/benches/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "foundry-bench" +version = "0.1.0" +edition = "2021" + +[[bench]] +name = "forge_test" +path = "forge_test.rs" +harness = false + +[[bench]] +name = "forge_build_no_cache" +path = "forge_build_no_cache.rs" +harness = false + +[[bench]] +name = "forge_build_with_cache" +path = "forge_build_with_cache.rs" +harness = false + +[dependencies] +criterion = { version = "0.5", features = ["html_reports"] } +foundry-test-utils.workspace = true +foundry-config.workspace = true +foundry-compilers = { workspace = true, features = ["project-util"] } +eyre.workspace = true +serde.workspace = true +serde_json.workspace = true +tempfile.workspace = true +tokio = { workspace = true, features = ["full"] } +chrono = { version = "0.4", features = ["serde"] } + +[dev-dependencies] +foundry-test-utils.workspace = true \ No newline at end of file diff --git a/benches/forge_build_no_cache.rs b/benches/forge_build_no_cache.rs new file mode 100644 index 0000000000000..51eb1219ff0e6 --- /dev/null +++ b/benches/forge_build_no_cache.rs @@ -0,0 +1,33 @@ +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; +use foundry_bench::{install_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS}; + +fn benchmark_forge_build_no_cache(c: &mut Criterion) { + let mut group = c.benchmark_group("forge-build-no-cache"); + group.sample_size(10); + + for &version in FOUNDRY_VERSIONS { + // Install foundry version once per version + install_foundry_version(version).expect("Failed to install foundry version"); + + for repo_config in BENCHMARK_REPOS { + // Setup: prepare project OUTSIDE benchmark + let project = BenchmarkProject::setup(repo_config).expect("Failed to setup project"); + + // Format: table_name/column_name/row_name + // This creates: forge-build-no-cache/{version}/{repo_name} + let bench_id = BenchmarkId::new(version, repo_config.name); + + group.bench_function(bench_id, |b| { + b.iter(|| { + let output = project.run_forge_build(true).expect("forge build 
failed"); + black_box(output); + }); + }); + } + } + + group.finish(); +} + +criterion_group!(benches, benchmark_forge_build_no_cache); +criterion_main!(benches); diff --git a/benches/forge_build_with_cache.rs b/benches/forge_build_with_cache.rs new file mode 100644 index 0000000000000..bc0af1de0fb2c --- /dev/null +++ b/benches/forge_build_with_cache.rs @@ -0,0 +1,36 @@ +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; +use foundry_bench::{install_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS}; + +fn benchmark_forge_build_with_cache(c: &mut Criterion) { + let mut group = c.benchmark_group("forge-build-with-cache"); + group.sample_size(10); + + for &version in FOUNDRY_VERSIONS { + // Install foundry version once per version + install_foundry_version(version).expect("Failed to install foundry version"); + + for repo_config in BENCHMARK_REPOS { + // Setup: prepare project OUTSIDE benchmark + let project = BenchmarkProject::setup(repo_config).expect("Failed to setup project"); + + // Prime the cache OUTSIDE benchmark + let _ = project.run_forge_build(false); + + // Format: table_name/column_name/row_name + // This creates: forge-build-with-cache/{version}/{repo_name} + let bench_id = BenchmarkId::new(version, repo_config.name); + + group.bench_function(bench_id, |b| { + b.iter(|| { + let output = project.run_forge_build(false).expect("forge build failed"); + black_box(output); + }); + }); + } + } + + group.finish(); +} + +criterion_group!(benches, benchmark_forge_build_with_cache); +criterion_main!(benches); diff --git a/benches/forge_test.rs b/benches/forge_test.rs new file mode 100644 index 0000000000000..08c46dcbbbe17 --- /dev/null +++ b/benches/forge_test.rs @@ -0,0 +1,36 @@ +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; +use foundry_bench::{install_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS}; + +fn benchmark_forge_test(c: &mut Criterion) { + let mut group = c.benchmark_group("forge-test"); + group.sample_size(10); + + for &version in FOUNDRY_VERSIONS { + // Install foundry version once per version + install_foundry_version(version).expect("Failed to install foundry version"); + + for repo_config in BENCHMARK_REPOS { + // Setup: prepare project OUTSIDE benchmark + let project = BenchmarkProject::setup(repo_config).expect("Failed to setup project"); + + // Build the project before running tests + project.run_forge_build(false).expect("forge build failed"); + + // Format: table_name/column_name/row_name + // This creates: forge-test/{version}/{repo_name} + let bench_id = BenchmarkId::new(version, repo_config.name); + + group.bench_function(bench_id, |b| { + b.iter(|| { + let output = project.run_forge_test().expect("forge test failed"); + black_box(output); + }); + }); + } + } + + group.finish(); +} + +criterion_group!(benches, benchmark_forge_test); +criterion_main!(benches); diff --git a/benches/src/lib.rs b/benches/src/lib.rs new file mode 100644 index 0000000000000..2c274690bc663 --- /dev/null +++ b/benches/src/lib.rs @@ -0,0 +1,171 @@ +use eyre::{Result, WrapErr}; +use foundry_compilers::project_util::TempProject; +use foundry_test_utils::util::clone_remote; +use std::{ + path::{Path, PathBuf}, + process::{Command, Output}, +}; + +/// Configuration for repositories to benchmark +#[derive(Debug, Clone)] +pub struct RepoConfig { + pub name: &'static str, + pub org: &'static str, + pub repo: &'static str, + pub rev: &'static str, +} + +/// Available repositories for 
benchmarking +pub static BENCHMARK_REPOS: &[RepoConfig] = &[ + RepoConfig { name: "account", org: "ithacaxyz", repo: "account", rev: "main" }, + // Temporarily reduced for testing + // RepoConfig { name: "solady", org: "Vectorized", repo: "solady", rev: "main" }, + // RepoConfig { name: "v4-core", org: "Uniswap", repo: "v4-core", rev: "main" }, + // RepoConfig { name: "morpho-blue", org: "morpho-org", repo: "morpho-blue", rev: "main" }, + // RepoConfig { name: "spark-psm", org: "marsfoundation", repo: "spark-psm", rev: "master" }, +]; + +/// Foundry versions to benchmark +pub static FOUNDRY_VERSIONS: &[&str] = &["stable", "nightly"]; + +/// A benchmark project that represents a cloned repository ready for testing +pub struct BenchmarkProject { + pub name: String, + pub temp_project: TempProject, + pub root_path: PathBuf, +} + +impl BenchmarkProject { + /// Set up a benchmark project by cloning the repository + pub fn setup(config: &RepoConfig) -> Result { + let temp_project = + TempProject::dapptools().wrap_err("Failed to create temporary project")?; + + // Get root path before clearing + let root_path = temp_project.root().to_path_buf(); + let root = root_path.to_str().unwrap(); + + // Remove all files in the directory + for entry in std::fs::read_dir(&root_path)? { + let entry = entry?; + let path = entry.path(); + if path.is_dir() { + std::fs::remove_dir_all(&path).ok(); + } else { + std::fs::remove_file(&path).ok(); + } + } + + // Clone the repository + let repo_url = format!("https://github.com/{}/{}.git", config.org, config.repo); + clone_remote(&repo_url, root); + + // Checkout specific revision if provided + if !config.rev.is_empty() && config.rev != "main" && config.rev != "master" { + let status = Command::new("git") + .current_dir(root) + .args(["checkout", config.rev]) + .status() + .wrap_err("Failed to checkout revision")?; + + if !status.success() { + eyre::bail!("Git checkout failed for {}", config.name); + } + } + + // Install dependencies + Self::install_dependencies(&root_path)?; + + Ok(BenchmarkProject { name: config.name.to_string(), root_path, temp_project }) + } + + /// Install forge dependencies for the project + fn install_dependencies(root: &Path) -> Result<()> { + // Install forge dependencies if foundry.toml exists + if root.join("foundry.toml").exists() { + let status = Command::new("forge") + .current_dir(root) + .args(["install"]) + .status() + .wrap_err("Failed to run forge install")?; + + if !status.success() { + println!("Warning: forge install failed for {}", root.display()); + } + } + + // Install npm dependencies if package.json exists + if root.join("package.json").exists() { + let status = Command::new("npm") + .current_dir(root) + .args(["install"]) + .status() + .wrap_err("Failed to run npm install")?; + + if !status.success() { + println!("Warning: npm install failed for {}", root.display()); + } + } + + Ok(()) + } + + /// Run forge test command and return the output + pub fn run_forge_test(&self) -> Result { + Command::new("forge") + .current_dir(&self.root_path) + .args(["test"]) + .output() + .wrap_err("Failed to run forge test") + } + + /// Run forge build command and return the output + pub fn run_forge_build(&self, clean_cache: bool) -> Result { + if clean_cache { + // Clean first + let _ = Command::new("forge").current_dir(&self.root_path).args(["clean"]).output(); + } + + Command::new("forge") + .current_dir(&self.root_path) + .args(["build"]) + .output() + .wrap_err("Failed to run forge build") + } + + /// Get the root path of the project + 
pub fn root(&self) -> &Path { + &self.root_path + } +} + +/// Install a specific foundry version +pub fn install_foundry_version(version: &str) -> Result<()> { + let status = Command::new("foundryup") + .args(["--install", version]) + .status() + .wrap_err("Failed to run foundryup")?; + + if !status.success() { + eyre::bail!("Failed to install foundry version: {}", version); + } + + Ok(()) +} + +/// Get the current forge version +pub fn get_forge_version() -> Result { + let output = Command::new("forge") + .args(["--version"]) + .output() + .wrap_err("Failed to get forge version")?; + + if !output.status.success() { + eyre::bail!("forge --version failed"); + } + + let version = + String::from_utf8(output.stdout).wrap_err("Invalid UTF-8 in forge version output")?; + + Ok(version.lines().next().unwrap_or("unknown").to_string()) +} From 0dc81870554a6ec2f9f9a3fda180afe47a5e3897 Mon Sep 17 00:00:00 2001 From: Yash Atreya <44857776+yash-atreya@users.noreply.github.com> Date: Wed, 25 Jun 2025 16:56:18 +0530 Subject: [PATCH 2/9] - setup benchmark repos in parallel - run forge build in parallet for forge-test bench - switch foundry versions - README specifying prereqs --- Cargo.lock | 1 + benches/Cargo.toml | 1 + benches/README.md | 88 +++++++++++++++++++++++++++++++ benches/forge_build_no_cache.rs | 23 +++++--- benches/forge_build_with_cache.rs | 27 +++++++--- benches/forge_test.rs | 28 ++++++---- benches/src/lib.rs | 22 ++++++-- 7 files changed, 162 insertions(+), 28 deletions(-) create mode 100644 benches/README.md diff --git a/Cargo.lock b/Cargo.lock index 812228968ba4b..98999f6f16ed0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4090,6 +4090,7 @@ dependencies = [ "foundry-compilers", "foundry-config", "foundry-test-utils", + "rayon", "serde", "serde_json", "tempfile", diff --git a/benches/Cargo.toml b/benches/Cargo.toml index 4d13048ad860b..faa0a13a04d63 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -29,6 +29,7 @@ serde_json.workspace = true tempfile.workspace = true tokio = { workspace = true, features = ["full"] } chrono = { version = "0.4", features = ["serde"] } +rayon.workspace = true [dev-dependencies] foundry-test-utils.workspace = true \ No newline at end of file diff --git a/benches/README.md b/benches/README.md new file mode 100644 index 0000000000000..8bdfcc9f63ea2 --- /dev/null +++ b/benches/README.md @@ -0,0 +1,88 @@ +# Foundry Benchmarks + +This directory contains performance benchmarks for Foundry commands across multiple repositories and Foundry versions. + +## Prerequisites + +Before running the benchmarks, ensure you have the following installed: + +1. **Rust and Cargo** - Required for building and running the benchmarks + ```bash + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh + ``` + +2. **Foundryup** - The Foundry toolchain installer + ```bash + curl -L https://foundry.paradigm.xyz | bash + foundryup + ``` + +3. **Required Foundry Versions** - Install all versions defined in `src/lib.rs` (see `FOUNDRY_VERSIONS`) + ```bash + foundryup --install stable + foundryup --install nightly + # Install any additional versions you add to FOUNDRY_VERSIONS in src/lib.rs + ``` + +4. **Git** - For cloning benchmark repositories + +5. 
**npm** - Some repositories require npm dependencies + ```bash + # Install Node.js and npm from https://nodejs.org/ + ``` + +## Running Benchmarks + +### Run all benchmarks +```bash +cargo bench +``` + +### Run specific benchmark +```bash +cargo bench forge_test +cargo bench forge_build_no_cache +cargo bench forge_build_with_cache +``` + +### Generate HTML reports +Criterion automatically generates HTML reports in `target/criterion/`. Open the reports in a browser: +```bash +open target/criterion/report/index.html +``` + +## Benchmark Structure + +- `forge_test` - Benchmarks `forge test` command across repos +- `forge_build_no_cache` - Benchmarks `forge build` with clean cache +- `forge_build_with_cache` - Benchmarks `forge build` with existing cache + +## Configuration + +### Repositories +Edit `src/lib.rs` to modify the list of repositories to benchmark: +```rust +pub static BENCHMARK_REPOS: &[RepoConfig] = &[ + RepoConfig { name: "account", org: "ithacaxyz", repo: "account", rev: "main" }, + // Add more repositories here +]; +``` + +### Foundry Versions +Edit `src/lib.rs` to modify the list of Foundry versions: +```rust +pub static FOUNDRY_VERSIONS: &[&str] = &["stable", "nightly"]; +``` + +## Results + +Benchmark results are displayed in the terminal and saved as HTML reports. The reports show: +- Execution time statistics (mean, median, standard deviation) +- Comparison between different Foundry versions +- Performance trends across repositories + +## Troubleshooting + +1. **Foundry version not found**: Ensure the version is installed with `foundryup --install ` +2. **Repository clone fails**: Check network connectivity and repository access +3. **Build failures**: Some repositories may have specific dependencies - check their README files \ No newline at end of file diff --git a/benches/forge_build_no_cache.rs b/benches/forge_build_no_cache.rs index 51eb1219ff0e6..a84ce5c691b54 100644 --- a/benches/forge_build_no_cache.rs +++ b/benches/forge_build_no_cache.rs @@ -1,18 +1,27 @@ use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; -use foundry_bench::{install_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS}; +use foundry_bench::{switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS}; +use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; fn benchmark_forge_build_no_cache(c: &mut Criterion) { let mut group = c.benchmark_group("forge-build-no-cache"); group.sample_size(10); - for &version in FOUNDRY_VERSIONS { - // Install foundry version once per version - install_foundry_version(version).expect("Failed to install foundry version"); - - for repo_config in BENCHMARK_REPOS { - // Setup: prepare project OUTSIDE benchmark + // Setup all projects once - clone repos in parallel + let projects: Vec<_> = BENCHMARK_REPOS + .par_iter() + .map(|repo_config| { + // Setup: prepare project (clone repo) let project = BenchmarkProject::setup(repo_config).expect("Failed to setup project"); + (repo_config, project) + }) + .collect(); + + for &version in FOUNDRY_VERSIONS { + // Switch foundry version + switch_foundry_version(version).expect("Failed to switch foundry version"); + // Run benchmarks for each project + for (repo_config, project) in &projects { // Format: table_name/column_name/row_name // This creates: forge-build-no-cache/{version}/{repo_name} let bench_id = BenchmarkId::new(version, repo_config.name); diff --git a/benches/forge_build_with_cache.rs b/benches/forge_build_with_cache.rs index 
bc0af1de0fb2c..33f788cdfbfa2 100644 --- a/benches/forge_build_with_cache.rs +++ b/benches/forge_build_with_cache.rs @@ -1,21 +1,32 @@ use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; -use foundry_bench::{install_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS}; +use foundry_bench::{switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS}; +use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; fn benchmark_forge_build_with_cache(c: &mut Criterion) { let mut group = c.benchmark_group("forge-build-with-cache"); group.sample_size(10); - for &version in FOUNDRY_VERSIONS { - // Install foundry version once per version - install_foundry_version(version).expect("Failed to install foundry version"); - - for repo_config in BENCHMARK_REPOS { - // Setup: prepare project OUTSIDE benchmark + // Setup all projects once - clone repos in parallel + let projects: Vec<_> = BENCHMARK_REPOS + .par_iter() + .map(|repo_config| { + // Setup: prepare project (clone repo) let project = BenchmarkProject::setup(repo_config).expect("Failed to setup project"); + (repo_config, project) + }) + .collect(); + + for &version in FOUNDRY_VERSIONS { + // Switch foundry version once per version + switch_foundry_version(version).expect("Failed to switch foundry version"); - // Prime the cache OUTSIDE benchmark + // Prime the cache for all projects in parallel + projects.par_iter().for_each(|(repo_config, project)| { let _ = project.run_forge_build(false); + }); + // Run benchmarks for each project + for (repo_config, project) in &projects { // Format: table_name/column_name/row_name // This creates: forge-build-with-cache/{version}/{repo_name} let bench_id = BenchmarkId::new(version, repo_config.name); diff --git a/benches/forge_test.rs b/benches/forge_test.rs index 08c46dcbbbe17..fab4d59184232 100644 --- a/benches/forge_test.rs +++ b/benches/forge_test.rs @@ -1,21 +1,31 @@ use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; -use foundry_bench::{install_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS}; - +use foundry_bench::{switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS}; +use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; fn benchmark_forge_test(c: &mut Criterion) { let mut group = c.benchmark_group("forge-test"); group.sample_size(10); - for &version in FOUNDRY_VERSIONS { - // Install foundry version once per version - install_foundry_version(version).expect("Failed to install foundry version"); - - for repo_config in BENCHMARK_REPOS { - // Setup: prepare project OUTSIDE benchmark + // Setup all projects once - clone repos in parallel + let projects: Vec<_> = BENCHMARK_REPOS + .par_iter() + .map(|repo_config| { + // Setup: prepare project (clone repo) let project = BenchmarkProject::setup(repo_config).expect("Failed to setup project"); + (repo_config, project) + }) + .collect(); + + for &version in FOUNDRY_VERSIONS { + // Switch foundry version once per version + switch_foundry_version(version).expect("Failed to switch foundry version"); - // Build the project before running tests + // Build all projects in parallel for this foundry version + projects.par_iter().for_each(|(_repo_config, project)| { project.run_forge_build(false).expect("forge build failed"); + }); + // Run benchmarks for each project + for (repo_config, project) in &projects { // Format: table_name/column_name/row_name // This creates: forge-test/{version}/{repo_name} let bench_id = 
BenchmarkId::new(version, repo_config.name); diff --git a/benches/src/lib.rs b/benches/src/lib.rs index 2c274690bc663..8994b06f745be 100644 --- a/benches/src/lib.rs +++ b/benches/src/lib.rs @@ -26,6 +26,20 @@ pub static BENCHMARK_REPOS: &[RepoConfig] = &[ ]; /// Foundry versions to benchmark +/// +/// To add more versions for comparison, install them first: +/// ```bash +/// foundryup --install stable +/// foundryup --install nightly +/// foundryup --install v0.2.0 # Example specific version +/// ``` +/// +/// Then add the version strings to this array. Supported formats: +/// - "stable" - Latest stable release +/// - "nightly" - Latest nightly build +/// - "v0.2.0" - Specific version tag +/// - "commit-hash" - Specific commit hash +/// - "nightly-" - Nightly build with specific revision pub static FOUNDRY_VERSIONS: &[&str] = &["stable", "nightly"]; /// A benchmark project that represents a cloned repository ready for testing @@ -139,15 +153,15 @@ impl BenchmarkProject { } } -/// Install a specific foundry version -pub fn install_foundry_version(version: &str) -> Result<()> { +/// Switch to a specific foundry version +pub fn switch_foundry_version(version: &str) -> Result<()> { let status = Command::new("foundryup") - .args(["--install", version]) + .args(["--use", version]) .status() .wrap_err("Failed to run foundryup")?; if !status.success() { - eyre::bail!("Failed to install foundry version: {}", version); + eyre::bail!("Failed to switch to foundry version: {}", version); } Ok(()) From 9f13124fdb05b3652acc0e66d2f9764812c9b41f Mon Sep 17 00:00:00 2001 From: Yash Atreya <44857776+yash-atreya@users.noreply.github.com> Date: Wed, 25 Jun 2025 17:56:58 +0530 Subject: [PATCH 3/9] feat: shell script to run benches --- benches/LATEST.md | 108 ++++++----- benches/forge_build_no_cache.rs | 4 +- benches/forge_build_with_cache.rs | 8 +- benches/forge_test.rs | 4 +- benches/run_benchmarks.sh | 289 ++++++++++++++++++++++++++++++ benches/src/lib.rs | 8 +- 6 files changed, 370 insertions(+), 51 deletions(-) create mode 100755 benches/run_benchmarks.sh diff --git a/benches/LATEST.md b/benches/LATEST.md index b212d64452d7b..bc373cb9d4c5d 100644 --- a/benches/LATEST.md +++ b/benches/LATEST.md @@ -1,85 +1,107 @@ -# Forge Benchmarking Results +# Foundry Benchmarking Results -**Generated on:** Thu 12 Jun 2025 16:57:20 CEST -**Hyperfine Version:** hyperfine 1.19.0 -**Foundry Versions Tested:** stable nightly-ac0411d0e3b9632247c9aea9535472eda09a57ae nightly -**Repositories Tested:** ithacaxyz-account solady +**Generated on:** Wed Jun 25 17:43:45 IST 2025 +**Tool:** Criterion.rs with criterion-table +**Foundry Versions Tested:** stable nightly +**Repositories Tested:** account solady v4-core morpho-blue spark-psm ## Summary -This report contains comprehensive benchmarking results comparing different Foundry versions across multiple projects. +This report contains comprehensive benchmarking results comparing different Foundry versions across multiple projects using Criterion.rs for precise performance measurements. + The following benchmarks were performed: -1. **forge test - Running the test suite (5 runs, 1 warmup)** -2. **forge build (no cache) - Clean build without cache (5 runs, cache cleaned after each run)** -3. **forge build (with cache) - Build with warm cache (5 runs, 1 warmup)** +1. **forge-test** - Running the test suite (10 samples each) +2. **forge-build-no-cache** - Clean build without cache (10 samples each) +3. 
**forge-build-with-cache** - Build with warm cache (10 samples each) --- ## Performance Comparison Tables -### forge test - -Mean execution time in seconds (lower is better): - -| Project | stable (s) | nightly-ac0411d0e3b9632247c9aea9535472eda09a57ae (s) | nightly (s) | -| --------------------- | ---------: | ---------------------------------------------------: | ----------: | -| **ithacaxyz-account** | 4.662 | 3.738 | 5.588 | -| **solady** | 3.559 | 2.933 | 3.517 | +# Benchmarks -### forge build no cache +## Table of Contents -Mean execution time in seconds (lower is better): +- [Benchmark Results](#benchmark-results) + - [forge-build-with-cache](#forge-build-with-cache) -| Project | stable (s) | nightly-ac0411d0e3b9632247c9aea9535472eda09a57ae (s) | nightly (s) | -| --------------------- | ---------: | ---------------------------------------------------: | ----------: | -| **ithacaxyz-account** | 10.777 | 10.982 | 10.979 | -| **solady** | 17.486 | 17.139 | 17.509 | +## Benchmark Results -### forge build with cache +### forge-build-with-cache -Mean execution time in seconds (lower is better): +| | `stable` | `nightly` | +|:--------------|:--------------------------|:--------------------------------- | +| **`account`** | `164.00 ms` (✅ **1.00x**) | `166.34 ms` (✅ **1.01x slower**) | -| Project | stable (s) | nightly-ac0411d0e3b9632247c9aea9535472eda09a57ae (s) | nightly (s) | -| --------------------- | ---------: | ---------------------------------------------------: | ----------: | -| **ithacaxyz-account** | 0.111 | 0.113 | 0.158 | -| **solady** | 0.084 | 0.089 | 0.108 | +--- +Made with [criterion-table](https://github.com/nu11ptr/criterion-table) +[INFO] Getting Foundry version information... ## Foundry Version Details ### stable ``` +foundryup: use - forge Version: 1.2.3-stable +Commit SHA: a813a2cee7dd4926e7c56fd8a785b54f32e0d10f +Build Timestamp: 2025-06-08T15:42:50.507050000Z (1749397370) +Build Profile: maxperf +foundryup: use - cast Version: 1.2.3-stable +Commit SHA: a813a2cee7dd4926e7c56fd8a785b54f32e0d10f +Build Timestamp: 2025-06-08T15:42:50.507050000Z (1749397370) +Build Profile: maxperf +foundryup: use - anvil Version: 1.2.3-stable +Commit SHA: a813a2cee7dd4926e7c56fd8a785b54f32e0d10f +Build Timestamp: 2025-06-08T15:42:50.507050000Z (1749397370) +Build Profile: maxperf +foundryup: use - chisel Version: 1.2.3-stable +Commit SHA: a813a2cee7dd4926e7c56fd8a785b54f32e0d10f +Build Timestamp: 2025-06-08T15:42:50.507050000Z (1749397370) +Build Profile: maxperf forge Version: 1.2.3-stable ``` -### nightly-ac0411d0e3b9632247c9aea9535472eda09a57ae - -``` -forge Version: 1.2.3-nightly -``` - ### nightly ``` +foundryup: use - forge Version: 1.2.3-nightly +Commit SHA: b515c90b9be9645b844943fc6d54f2304b83f75f +Build Timestamp: 2025-06-18T06:02:35.553006000Z (1750226555) +Build Profile: maxperf +foundryup: use - cast Version: 1.2.3-nightly +Commit SHA: b515c90b9be9645b844943fc6d54f2304b83f75f +Build Timestamp: 2025-06-18T06:02:35.553006000Z (1750226555) +Build Profile: maxperf +foundryup: use - anvil Version: 1.2.3-nightly +Commit SHA: b515c90b9be9645b844943fc6d54f2304b83f75f +Build Timestamp: 2025-06-18T06:02:35.553006000Z (1750226555) +Build Profile: maxperf +foundryup: use - chisel Version: 1.2.3-nightly +Commit SHA: b515c90b9be9645b844943fc6d54f2304b83f75f +Build Timestamp: 2025-06-18T06:02:35.553006000Z (1750226555) +Build Profile: maxperf forge Version: 1.2.3-nightly ``` ## Notes -- All benchmarks were run with hyperfine in parallel mode -- **forge test - Running the test suite (5 
runs, 1 warmup)** -- **forge build (no cache) - Clean build without cache (5 runs, cache cleaned after each run)** -- **forge build (with cache) - Build with warm cache (5 runs, 1 warmup)** -- Results show mean execution time in seconds -- N/A indicates benchmark failed. +- All benchmarks use Criterion.rs for statistical analysis +- Each benchmark runs 10 samples by default +- Results show mean execution time with confidence intervals +- Repositories are cloned once and reused across all Foundry versions +- Build and setup operations are parallelized using Rayon +- The first version tested becomes the baseline for comparisons ## System Information - **OS:** Darwin - **Architecture:** arm64 -- **Date:** Thu 12 Jun 2025 16:57:21 CEST +- **Date:** Wed Jun 25 17:43:46 IST 2025 ## Raw Data -Raw JSON benchmark data is available in: `/Users/yash/dev/paradigm/foundry-rs/foundry/benches/benchmark_results/json_20250612_165120` +Detailed benchmark data and HTML reports are available in: +- `target/criterion/` - Individual benchmark reports +- `target/criterion/report/index.html` - Combined HTML report + diff --git a/benches/forge_build_no_cache.rs b/benches/forge_build_no_cache.rs index a84ce5c691b54..4b4f1b94b125e 100644 --- a/benches/forge_build_no_cache.rs +++ b/benches/forge_build_no_cache.rs @@ -1,10 +1,10 @@ use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; -use foundry_bench::{switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS}; +use foundry_bench::{switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS, SAMPLE_SIZE}; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; fn benchmark_forge_build_no_cache(c: &mut Criterion) { let mut group = c.benchmark_group("forge-build-no-cache"); - group.sample_size(10); + group.sample_size(SAMPLE_SIZE); // Setup all projects once - clone repos in parallel let projects: Vec<_> = BENCHMARK_REPOS diff --git a/benches/forge_build_with_cache.rs b/benches/forge_build_with_cache.rs index 33f788cdfbfa2..e8df74732eda4 100644 --- a/benches/forge_build_with_cache.rs +++ b/benches/forge_build_with_cache.rs @@ -1,10 +1,12 @@ use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; -use foundry_bench::{switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS}; +use foundry_bench::{ + switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS, SAMPLE_SIZE, +}; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; fn benchmark_forge_build_with_cache(c: &mut Criterion) { let mut group = c.benchmark_group("forge-build-with-cache"); - group.sample_size(10); + group.sample_size(SAMPLE_SIZE); // Setup all projects once - clone repos in parallel let projects: Vec<_> = BENCHMARK_REPOS @@ -21,7 +23,7 @@ fn benchmark_forge_build_with_cache(c: &mut Criterion) { switch_foundry_version(version).expect("Failed to switch foundry version"); // Prime the cache for all projects in parallel - projects.par_iter().for_each(|(repo_config, project)| { + projects.par_iter().for_each(|(_repo_config, project)| { let _ = project.run_forge_build(false); }); diff --git a/benches/forge_test.rs b/benches/forge_test.rs index fab4d59184232..6832aee005d0c 100644 --- a/benches/forge_test.rs +++ b/benches/forge_test.rs @@ -1,9 +1,9 @@ use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; -use foundry_bench::{switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS}; +use 
foundry_bench::{switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS, SAMPLE_SIZE}; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; fn benchmark_forge_test(c: &mut Criterion) { let mut group = c.benchmark_group("forge-test"); - group.sample_size(10); + group.sample_size(SAMPLE_SIZE); // Setup all projects once - clone repos in parallel let projects: Vec<_> = BENCHMARK_REPOS diff --git a/benches/run_benchmarks.sh b/benches/run_benchmarks.sh new file mode 100755 index 0000000000000..ed2afc36fa9f2 --- /dev/null +++ b/benches/run_benchmarks.sh @@ -0,0 +1,289 @@ +#!/bin/bash + +# Foundry Benchmark Runner with Criterion Table Output +# This script runs the criterion-based benchmarks and generates a markdown report + +set -euo pipefail + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# Logging functions +log_info() { + echo -e "${BLUE}[INFO]${NC} $1" +} + +log_success() { + echo -e "${GREEN}[SUCCESS]${NC} $1" +} + +log_warn() { + echo -e "${YELLOW}[WARN]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# Script directory +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +cd "$SCRIPT_DIR" + +# Check if required tools are installed +check_dependencies() { + if ! command -v criterion-table &> /dev/null; then + log_error "criterion-table is not installed. Please install it with:" + echo "cargo install criterion-table" + exit 1 + fi + + if ! cargo criterion --help &> /dev/null; then + log_error "cargo-criterion is not installed. Please install it with:" + echo "cargo install cargo-criterion" + exit 1 + fi +} + +# Get system information +get_system_info() { + local os_name=$(uname -s) + local arch=$(uname -m) + local date=$(date) + + echo "- **OS:** $os_name" + echo "- **Architecture:** $arch" + echo "- **Date:** $date" +} + + +# Run benchmarks and generate report +run_benchmarks() { + log_info "Running Foundry benchmarks..." + + # Create temp files for each benchmark + local temp_dir=$(mktemp -d) + local forge_test_json="$temp_dir/forge_test.json" + local forge_build_no_cache_json="$temp_dir/forge_build_no_cache.json" + local forge_build_with_cache_json="$temp_dir/forge_build_with_cache.json" + + # Set up output redirection based on verbose flag + local output_redirect="" + if [[ "${VERBOSE:-false}" != "true" ]]; then + output_redirect="2>/dev/null" + fi + + # Run benchmarks in specific order (this determines baseline column) + log_info "Running forge_test benchmark..." + if [[ "${VERBOSE:-false}" == "true" ]]; then + cargo criterion --bench forge_test --message-format=json > "$forge_test_json" || { + log_error "forge_test benchmark failed" + exit 1 + } + else + cargo criterion --bench forge_test --message-format=json > "$forge_test_json" 2>/dev/null || { + log_error "forge_test benchmark failed" + exit 1 + } + fi + + log_info "Running forge_build_no_cache benchmark..." + if [[ "${VERBOSE:-false}" == "true" ]]; then + cargo criterion --bench forge_build_no_cache --message-format=json > "$forge_build_no_cache_json" || { + log_error "forge_build_no_cache benchmark failed" + exit 1 + } + else + cargo criterion --bench forge_build_no_cache --message-format=json > "$forge_build_no_cache_json" 2>/dev/null || { + log_error "forge_build_no_cache benchmark failed" + exit 1 + } + fi + + log_info "Running forge_build_with_cache benchmark..." 
+ if [[ "${VERBOSE:-false}" == "true" ]]; then + cargo criterion --bench forge_build_with_cache --message-format=json > "$forge_build_with_cache_json" || { + log_error "forge_build_with_cache benchmark failed" + exit 1 + } + else + cargo criterion --bench forge_build_with_cache --message-format=json > "$forge_build_with_cache_json" 2>/dev/null || { + log_error "forge_build_with_cache benchmark failed" + exit 1 + } + fi + + # Combine all results and generate markdown + log_info "Generating markdown report with criterion-table..." + + # Generate the final report + generate_report "$temp_dir/tables.md" + + # Cleanup + rm -rf "$temp_dir" + + log_success "Benchmark report generated in LATEST.md" +} + +# Generate the final markdown report +generate_report() { + local tables_file="$1" + local report_file="LATEST.md" + + log_info "Generating final report..." + + # Get current timestamp + local timestamp=$(date) + + # Get repository information and create numbered list with links + local versions=$(grep -A 10 'pub static FOUNDRY_VERSIONS' src/lib.rs | grep -o '"[^"]*"' | tr -d '"' | tr '\n' ' ') + + # Extract repository info for numbered list + local repo_list="" + local counter=1 + + # Parse the BENCHMARK_REPOS section + while IFS= read -r line; do + if [[ $line =~ RepoConfig.*name:.*\"([^\"]+)\".*org:.*\"([^\"]+)\".*repo:.*\"([^\"]+)\" ]]; then + local name="${BASH_REMATCH[1]}" + local org="${BASH_REMATCH[2]}" + local repo="${BASH_REMATCH[3]}" + repo_list+="$counter. [$name](https://github.com/$org/$repo)\n" + ((counter++)) + fi + done < <(grep -A 20 'pub static BENCHMARK_REPOS' src/lib.rs | grep 'RepoConfig') + + # Write the report + cat > "$report_file" << EOF +# Foundry Benchmarking Results + +**Generated on:** $timestamp +**Foundry Versions Tested:** $versions + +## Repositories Tested + +$(echo -e "$repo_list") + +## Summary + +This report contains comprehensive benchmarking results comparing different Foundry versions across multiple projects using Criterion.rs for precise performance measurements. + +The following benchmarks were performed: + +1. **forge-test** - Running the test suite (10 samples each) +2. **forge-build-no-cache** - Clean build without cache (10 samples each) +3. **forge-build-with-cache** - Build with warm cache (10 samples each) + +--- + +## Performance Comparison Tables + +EOF + + # Append the criterion-table generated tables + cat "$tables_file" >> "$report_file" + + # Add notes and system info + cat >> "$report_file" << EOF +## Notes + +- All benchmarks use Criterion.rs for statistical analysis +- Each benchmark runs 10 samples by default +- Results show mean execution time with confidence intervals +- Repositories are cloned once and reused across all Foundry versions +- Build and setup operations are parallelized using Rayon +- The first version tested becomes the baseline for comparisons + +## System Information + +$(get_system_info) + +## Raw Data + +Detailed benchmark data and HTML reports are available in: +- \`target/criterion/\` - Individual benchmark reports + +EOF + + log_success "Report written to $report_file" +} + +# Main function +main() { + log_info "Starting Foundry benchmark suite..." + + # Check dependencies + check_dependencies + + # Run benchmarks and generate report + run_benchmarks + + log_success "Benchmark suite completed successfully!" 
+ echo "" + echo "View the results:" + echo " - Text report: cat LATEST.md" + echo " - HTML report: open target/criterion/report/index.html" +} + +# Help function +show_help() { + cat << EOF +Foundry Benchmark Runner + +This script runs Criterion-based benchmarks for Foundry commands and generates +a markdown report using criterion-table. + +USAGE: + $0 [OPTIONS] + +OPTIONS: + -h, --help Show this help message + -v, --version Show version information + --verbose Show benchmark output (by default output is suppressed) + +REQUIREMENTS: + - criterion-table: cargo install criterion-table + - cargo-criterion: cargo install cargo-criterion + - All Foundry versions defined in src/lib.rs must be installed + +EXAMPLES: + $0 # Run all benchmarks and generate LATEST.md + $0 --verbose # Run benchmarks with full output visible + +The script will: +1. Run forge_test, forge_build_no_cache, and forge_build_with_cache benchmarks +2. Generate comparison tables using criterion-table +3. Include system information and Foundry version details +4. Save the complete report to LATEST.md + +EOF +} + +# Parse command line arguments +VERBOSE=false +while [[ $# -gt 0 ]]; do + case $1 in + -h|--help) + show_help + exit 0 + ;; + -v|--version) + echo "Foundry Benchmark Runner v1.0.0" + exit 0 + ;; + --verbose) + VERBOSE=true + shift + ;; + *) + log_error "Unknown option: $1" + echo "Use -h or --help for usage information" + exit 1 + ;; + esac +done + +main \ No newline at end of file diff --git a/benches/src/lib.rs b/benches/src/lib.rs index 8994b06f745be..5d5814103a6e8 100644 --- a/benches/src/lib.rs +++ b/benches/src/lib.rs @@ -17,7 +17,7 @@ pub struct RepoConfig { /// Available repositories for benchmarking pub static BENCHMARK_REPOS: &[RepoConfig] = &[ - RepoConfig { name: "account", org: "ithacaxyz", repo: "account", rev: "main" }, + RepoConfig { name: "ithacaxyz-account", org: "ithacaxyz", repo: "account", rev: "main" }, // Temporarily reduced for testing // RepoConfig { name: "solady", org: "Vectorized", repo: "solady", rev: "main" }, // RepoConfig { name: "v4-core", org: "Uniswap", repo: "v4-core", rev: "main" }, @@ -25,6 +25,12 @@ pub static BENCHMARK_REPOS: &[RepoConfig] = &[ // RepoConfig { name: "spark-psm", org: "marsfoundation", repo: "spark-psm", rev: "master" }, ]; +/// Sample size for benchmark measurements +/// +/// This controls how many times each benchmark is run for statistical analysis. +/// Higher values provide more accurate results but take longer to complete. 
+pub const SAMPLE_SIZE: usize = 10; + /// Foundry versions to benchmark /// /// To add more versions for comparison, install them first: From 7d1d85a3f4a0ee1b8b00a337e7e4155701c31c28 Mon Sep 17 00:00:00 2001 From: Yash Atreya <44857776+yash-atreya@users.noreply.github.com> Date: Wed, 25 Jun 2025 18:34:05 +0530 Subject: [PATCH 4/9] feat: ci workflow, fix script --- .github/workflows/benchmarks.yml | 108 +++++++++++++++++++++++++++++++ benches/LATEST.md | 82 ++++++++--------------- benches/README.md | 24 ++++--- benches/run_benchmarks.sh | 46 ++++++++++++- 4 files changed, 192 insertions(+), 68 deletions(-) create mode 100644 .github/workflows/benchmarks.yml diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml new file mode 100644 index 0000000000000..d5c6e0cecf501 --- /dev/null +++ b/.github/workflows/benchmarks.yml @@ -0,0 +1,108 @@ +name: Foundry Benchmarks + +on: + workflow_dispatch: + inputs: + pr_number: + description: "PR number to comment on (optional)" + required: false + type: string + +permissions: + contents: write + pull-requests: write + +jobs: + benchmark: + name: Run Foundry Benchmarks + runs-on: ubuntu-latest + timeout-minutes: 60 + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Setup Rust toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Cache Rust dependencies + uses: Swatinem/rust-cache@v2 + with: + workspaces: | + ./ + + - name: Install foundryup + run: | + curl -L https://foundry.paradigm.xyz | bash + echo "$HOME/.foundry/bin" >> $GITHUB_PATH + + - name: Install benchmark dependencies + run: | + cargo install cargo-criterion + cargo install criterion-table + + - name: Run benchmarks + working-directory: ./benches + run: | + chmod +x run_benchmarks.sh + ./run_benchmarks.sh + + - name: Commit benchmark results + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + git add benches/LATEST.md + if git diff --staged --quiet; then + echo "No changes to commit" + else + git commit -m "Update benchmark results + + 🤖 Generated with [Foundry Benchmarks](https://github.com/${{ github.repository }}/actions) + + Co-Authored-By: github-actions " + git push + fi + + - name: Read benchmark results + id: benchmark_results + run: | + if [ -f "benches/LATEST.md" ]; then + { + echo 'results<> $GITHUB_OUTPUT + else + echo 'results=No benchmark results found.' >> $GITHUB_OUTPUT + fi + + - name: Comment on PR + if: github.event.inputs.pr_number != '' + uses: actions/github-script@v7 + with: + script: | + const prNumber = ${{ github.event.inputs.pr_number }}; + const benchmarkResults = `${{ steps.benchmark_results.outputs.results }}`; + + const comment = `## 📊 Foundry Benchmark Results + +
+          <details>
+          <summary>Click to view detailed benchmark results</summary>
+
+          ${benchmarkResults}
+
+          </details>
+ + --- + + 🤖 This comment was automatically generated by the [Foundry Benchmarks workflow](https://github.com/${{ github.repository }}/actions). + + To run benchmarks manually: Go to [Actions](https://github.com/${{ github.repository }}/actions/workflows/foundry-benchmarks.yml) → "Run workflow"`; + + github.rest.issues.createComment({ + issue_number: prNumber, + owner: context.repo.owner, + repo: context.repo.repo, + body: comment + }); + diff --git a/benches/LATEST.md b/benches/LATEST.md index bc373cb9d4c5d..9d6be5a7291d0 100644 --- a/benches/LATEST.md +++ b/benches/LATEST.md @@ -1,9 +1,15 @@ # Foundry Benchmarking Results -**Generated on:** Wed Jun 25 17:43:45 IST 2025 -**Tool:** Criterion.rs with criterion-table +**Generated on:** Wed Jun 25 18:27:00 IST 2025 **Foundry Versions Tested:** stable nightly -**Repositories Tested:** account solady v4-core morpho-blue spark-psm + +## Repositories Tested + +1. [ithacaxyz-account](https://github.com/ithacaxyz/main) +2. [solady](https://github.com/Vectorized/main) +3. [v4-core](https://github.com/Uniswap/main) +4. [morpho-blue](https://github.com/morpho-org/main) +5. [spark-psm](https://github.com/marsfoundation/master) ## Summary @@ -24,66 +30,33 @@ The following benchmarks were performed: ## Table of Contents - [Benchmark Results](#benchmark-results) + - [forge-test](#forge-test) + - [forge-build-no-cache](#forge-build-no-cache) - [forge-build-with-cache](#forge-build-with-cache) ## Benchmark Results +### forge-test + +| | `stable` | `nightly` | +|:------------------------|:-----------------------|:------------------------------ | +| **`ithacaxyz-account`** | `3.73 s` (✅ **1.00x**) | `3.30 s` (✅ **1.13x faster**) | + +### forge-build-no-cache + +| | `stable` | `nightly` | +|:------------------------|:------------------------|:------------------------------- | +| **`ithacaxyz-account`** | `14.32 s` (✅ **1.00x**) | `14.37 s` (✅ **1.00x slower**) | + ### forge-build-with-cache -| | `stable` | `nightly` | -|:--------------|:--------------------------|:--------------------------------- | -| **`account`** | `164.00 ms` (✅ **1.00x**) | `166.34 ms` (✅ **1.01x slower**) | +| | `stable` | `nightly` | +|:------------------------|:--------------------------|:--------------------------------- | +| **`ithacaxyz-account`** | `162.64 ms` (✅ **1.00x**) | `167.49 ms` (✅ **1.03x slower**) | --- Made with [criterion-table](https://github.com/nu11ptr/criterion-table) -[INFO] Getting Foundry version information... 
-## Foundry Version Details - -### stable - -``` -foundryup: use - forge Version: 1.2.3-stable -Commit SHA: a813a2cee7dd4926e7c56fd8a785b54f32e0d10f -Build Timestamp: 2025-06-08T15:42:50.507050000Z (1749397370) -Build Profile: maxperf -foundryup: use - cast Version: 1.2.3-stable -Commit SHA: a813a2cee7dd4926e7c56fd8a785b54f32e0d10f -Build Timestamp: 2025-06-08T15:42:50.507050000Z (1749397370) -Build Profile: maxperf -foundryup: use - anvil Version: 1.2.3-stable -Commit SHA: a813a2cee7dd4926e7c56fd8a785b54f32e0d10f -Build Timestamp: 2025-06-08T15:42:50.507050000Z (1749397370) -Build Profile: maxperf -foundryup: use - chisel Version: 1.2.3-stable -Commit SHA: a813a2cee7dd4926e7c56fd8a785b54f32e0d10f -Build Timestamp: 2025-06-08T15:42:50.507050000Z (1749397370) -Build Profile: maxperf -forge Version: 1.2.3-stable -``` - -### nightly - -``` -foundryup: use - forge Version: 1.2.3-nightly -Commit SHA: b515c90b9be9645b844943fc6d54f2304b83f75f -Build Timestamp: 2025-06-18T06:02:35.553006000Z (1750226555) -Build Profile: maxperf -foundryup: use - cast Version: 1.2.3-nightly -Commit SHA: b515c90b9be9645b844943fc6d54f2304b83f75f -Build Timestamp: 2025-06-18T06:02:35.553006000Z (1750226555) -Build Profile: maxperf -foundryup: use - anvil Version: 1.2.3-nightly -Commit SHA: b515c90b9be9645b844943fc6d54f2304b83f75f -Build Timestamp: 2025-06-18T06:02:35.553006000Z (1750226555) -Build Profile: maxperf -foundryup: use - chisel Version: 1.2.3-nightly -Commit SHA: b515c90b9be9645b844943fc6d54f2304b83f75f -Build Timestamp: 2025-06-18T06:02:35.553006000Z (1750226555) -Build Profile: maxperf -forge Version: 1.2.3-nightly -``` - ## Notes - All benchmarks use Criterion.rs for statistical analysis @@ -97,11 +70,10 @@ forge Version: 1.2.3-nightly - **OS:** Darwin - **Architecture:** arm64 -- **Date:** Wed Jun 25 17:43:46 IST 2025 +- **Date:** Wed Jun 25 18:27:01 IST 2025 ## Raw Data Detailed benchmark data and HTML reports are available in: - `target/criterion/` - Individual benchmark reports -- `target/criterion/report/index.html` - Combined HTML report diff --git a/benches/README.md b/benches/README.md index 8bdfcc9f63ea2..f410e2bcdcba3 100644 --- a/benches/README.md +++ b/benches/README.md @@ -7,26 +7,21 @@ This directory contains performance benchmarks for Foundry commands across multi Before running the benchmarks, ensure you have the following installed: 1. **Rust and Cargo** - Required for building and running the benchmarks + ```bash curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh ``` 2. **Foundryup** - The Foundry toolchain installer + ```bash curl -L https://foundry.paradigm.xyz | bash foundryup ``` -3. **Required Foundry Versions** - Install all versions defined in `src/lib.rs` (see `FOUNDRY_VERSIONS`) - ```bash - foundryup --install stable - foundryup --install nightly - # Install any additional versions you add to FOUNDRY_VERSIONS in src/lib.rs - ``` +3. **Git** - For cloning benchmark repositories -4. **Git** - For cloning benchmark repositories - -5. **npm** - Some repositories require npm dependencies +4. 
**npm** - Some repositories require npm dependencies ```bash # Install Node.js and npm from https://nodejs.org/ ``` @@ -34,11 +29,13 @@ Before running the benchmarks, ensure you have the following installed: ## Running Benchmarks ### Run all benchmarks + ```bash cargo bench ``` ### Run specific benchmark + ```bash cargo bench forge_test cargo bench forge_build_no_cache @@ -46,7 +43,9 @@ cargo bench forge_build_with_cache ``` ### Generate HTML reports + Criterion automatically generates HTML reports in `target/criterion/`. Open the reports in a browser: + ```bash open target/criterion/report/index.html ``` @@ -60,7 +59,9 @@ open target/criterion/report/index.html ## Configuration ### Repositories + Edit `src/lib.rs` to modify the list of repositories to benchmark: + ```rust pub static BENCHMARK_REPOS: &[RepoConfig] = &[ RepoConfig { name: "account", org: "ithacaxyz", repo: "account", rev: "main" }, @@ -69,7 +70,9 @@ pub static BENCHMARK_REPOS: &[RepoConfig] = &[ ``` ### Foundry Versions + Edit `src/lib.rs` to modify the list of Foundry versions: + ```rust pub static FOUNDRY_VERSIONS: &[&str] = &["stable", "nightly"]; ``` @@ -77,6 +80,7 @@ pub static FOUNDRY_VERSIONS: &[&str] = &["stable", "nightly"]; ## Results Benchmark results are displayed in the terminal and saved as HTML reports. The reports show: + - Execution time statistics (mean, median, standard deviation) - Comparison between different Foundry versions - Performance trends across repositories @@ -85,4 +89,4 @@ Benchmark results are displayed in the terminal and saved as HTML reports. The r 1. **Foundry version not found**: Ensure the version is installed with `foundryup --install ` 2. **Repository clone fails**: Check network connectivity and repository access -3. **Build failures**: Some repositories may have specific dependencies - check their README files \ No newline at end of file +3. **Build failures**: Some repositories may have specific dependencies - check their README files diff --git a/benches/run_benchmarks.sh b/benches/run_benchmarks.sh index ed2afc36fa9f2..1a30506a9fdc8 100755 --- a/benches/run_benchmarks.sh +++ b/benches/run_benchmarks.sh @@ -48,6 +48,41 @@ check_dependencies() { fi } +# Check and install all required Foundry versions +check_and_install_foundry() { + log_info "Checking and installing required Foundry versions..." + + # Read the versions from the Rust source + local versions=$(grep -A 10 'pub static FOUNDRY_VERSIONS' src/lib.rs | grep -o '"[^"]*"' | tr -d '"') + + # Check if foundryup is available + if ! command -v foundryup &> /dev/null; then + log_error "foundryup not found. Please install Foundry first:" + echo "curl -L https://foundry.paradigm.xyz | bash" + exit 1 + fi + + # Install each version if not already available + for version in $versions; do + log_info "Checking Foundry version: $version" + + # Try to switch to the version to check if it's installed + if foundryup --use "$version" 2>/dev/null; then + log_info "✓ Version $version is already installed" + else + log_info "Installing Foundry version: $version" + if foundryup --install "$version"; then + log_success "✓ Successfully installed version $version" + else + log_error "Failed to install Foundry version: $version" + exit 1 + fi + fi + done + + log_success "All required Foundry versions are available" +} + # Get system information get_system_info() { local os_name=$(uname -s) @@ -118,6 +153,11 @@ run_benchmarks() { # Combine all results and generate markdown log_info "Generating markdown report with criterion-table..." + + if ! 
cat "$forge_test_json" "$forge_build_no_cache_json" "$forge_build_with_cache_json" | criterion-table > "$temp_dir/tables.md"; then + log_error "criterion-table failed to process benchmark data" + exit 1 + fi # Generate the final report generate_report "$temp_dir/tables.md" @@ -179,8 +219,6 @@ The following benchmarks were performed: --- -## Performance Comparison Tables - EOF # Append the criterion-table generated tables @@ -218,6 +256,9 @@ main() { # Check dependencies check_dependencies + # Check and install required Foundry versions + check_and_install_foundry + # Run benchmarks and generate report run_benchmarks @@ -225,7 +266,6 @@ main() { echo "" echo "View the results:" echo " - Text report: cat LATEST.md" - echo " - HTML report: open target/criterion/report/index.html" } # Help function From b6671063934b3fd60439a79550d6a5dfad4ef655 Mon Sep 17 00:00:00 2001 From: Yash Atreya <44857776+yash-atreya@users.noreply.github.com> Date: Thu, 26 Jun 2025 12:45:17 +0530 Subject: [PATCH 5/9] update readme --- benches/README.md | 32 ++++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/benches/README.md b/benches/README.md index f410e2bcdcba3..b3663831853b1 100644 --- a/benches/README.md +++ b/benches/README.md @@ -22,32 +22,44 @@ Before running the benchmarks, ensure you have the following installed: 3. **Git** - For cloning benchmark repositories 4. **npm** - Some repositories require npm dependencies + ```bash # Install Node.js and npm from https://nodejs.org/ ``` +5. **Benchmark tools** - Required for generating reports + ```bash + cargo install cargo-criterion + cargo install criterion-table + ``` + ## Running Benchmarks -### Run all benchmarks +### Run the complete benchmark suite ```bash -cargo bench +cargo run ``` -### Run specific benchmark +This will: + +1. Check and install required Foundry versions +2. Run all benchmark suites (forge_test, forge_build_no_cache, forge_build_with_cache) +3. Generate comparison tables using criterion-table +4. Create the final LATEST.md report + +### Run individual benchmark suites ```bash -cargo bench forge_test -cargo bench forge_build_no_cache -cargo bench forge_build_with_cache +./run_benchmarks.sh ``` -### Generate HTML reports - -Criterion automatically generates HTML reports in `target/criterion/`. 
Open the reports in a browser: +### Run specific benchmark ```bash -open target/criterion/report/index.html +cargo criterion --bench forge_test +cargo criterion --bench forge_build_no_cache +cargo criterion --bench forge_build_with_cache ``` ## Benchmark Structure From a3962020938a754b4af39d7edf6114acc976bfa3 Mon Sep 17 00:00:00 2001 From: Yash Atreya <44857776+yash-atreya@users.noreply.github.com> Date: Fri, 27 Jun 2025 15:41:45 +0530 Subject: [PATCH 6/9] feat: enhance benchmarking suite with version flexibility MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Add `get_benchmark_versions()` helper to read versions from env var - Update all benchmarks to use version helper for consistency - Add `--versions` and `--force-install` flags to shell script - Enable all three benchmarks (forge_test, build_no_cache, build_with_cache) - Improve error handling for corrupted forge installations - Remove complex workarounds in favor of clear error messages The benchmarks now support custom versions via: ./run_benchmarks.sh --versions stable,nightly,v1.2.0 🤖 Generated with Claude Code Co-Authored-By: Claude --- Cargo.lock | 14 ++--- benches/Cargo.toml | 2 +- benches/LATEST.md | 44 ++++++-------- benches/forge_build_no_cache.rs | 13 +++-- benches/forge_build_with_cache.rs | 15 ++--- benches/forge_test.rs | 14 +++-- benches/run_benchmarks.sh | 96 +++++++++++++++++++++---------- benches/src/lib.rs | 66 +++++++++++++-------- 8 files changed, 163 insertions(+), 101 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e1b8c84e76151..9a469d70d4a4c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -588,7 +588,7 @@ dependencies = [ "alloy-rlp", "alloy-serde", "alloy-sol-types", - "itertools 0.13.0", + "itertools 0.14.0", "serde", "serde_json", "thiserror 2.0.12", @@ -4332,7 +4332,7 @@ dependencies = [ "fs_extra", "futures-util", "home", - "itertools 0.13.0", + "itertools 0.14.0", "path-slash", "rand 0.8.5", "rayon", @@ -7263,7 +7263,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools 0.13.0", + "itertools 0.14.0", "proc-macro2", "quote", "syn 2.0.103", @@ -8795,7 +8795,7 @@ dependencies = [ "derive_builder", "derive_more 2.0.1", "dunce", - "itertools 0.13.0", + "itertools 0.14.0", "itoa", "lasso", "match_cfg", @@ -8807,7 +8807,7 @@ dependencies = [ "solar-config", "solar-data-structures", "solar-macros", - "thiserror 1.0.69", + "thiserror 2.0.12", "tracing", "unicode-width 0.2.0", ] @@ -8832,7 +8832,7 @@ dependencies = [ "alloy-primitives", "bitflags 2.9.1", "bumpalo", - "itertools 0.13.0", + "itertools 0.14.0", "memchr", "num-bigint", "num-rational", @@ -9143,7 +9143,7 @@ dependencies = [ "serde_json", "sha2 0.10.9", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.12", "url", "zip", ] diff --git a/benches/Cargo.toml b/benches/Cargo.toml index faa0a13a04d63..81c3c71e3a829 100644 --- a/benches/Cargo.toml +++ b/benches/Cargo.toml @@ -32,4 +32,4 @@ chrono = { version = "0.4", features = ["serde"] } rayon.workspace = true [dev-dependencies] -foundry-test-utils.workspace = true \ No newline at end of file +foundry-test-utils.workspace = true diff --git a/benches/LATEST.md b/benches/LATEST.md index 6fde175ce3364..354ca2f054897 100644 --- a/benches/LATEST.md +++ b/benches/LATEST.md @@ -1,7 +1,7 @@ # Foundry Benchmarking Results -**Generated on:** Wed Jun 25 18:27:00 IST 2025 -**Foundry Versions Tested:** stable nightly +**Generated on:** Fri 
27 Jun 2025 15:16:52 IST +**Foundry Versions Tested:** stable nightly ## Repositories Tested @@ -18,54 +18,46 @@ This report contains comprehensive benchmarking results comparing different Foun The following benchmarks were performed: 1. **forge-test** - Running the test suite (10 samples each) -2. **forge-build-no-cache** - Clean build without cache (10 samples each) +2. **forge-build-no-cache** - Clean build without cache (10 samples each) 3. **forge-build-with-cache** - Build with warm cache (10 samples each) --- -## Performance Comparison Tables - # Benchmarks ## Table of Contents - [Benchmark Results](#benchmark-results) - - [forge-test](#forge-test) - - [forge-build-no-cache](#forge-build-no-cache) - - [forge-build-with-cache](#forge-build-with-cache) + - [forge-build-with-cache](#forge-build-with-cache) ## Benchmark Results -### forge-test - -| | `stable` | `nightly` | -| :---------------------- | :---------------------- | :----------------------------- | -| **`ithacaxyz-account`** | `3.73 s` (✅ **1.00x**) | `3.30 s` (✅ **1.13x faster**) | - -### forge-build-no-cache - -| | `stable` | `nightly` | -| :---------------------- | :----------------------- | :------------------------------ | -| **`ithacaxyz-account`** | `14.32 s` (✅ **1.00x**) | `14.37 s` (✅ **1.00x slower**) | - ### forge-build-with-cache -| | `stable` | `nightly` | -| :---------------------- | :------------------------- | :-------------------------------- | -| **`ithacaxyz-account`** | `162.64 ms` (✅ **1.00x**) | `167.49 ms` (✅ **1.03x slower**) | +| | `stable` | `nightly` | +|:------------------------|:--------------------------|:--------------------------------- | +| **`ithacaxyz-account`** | `166.32 ms` (✅ **1.00x**) | `171.37 ms` (✅ **1.03x slower**) | --- - Made with [criterion-table](https://github.com/nu11ptr/criterion-table) +## Notes + +- All benchmarks use Criterion.rs for statistical analysis +- Each benchmark runs 10 samples by default +- Results show mean execution time with confidence intervals +- Repositories are cloned once and reused across all Foundry versions +- Build and setup operations are parallelized using Rayon +- The first version tested becomes the baseline for comparisons + ## System Information - **OS:** Darwin - **Architecture:** arm64 -- **Date:** Wed Jun 25 18:27:01 IST 2025 +- **Date:** Fri 27 Jun 2025 15:16:52 IST ## Raw Data Detailed benchmark data and HTML reports are available in: - - `target/criterion/` - Individual benchmark reports + diff --git a/benches/forge_build_no_cache.rs b/benches/forge_build_no_cache.rs index 4b4f1b94b125e..f7b4a2d13f527 100644 --- a/benches/forge_build_no_cache.rs +++ b/benches/forge_build_no_cache.rs @@ -1,5 +1,7 @@ use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; -use foundry_bench::{switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS, SAMPLE_SIZE}; +use foundry_bench::{ + get_benchmark_versions, switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, SAMPLE_SIZE, +}; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; fn benchmark_forge_build_no_cache(c: &mut Criterion) { @@ -16,15 +18,18 @@ fn benchmark_forge_build_no_cache(c: &mut Criterion) { }) .collect(); - for &version in FOUNDRY_VERSIONS { + // Get versions from environment variable or default + let versions = get_benchmark_versions(); + + for version in versions { // Switch foundry version - switch_foundry_version(version).expect("Failed to switch foundry version"); + switch_foundry_version(&version).expect("Failed to 
switch foundry version"); // Run benchmarks for each project for (repo_config, project) in &projects { // Format: table_name/column_name/row_name // This creates: forge-build-no-cache/{version}/{repo_name} - let bench_id = BenchmarkId::new(version, repo_config.name); + let bench_id = BenchmarkId::new(&version, repo_config.name); group.bench_function(bench_id, |b| { b.iter(|| { diff --git a/benches/forge_build_with_cache.rs b/benches/forge_build_with_cache.rs index e8df74732eda4..154dd8a7f6100 100644 --- a/benches/forge_build_with_cache.rs +++ b/benches/forge_build_with_cache.rs @@ -1,6 +1,6 @@ use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; use foundry_bench::{ - switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS, SAMPLE_SIZE, + get_benchmark_versions, switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, SAMPLE_SIZE, }; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; @@ -12,17 +12,18 @@ fn benchmark_forge_build_with_cache(c: &mut Criterion) { let projects: Vec<_> = BENCHMARK_REPOS .par_iter() .map(|repo_config| { - // Setup: prepare project (clone repo) let project = BenchmarkProject::setup(repo_config).expect("Failed to setup project"); (repo_config, project) }) .collect(); - for &version in FOUNDRY_VERSIONS { + // Get versions from environment variable or default + let versions = get_benchmark_versions(); + + for version in versions { // Switch foundry version once per version - switch_foundry_version(version).expect("Failed to switch foundry version"); + switch_foundry_version(&version).expect("Failed to switch foundry version"); - // Prime the cache for all projects in parallel projects.par_iter().for_each(|(_repo_config, project)| { let _ = project.run_forge_build(false); }); @@ -31,10 +32,10 @@ fn benchmark_forge_build_with_cache(c: &mut Criterion) { for (repo_config, project) in &projects { // Format: table_name/column_name/row_name // This creates: forge-build-with-cache/{version}/{repo_name} - let bench_id = BenchmarkId::new(version, repo_config.name); - + let bench_id = BenchmarkId::new(&version, repo_config.name); group.bench_function(bench_id, |b| { b.iter(|| { + println!("Benching: forge-build-with-cache/{}/{}", version, repo_config.name); let output = project.run_forge_build(false).expect("forge build failed"); black_box(output); }); diff --git a/benches/forge_test.rs b/benches/forge_test.rs index 6832aee005d0c..b12b30a067b47 100644 --- a/benches/forge_test.rs +++ b/benches/forge_test.rs @@ -1,6 +1,9 @@ use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; -use foundry_bench::{switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, FOUNDRY_VERSIONS, SAMPLE_SIZE}; +use foundry_bench::{ + get_benchmark_versions, switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, SAMPLE_SIZE, +}; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; + fn benchmark_forge_test(c: &mut Criterion) { let mut group = c.benchmark_group("forge-test"); group.sample_size(SAMPLE_SIZE); @@ -15,9 +18,12 @@ fn benchmark_forge_test(c: &mut Criterion) { }) .collect(); - for &version in FOUNDRY_VERSIONS { + // Get versions from environment variable or default + let versions = get_benchmark_versions(); + + for version in versions { // Switch foundry version once per version - switch_foundry_version(version).expect("Failed to switch foundry version"); + switch_foundry_version(&version).expect("Failed to switch foundry version"); // Build all projects in parallel for this foundry 
version projects.par_iter().for_each(|(_repo_config, project)| { @@ -28,7 +34,7 @@ fn benchmark_forge_test(c: &mut Criterion) { for (repo_config, project) in &projects { // Format: table_name/column_name/row_name // This creates: forge-test/{version}/{repo_name} - let bench_id = BenchmarkId::new(version, repo_config.name); + let bench_id = BenchmarkId::new(&version, repo_config.name); group.bench_function(bench_id, |b| { b.iter(|| { diff --git a/benches/run_benchmarks.sh b/benches/run_benchmarks.sh index 1a30506a9fdc8..b9ddfe378c25d 100755 --- a/benches/run_benchmarks.sh +++ b/benches/run_benchmarks.sh @@ -48,12 +48,23 @@ check_dependencies() { fi } -# Check and install all required Foundry versions -check_and_install_foundry() { - log_info "Checking and installing required Foundry versions..." +# Install Foundry versions if requested +install_foundry_versions() { + if [[ "$FORCE_INSTALL" != "true" ]]; then + return + fi - # Read the versions from the Rust source - local versions=$(grep -A 10 'pub static FOUNDRY_VERSIONS' src/lib.rs | grep -o '"[^"]*"' | tr -d '"') + local versions + + # Use custom versions if provided, otherwise read from lib.rs + if [[ -n "$CUSTOM_VERSIONS" ]]; then + versions=$(echo "$CUSTOM_VERSIONS" | tr ',' ' ') + log_info "Installing custom Foundry versions: $versions" + else + # Read the versions from the Rust source + versions=$(grep -A 10 'pub static FOUNDRY_VERSIONS' src/lib.rs | grep -o '"[^"]*"' | tr -d '"') + log_info "Installing default Foundry versions from lib.rs: $versions" + fi # Check if foundryup is available if ! command -v foundryup &> /dev/null; then @@ -62,25 +73,18 @@ check_and_install_foundry() { exit 1 fi - # Install each version if not already available + # Install each version for version in $versions; do - log_info "Checking Foundry version: $version" - - # Try to switch to the version to check if it's installed - if foundryup --use "$version" 2>/dev/null; then - log_info "✓ Version $version is already installed" + log_info "Installing Foundry version: $version" + if foundryup --install "$version"; then + log_success "✓ Successfully installed version $version" else - log_info "Installing Foundry version: $version" - if foundryup --install "$version"; then - log_success "✓ Successfully installed version $version" - else - log_error "Failed to install Foundry version: $version" - exit 1 - fi + log_error "Failed to install Foundry version: $version" + exit 1 fi done - log_success "All required Foundry versions are available" + log_success "All Foundry versions installed successfully" } # Get system information @@ -99,6 +103,12 @@ get_system_info() { run_benchmarks() { log_info "Running Foundry benchmarks..." 
+ # Set environment variable for custom versions if provided + if [[ -n "$CUSTOM_VERSIONS" ]]; then + export FOUNDRY_BENCH_VERSIONS="$CUSTOM_VERSIONS" + log_info "Set FOUNDRY_BENCH_VERSIONS=$CUSTOM_VERSIONS" + fi + # Create temp files for each benchmark local temp_dir=$(mktemp -d) local forge_test_json="$temp_dir/forge_test.json" @@ -179,7 +189,12 @@ generate_report() { local timestamp=$(date) # Get repository information and create numbered list with links - local versions=$(grep -A 10 'pub static FOUNDRY_VERSIONS' src/lib.rs | grep -o '"[^"]*"' | tr -d '"' | tr '\n' ' ') + local versions + if [[ -n "$CUSTOM_VERSIONS" ]]; then + versions=$(echo "$CUSTOM_VERSIONS" | tr ',' ' ') + else + versions=$(grep -A 10 'pub static FOUNDRY_VERSIONS' src/lib.rs | grep -o '"[^"]*"' | tr -d '"' | tr '\n' ' ') + fi # Extract repository info for numbered list local repo_list="" @@ -256,8 +271,8 @@ main() { # Check dependencies check_dependencies - # Check and install required Foundry versions - check_and_install_foundry + # Install Foundry versions if --force-install is used + install_foundry_versions # Run benchmarks and generate report run_benchmarks @@ -280,18 +295,25 @@ USAGE: $0 [OPTIONS] OPTIONS: - -h, --help Show this help message - -v, --version Show version information - --verbose Show benchmark output (by default output is suppressed) + -h, --help Show this help message + -v, --version Show version information + --verbose Show benchmark output (by default output is suppressed) + --versions Comma-separated list of Foundry versions to test + (e.g. stable,nightly,v1.2.0) + If not specified, uses versions from src/lib.rs + --force-install Force installation of Foundry versions + By default, assumes versions are already installed REQUIREMENTS: - criterion-table: cargo install criterion-table - cargo-criterion: cargo install cargo-criterion - - All Foundry versions defined in src/lib.rs must be installed + - Foundry versions must be installed (or use --force-install) EXAMPLES: - $0 # Run all benchmarks and generate LATEST.md - $0 --verbose # Run benchmarks with full output visible + $0 # Run with default versions + $0 --verbose # Show full output + $0 --versions stable,nightly # Test specific versions + $0 --versions stable,nightly --force-install # Install and test versions The script will: 1. 
Run forge_test, forge_build_no_cache, and forge_build_with_cache benchmarks @@ -302,8 +324,12 @@ The script will: EOF } -# Parse command line arguments +# Default values VERBOSE=false +FORCE_INSTALL=false +CUSTOM_VERSIONS="" + +# Parse command line arguments while [[ $# -gt 0 ]]; do case $1 in -h|--help) @@ -318,6 +344,18 @@ while [[ $# -gt 0 ]]; do VERBOSE=true shift ;; + --force-install) + FORCE_INSTALL=true + shift + ;; + --versions) + if [[ -z "$2" ]] || [[ "$2" == --* ]]; then + log_error "--versions requires a comma-separated list of versions" + exit 1 + fi + CUSTOM_VERSIONS="$2" + shift 2 + ;; *) log_error "Unknown option: $1" echo "Use -h or --help for usage information" diff --git a/benches/src/lib.rs b/benches/src/lib.rs index 5d5814103a6e8..b1c74b18c6142 100644 --- a/benches/src/lib.rs +++ b/benches/src/lib.rs @@ -2,6 +2,7 @@ use eyre::{Result, WrapErr}; use foundry_compilers::project_util::TempProject; use foundry_test_utils::util::clone_remote; use std::{ + env, path::{Path, PathBuf}, process::{Command, Output}, }; @@ -93,40 +94,32 @@ impl BenchmarkProject { } } - // Install dependencies - Self::install_dependencies(&root_path)?; + // Git submodules are already cloned via --recursive flag + // But npm dependencies still need to be installed + Self::install_npm_dependencies(&root_path)?; + println!(" ✅ Project {} setup complete at {}", config.name, root); Ok(BenchmarkProject { name: config.name.to_string(), root_path, temp_project }) } - /// Install forge dependencies for the project - fn install_dependencies(root: &Path) -> Result<()> { - // Install forge dependencies if foundry.toml exists - if root.join("foundry.toml").exists() { - let status = Command::new("forge") - .current_dir(root) - .args(["install"]) - .status() - .wrap_err("Failed to run forge install")?; - - if !status.success() { - println!("Warning: forge install failed for {}", root.display()); - } - } - - // Install npm dependencies if package.json exists + /// Install npm dependencies if package.json exists + fn install_npm_dependencies(root: &Path) -> Result<()> { if root.join("package.json").exists() { + println!(" 📦 Running npm install..."); let status = Command::new("npm") .current_dir(root) .args(["install"]) + .stdout(std::process::Stdio::inherit()) + .stderr(std::process::Stdio::inherit()) .status() .wrap_err("Failed to run npm install")?; if !status.success() { - println!("Warning: npm install failed for {}", root.display()); + println!(" ⚠️ Warning: npm install failed with exit code: {:?}", status.code()); + } else { + println!(" ✅ npm install completed successfully"); } } - Ok(()) } @@ -161,15 +154,23 @@ impl BenchmarkProject { /// Switch to a specific foundry version pub fn switch_foundry_version(version: &str) -> Result<()> { - let status = Command::new("foundryup") + let output = Command::new("foundryup") .args(["--use", version]) - .status() + .output() .wrap_err("Failed to run foundryup")?; - if !status.success() { + // Check if the error is about forge --version failing + let stderr = String::from_utf8_lossy(&output.stderr); + if stderr.contains("command failed") && stderr.contains("forge --version") { + eyre::bail!("Foundry binaries maybe corrupted. 
Please reinstall, please run `foundryup` and install the required versions."); + } + + if !output.status.success() { + eprintln!("foundryup stderr: {}", stderr); eyre::bail!("Failed to switch to foundry version: {}", version); } + println!(" Successfully switched to version: {}", version); Ok(()) } @@ -189,3 +190,22 @@ pub fn get_forge_version() -> Result { Ok(version.lines().next().unwrap_or("unknown").to_string()) } + +/// Get Foundry versions to benchmark from environment variable or default +/// +/// Reads from FOUNDRY_BENCH_VERSIONS environment variable if set, +/// otherwise returns the default versions from FOUNDRY_VERSIONS constant. +/// +/// The environment variable should be a comma-separated list of versions, +/// e.g., "stable,nightly,v1.2.0" +pub fn get_benchmark_versions() -> Vec { + if let Ok(versions_env) = env::var("FOUNDRY_BENCH_VERSIONS") { + versions_env + .split(',') + .map(|s| s.trim().to_string()) + .filter(|s| !s.is_empty()) + .collect() + } else { + FOUNDRY_VERSIONS.iter().map(|&s| s.to_string()).collect() + } +} From fcba24231c4c584c712abb9fbe72308256f14c0f Mon Sep 17 00:00:00 2001 From: Yash Atreya <44857776+yash-atreya@users.noreply.github.com> Date: Fri, 27 Jun 2025 15:58:07 +0530 Subject: [PATCH 7/9] latest bench --- benches/LATEST.md | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/benches/LATEST.md b/benches/LATEST.md index 354ca2f054897..27fb56662a7d4 100644 --- a/benches/LATEST.md +++ b/benches/LATEST.md @@ -1,6 +1,6 @@ # Foundry Benchmarking Results -**Generated on:** Fri 27 Jun 2025 15:16:52 IST +**Generated on:** Fri 27 Jun 2025 15:51:19 IST **Foundry Versions Tested:** stable nightly ## Repositories Tested @@ -28,15 +28,29 @@ The following benchmarks were performed: ## Table of Contents - [Benchmark Results](#benchmark-results) + - [forge-test](#forge-test) + - [forge-build-no-cache](#forge-build-no-cache) - [forge-build-with-cache](#forge-build-with-cache) ## Benchmark Results +### forge-test + +| | `stable` | `nightly` | +|:------------------------|:-----------------------|:------------------------------ | +| **`ithacaxyz-account`** | `3.75 s` (✅ **1.00x**) | `3.27 s` (✅ **1.15x faster**) | + +### forge-build-no-cache + +| | `stable` | `nightly` | +|:------------------------|:------------------------|:------------------------------- | +| **`ithacaxyz-account`** | `14.23 s` (✅ **1.00x**) | `14.25 s` (✅ **1.00x slower**) | + ### forge-build-with-cache | | `stable` | `nightly` | |:------------------------|:--------------------------|:--------------------------------- | -| **`ithacaxyz-account`** | `166.32 ms` (✅ **1.00x**) | `171.37 ms` (✅ **1.03x slower**) | +| **`ithacaxyz-account`** | `163.53 ms` (✅ **1.00x**) | `168.00 ms` (✅ **1.03x slower**) | --- Made with [criterion-table](https://github.com/nu11ptr/criterion-table) @@ -54,7 +68,7 @@ Made with [criterion-table](https://github.com/nu11ptr/criterion-table) - **OS:** Darwin - **Architecture:** arm64 -- **Date:** Fri 27 Jun 2025 15:16:52 IST +- **Date:** Fri 27 Jun 2025 15:51:19 IST ## Raw Data From 858d8d99daf8d1addaf37c21375bf864d7db0be5 Mon Sep 17 00:00:00 2001 From: Yash Atreya <44857776+yash-atreya@users.noreply.github.com> Date: Fri, 27 Jun 2025 16:05:31 +0530 Subject: [PATCH 8/9] rm notes --- benches/LATEST.md | 40 ++++++++++++++++----------------------- benches/run_benchmarks.sh | 10 +--------- 2 files changed, 17 insertions(+), 33 deletions(-) diff --git a/benches/LATEST.md b/benches/LATEST.md index 27fb56662a7d4..a2c4e59b5931b 100644 --- 
a/benches/LATEST.md +++ b/benches/LATEST.md @@ -1,7 +1,7 @@ # Foundry Benchmarking Results **Generated on:** Fri 27 Jun 2025 15:51:19 IST -**Foundry Versions Tested:** stable nightly +**Foundry Versions Tested:** stable nightly ## Repositories Tested @@ -18,7 +18,7 @@ This report contains comprehensive benchmarking results comparing different Foun The following benchmarks were performed: 1. **forge-test** - Running the test suite (10 samples each) -2. **forge-build-no-cache** - Clean build without cache (10 samples each) +2. **forge-build-no-cache** - Clean build without cache (10 samples each) 3. **forge-build-with-cache** - Build with warm cache (10 samples each) --- @@ -28,41 +28,33 @@ The following benchmarks were performed: ## Table of Contents - [Benchmark Results](#benchmark-results) - - [forge-test](#forge-test) - - [forge-build-no-cache](#forge-build-no-cache) - - [forge-build-with-cache](#forge-build-with-cache) + - [forge-test](#forge-test) + - [forge-build-no-cache](#forge-build-no-cache) + - [forge-build-with-cache](#forge-build-with-cache) ## Benchmark Results ### forge-test -| | `stable` | `nightly` | -|:------------------------|:-----------------------|:------------------------------ | -| **`ithacaxyz-account`** | `3.75 s` (✅ **1.00x**) | `3.27 s` (✅ **1.15x faster**) | +| | `stable` | `nightly` | +| :---------------------- | :---------------------- | :----------------------------- | +| **`ithacaxyz-account`** | `3.75 s` (✅ **1.00x**) | `3.27 s` (✅ **1.15x faster**) | ### forge-build-no-cache -| | `stable` | `nightly` | -|:------------------------|:------------------------|:------------------------------- | -| **`ithacaxyz-account`** | `14.23 s` (✅ **1.00x**) | `14.25 s` (✅ **1.00x slower**) | +| | `stable` | `nightly` | +| :---------------------- | :----------------------- | :------------------------------ | +| **`ithacaxyz-account`** | `14.23 s` (✅ **1.00x**) | `14.25 s` (✅ **1.00x slower**) | ### forge-build-with-cache -| | `stable` | `nightly` | -|:------------------------|:--------------------------|:--------------------------------- | -| **`ithacaxyz-account`** | `163.53 ms` (✅ **1.00x**) | `168.00 ms` (✅ **1.03x slower**) | +| | `stable` | `nightly` | +| :---------------------- | :------------------------- | :-------------------------------- | +| **`ithacaxyz-account`** | `163.53 ms` (✅ **1.00x**) | `168.00 ms` (✅ **1.03x slower**) | --- -Made with [criterion-table](https://github.com/nu11ptr/criterion-table) - -## Notes -- All benchmarks use Criterion.rs for statistical analysis -- Each benchmark runs 10 samples by default -- Results show mean execution time with confidence intervals -- Repositories are cloned once and reused across all Foundry versions -- Build and setup operations are parallelized using Rayon -- The first version tested becomes the baseline for comparisons +Made with [criterion-table](https://github.com/nu11ptr/criterion-table) ## System Information @@ -73,5 +65,5 @@ Made with [criterion-table](https://github.com/nu11ptr/criterion-table) ## Raw Data Detailed benchmark data and HTML reports are available in: -- `target/criterion/` - Individual benchmark reports +- `target/criterion/` - Individual benchmark reports diff --git a/benches/run_benchmarks.sh b/benches/run_benchmarks.sh index b9ddfe378c25d..a4b343a0ac226 100755 --- a/benches/run_benchmarks.sh +++ b/benches/run_benchmarks.sh @@ -239,16 +239,8 @@ EOF # Append the criterion-table generated tables cat "$tables_file" >> "$report_file" - # Add notes and system info + # Add system info cat >> 
"$report_file" << EOF -## Notes - -- All benchmarks use Criterion.rs for statistical analysis -- Each benchmark runs 10 samples by default -- Results show mean execution time with confidence intervals -- Repositories are cloned once and reused across all Foundry versions -- Build and setup operations are parallelized using Rayon -- The first version tested becomes the baseline for comparisons ## System Information From 44835cf9cfd0b95e4029b844d0df925f89492b9e Mon Sep 17 00:00:00 2001 From: Yash Atreya <44857776+yash-atreya@users.noreply.github.com> Date: Fri, 27 Jun 2025 16:20:10 +0530 Subject: [PATCH 9/9] remove shell based bench suite --- benches/benchmark.sh | 577 --------------------- benches/commands/forge_build_no_cache.sh | 45 -- benches/commands/forge_build_with_cache.sh | 47 -- benches/commands/forge_test.sh | 47 -- benches/repos_and_versions.sh | 44 -- 5 files changed, 760 deletions(-) delete mode 100755 benches/benchmark.sh delete mode 100755 benches/commands/forge_build_no_cache.sh delete mode 100755 benches/commands/forge_build_with_cache.sh delete mode 100755 benches/commands/forge_test.sh delete mode 100755 benches/repos_and_versions.sh diff --git a/benches/benchmark.sh b/benches/benchmark.sh deleted file mode 100755 index 3d1e92aa62d9d..0000000000000 --- a/benches/benchmark.sh +++ /dev/null @@ -1,577 +0,0 @@ -#!/bin/bash - -# Foundry Multi-Version Benchmarking Suite -# This script benchmarks forge test and forge build commands across multiple repositories -# and multiple Foundry versions for comprehensive performance comparison - -set -e - -# Main execution -main() { - log_info "Starting Foundry Multi-Version Benchmarking Suite..." - log_info "Testing Foundry versions: ${FOUNDRY_VERSIONS[*]}" - log_info "Testing repositories: ${REPO_NAMES[*]}" - - # Setup - check_dependencies - setup_directories - - # Ensure cleanup on exit - trap cleanup EXIT - - # Install all Foundry versions upfront - install_all_foundry_versions - - # Clone/update repositories - for i in "${!REPO_NAMES[@]}"; do - clone_or_update_repo "${REPO_NAMES[$i]}" "${REPO_URLS[$i]}" - install_dependencies "${BENCHMARK_DIR}/${REPO_NAMES[$i]}" "${REPO_NAMES[$i]}" - done - - # Run benchmarks in parallel - benchmark_all_repositories_parallel - - # Compile results - compile_results - - log_success "Benchmarking complete!" 
- log_success "Results saved to: $RESULTS_FILE" - log_success "Latest results: $LATEST_RESULTS_FILE" - log_success "Raw JSON data saved to: $JSON_RESULTS_DIR" - log_info "You can view the results with: cat $LATEST_RESULTS_FILE" -} - -# Get the directory where this script is located -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - -# Configuration -BENCHMARK_DIR="${SCRIPT_DIR}/benchmark_repos" -RESULTS_DIR="${SCRIPT_DIR}/benchmark_results" -TIMESTAMP=$(date +"%Y%m%d_%H%M%S") -RESULTS_FILE="${RESULTS_DIR}/foundry_multi_version_benchmark_${TIMESTAMP}.md" -LATEST_RESULTS_FILE="${SCRIPT_DIR}/LATEST.md" -JSON_RESULTS_DIR="${RESULTS_DIR}/json_${TIMESTAMP}" - -# Load configuration -source "${SCRIPT_DIR}/repos_and_versions.sh" - -# Load benchmark commands -source "${SCRIPT_DIR}/commands/forge_test.sh" -source "${SCRIPT_DIR}/commands/forge_build_no_cache.sh" -source "${SCRIPT_DIR}/commands/forge_build_with_cache.sh" - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Helper functions -log_info() { - echo -e "${BLUE}[INFO]${NC} $1" -} - -log_success() { - echo -e "${GREEN}[SUCCESS]${NC} $1" -} - -log_warn() { - echo -e "${YELLOW}[WARN]${NC} $1" -} - -log_error() { - echo -e "${RED}[ERROR]${NC} $1" -} - -# Install foundryup if not present -install_foundryup() { - if ! command -v foundryup &> /dev/null; then - log_info "Installing foundryup..." - curl -L https://foundry.paradigm.xyz | bash - # Source the bashrc/profile to get foundryup in PATH - export PATH="$HOME/.foundry/bin:$PATH" - fi -} - -# Install a specific Foundry version -install_foundry_version() { - local version=$1 - log_info "Installing Foundry version: $version" - - # Let foundryup handle any version format and determine validity - if foundryup --install "$version"; then - # Verify installation - local installed_version=$(forge --version | head -n1 || echo "unknown") - log_success "Installed Foundry: $installed_version" - return 0 - else - log_error "Failed to install Foundry $version" - return 1 - fi -} - -# Switch to a specific installed Foundry version -use_foundry_version() { - local version=$1 - log_info "Switching to Foundry version: $version" - - if foundryup --use "$version"; then - # Verify switch - local current_version=$(forge --version | head -n1 || echo "unknown") - log_success "Now using Foundry: $current_version" - return 0 - else - log_error "Failed to switch to Foundry $version" - return 1 - fi -} - -# Install all required Foundry versions upfront -install_all_foundry_versions() { - log_info "Installing all required Foundry versions as preprocessing step..." - - local failed_versions=() - - for version in "${FOUNDRY_VERSIONS[@]}"; do - if ! install_foundry_version "$version"; then - failed_versions+=("$version") - fi - done - - if [ ${#failed_versions[@]} -ne 0 ]; then - log_error "Failed to install the following Foundry versions: ${failed_versions[*]}" - log_error "Please check the version names and try again" - exit 1 - fi - - log_success "All Foundry versions installed successfully!" - - # List all installed versions for verification - log_info "Available installed versions:" - foundryup --list || log_warn "Could not list installed versions" -} - -# Check if required tools are installed -check_dependencies() { - local missing_deps=() - - if ! command -v hyperfine &> /dev/null; then - missing_deps+=("hyperfine") - fi - - if ! command -v git &> /dev/null; then - missing_deps+=("git") - fi - - if ! 
command -v curl &> /dev/null; then - missing_deps+=("curl") - fi - - if [ ${#missing_deps[@]} -ne 0 ]; then - log_error "Missing required dependencies: ${missing_deps[*]}" - log_info "Install hyperfine: https://github.com/sharkdp/hyperfine#installation" - exit 1 - fi - - # Install foundryup if needed - install_foundryup -} - -# Setup directories -setup_directories() { - log_info "Setting up benchmark directories..." - mkdir -p "$BENCHMARK_DIR" - mkdir -p "$RESULTS_DIR" - mkdir -p "$JSON_RESULTS_DIR" -} - -# Clone or update repository -clone_or_update_repo() { - local name=$1 - local url=$2 - local repo_dir="${BENCHMARK_DIR}/${name}" - - if [ -d "$repo_dir" ]; then - log_info "Updating existing repository: $name" - cd "$repo_dir" - git pull origin main 2>/dev/null || git pull origin master 2>/dev/null || true - cd - > /dev/null - else - log_info "Cloning repository: $name" - git clone "$url" "$repo_dir" - fi -} - -# Install dependencies for a repository -install_dependencies() { - local repo_dir=$1 - local repo_name=$2 - - log_info "Installing dependencies for $repo_name..." - cd "$repo_dir" - - # Install forge dependencies - if [ -f "foundry.toml" ]; then - forge install 2>/dev/null || true - fi - - # Install npm dependencies if package.json exists - if [ -f "package.json" ]; then - if command -v npm &> /dev/null; then - npm install 2>/dev/null || true - fi - fi - - cd - > /dev/null -} - -# Run benchmarks for a single repository with a specific Foundry version -benchmark_repository_for_version() { - local repo_name=$1 - local version=$2 - local repo_dir="${BENCHMARK_DIR}/${repo_name}" - - # Create a unique log file for this repo+version combination - local log_file="${JSON_RESULTS_DIR}/${repo_name}_${version//[^a-zA-Z0-9]/_}_benchmark.log" - - { - echo "$(date): Starting benchmark for $repo_name with Foundry $version" - - if [ ! -d "$repo_dir" ]; then - echo "ERROR: Repository directory not found: $repo_dir" - return 1 - fi - - cd "$repo_dir" - - # Check if it's a valid Foundry project - if [ ! -f "foundry.toml" ]; then - echo "WARN: No foundry.toml found in $repo_name, skipping..." - cd - > /dev/null - return 0 - fi - - # Clean version string for filenames (remove 'v' prefix, replace '.' with '_') - local clean_version="${version//v/}" - clean_version="${clean_version//\./_}" - - local version_results_dir="${JSON_RESULTS_DIR}/${repo_name}_${clean_version}" - mkdir -p "$version_results_dir" - - echo "Running benchmarks for $repo_name with Foundry $version..." 
- - # Run all benchmark commands - fail fast if any command fails - benchmark_forge_test "$repo_name" "$version" "$version_results_dir" "$log_file" || { - echo "FATAL: forge test benchmark failed for $repo_name with Foundry $version" >> "$log_file" - exit 1 - } - benchmark_forge_build_no_cache "$repo_name" "$version" "$version_results_dir" "$log_file" || { - echo "FATAL: forge build (no cache) benchmark failed for $repo_name with Foundry $version" >> "$log_file" - exit 1 - } - benchmark_forge_build_with_cache "$repo_name" "$version" "$version_results_dir" "$log_file" || { - echo "FATAL: forge build (with cache) benchmark failed for $repo_name with Foundry $version" >> "$log_file" - exit 1 - } - - # Store version info for this benchmark - forge --version | head -n1 > "${version_results_dir}/forge_version.txt" 2>/dev/null || echo "unknown" > "${version_results_dir}/forge_version.txt" - - cd - > /dev/null - echo "$(date): Completed benchmark for $repo_name with Foundry $version" - - } > "$log_file" 2>&1 -} - -# Run benchmarks for all repositories in parallel for each Foundry version -benchmark_all_repositories_parallel() { - for version in "${FOUNDRY_VERSIONS[@]}"; do - log_info "Switching to Foundry version: $version" - - # Switch to the pre-installed version - use_foundry_version "$version" || { - log_warn "Failed to switch to Foundry $version, skipping all repositories for this version..." - continue - } - - log_info "Starting parallel benchmarks for all repositories with Foundry $version" - - # Launch all repositories in parallel - local pids=() - - for repo_name in "${REPO_NAMES[@]}"; do - # Check if repo directory exists and is valid before starting background process - local repo_dir="${BENCHMARK_DIR}/${repo_name}" - if [ ! -d "$repo_dir" ]; then - log_warn "Repository directory not found: $repo_dir, skipping..." - continue - fi - - if [ ! -f "${repo_dir}/foundry.toml" ]; then - log_warn "No foundry.toml found in $repo_name, skipping..." - continue - fi - - log_info "Launching background benchmark for $repo_name..." - benchmark_repository_for_version "$repo_name" "$version" & - local pid=$! - pids+=($pid) - echo "$repo_name:$pid" >> "${JSON_RESULTS_DIR}/parallel_pids_${version//[^a-zA-Z0-9]/_}.txt" - done - - # Wait for all repositories to complete - log_info "Waiting for ${#pids[@]} parallel benchmarks to complete for Foundry $version..." 
- local completed=0 - local total=${#pids[@]} - - for pid in "${pids[@]}"; do - if wait "$pid"; then - completed=$((completed + 1)) - log_info "Progress: $completed/$total repositories completed for Foundry $version" - else - log_error "Benchmark process failed (PID: $pid) for Foundry $version" - exit 1 - fi - done - - log_success "All repositories completed for Foundry $version ($completed/$total successful)" - - # Show summary of log files created - log_info "Individual benchmark logs available in: ${JSON_RESULTS_DIR}/*_${version//[^a-zA-Z0-9]/_}_benchmark.log" - done -} - -# Extract mean time from JSON result file -extract_mean_time() { - local json_file=$1 - if [ -f "$json_file" ]; then - # Extract mean time in seconds, format to 3 decimal places - python3 -c " -import json, sys -try: - with open('$json_file') as f: - data = json.load(f) - mean_time = data['results'][0]['mean'] - print(f'{mean_time:.3f}') -except: - print('N/A') -" 2>/dev/null || echo "N/A" - else - echo "N/A" - fi -} - -# Get Foundry version string from file -get_forge_version() { - local version_file=$1 - if [ -f "$version_file" ]; then - cat "$version_file" | sed 's/forge //' | sed 's/ (.*//' - else - echo "unknown" - fi -} - -# Compile results into markdown with comparison tables -compile_results() { - log_info "Compiling benchmark results..." - - cat > "$RESULTS_FILE" << EOF -# Forge Benchmarking Results - -**Generated on:** $(date) -**Hyperfine Version:** $(hyperfine --version) -**Foundry Versions Tested:** ${FOUNDRY_VERSIONS[*]} -**Repositories Tested:** ${REPO_NAMES[*]} - -## Summary - -This report contains comprehensive benchmarking results comparing different Foundry versions across multiple projects. -The following benchmarks were performed: - -1. **$(get_forge_test_description)** -2. **$(get_forge_build_no_cache_description)** -3. 
**$(get_forge_build_with_cache_description)** - ---- - -## Performance Comparison Tables - -EOF - - # Create unified comparison tables for each benchmark type - local benchmark_commands=("forge_test" "forge_build_no_cache" "forge_build_with_cache") - - for cmd in "${benchmark_commands[@]}"; do - local bench_name="${cmd//_/ }" - local bench_type=$(get_${cmd}_type) - local json_filename=$(get_${cmd}_json_filename) - - echo "### $bench_name" >> "$RESULTS_FILE" - echo "" >> "$RESULTS_FILE" - echo "Mean execution time in seconds (lower is better):" >> "$RESULTS_FILE" - echo "" >> "$RESULTS_FILE" - - # Create table header with proper column names - local header_row="| Project" - for version in "${FOUNDRY_VERSIONS[@]}"; do - header_row+=" | $version (s)" - done - header_row+=" |" - echo "$header_row" >> "$RESULTS_FILE" - - # Create table separator with proper alignment - local separator_row="|------" - for version in "${FOUNDRY_VERSIONS[@]}"; do - separator_row+="|--------:" - done - separator_row+="|" - echo "$separator_row" >> "$RESULTS_FILE" - - # Add data rows - for repo_name in "${REPO_NAMES[@]}"; do - local data_row="| **$repo_name**" - - for version in "${FOUNDRY_VERSIONS[@]}"; do - local clean_version="${version//v/}" - clean_version="${clean_version//\./_}" - local version_results_dir="${JSON_RESULTS_DIR}/${repo_name}_${clean_version}" - local json_file="${version_results_dir}/${json_filename}" - - local mean_time=$(extract_mean_time "$json_file") - data_row+=" | $mean_time" - done - data_row+=" |" - echo "$data_row" >> "$RESULTS_FILE" - done - echo "" >> "$RESULTS_FILE" - echo "" >> "$RESULTS_FILE" - done - - # Add detailed version information - echo "## Foundry Version Details" >> "$RESULTS_FILE" - echo "" >> "$RESULTS_FILE" - - for version in "${FOUNDRY_VERSIONS[@]}"; do - echo "### $version" >> "$RESULTS_FILE" - echo "" >> "$RESULTS_FILE" - - # Find any version file to get the detailed version info - local clean_version="${version//v/}" - clean_version="${clean_version//\./_}" - - for repo_name in "${REPO_NAMES[@]}"; do - local version_file="${JSON_RESULTS_DIR}/${repo_name}_${clean_version}/forge_version.txt" - if [ -f "$version_file" ]; then - echo "\`\`\`" >> "$RESULTS_FILE" - cat "$version_file" >> "$RESULTS_FILE" - echo "\`\`\`" >> "$RESULTS_FILE" - break - fi - done - echo "" >> "$RESULTS_FILE" - done - - # Add notes and system info - cat >> "$RESULTS_FILE" << EOF - -## Notes - -- All benchmarks were run with hyperfine in parallel mode -- **$(get_forge_test_description)** -- **$(get_forge_build_no_cache_description)** -- **$(get_forge_build_with_cache_description)** -- Results show mean execution time in seconds -- N/A indicates benchmark failed or data unavailable - -## System Information - -- **OS:** $(uname -s) -- **Architecture:** $(uname -m) -- **Date:** $(date) - -## Raw Data - -Raw JSON benchmark data is available in: \`$JSON_RESULTS_DIR\` - -EOF - - # Copy to LATEST.md - cp "$RESULTS_FILE" "$LATEST_RESULTS_FILE" - log_success "Latest results also saved to: $LATEST_RESULTS_FILE" -} - -# Cleanup temporary files -cleanup() { - log_info "Cleanup completed" -} - -# Parse command line arguments -parse_args() { - while [[ $# -gt 0 ]]; do - case $1 in - --versions) - shift - if [[ $# -eq 0 ]]; then - log_error "--versions requires a space-separated list of versions" - exit 1 - fi - # Read versions until next flag or end of args - FOUNDRY_VERSIONS=() - while [[ $# -gt 0 && ! 
"$1" =~ ^-- ]]; do - FOUNDRY_VERSIONS+=("$1") - shift - done - ;; - --help|-h) - echo "Foundry Benchmarking Suite" - echo "" - echo "Usage: $0 [OPTIONS]" - echo "" - echo "OPTIONS:" - echo " --help, -h Show this help message" - echo " --version, -v Show version information" - echo " --versions ... Specify Foundry versions to benchmark" - echo " (default: from repos_and_versions.sh)" - echo "" - echo "EXAMPLES:" - echo " $0 # Use default versions (parallel)" - echo " $0 --versions stable nightly # Benchmark stable and nightly only" - echo " $0 --versions v1.0.0 v1.1.0 v1.2.0 # Benchmark specific versions" - echo "" - echo "This script benchmarks forge test and forge build commands across" - echo "multiple Foundry repositories and versions using hyperfine." - - echo "Supported version formats:" - echo " - stable, nightly (special tags)" - echo " - v1.0.0, v1.1.0, etc. (specific versions)" - echo " - nightly- (specific nightly builds)" - echo " - Any format supported by foundryup" - echo "" - echo "The script will:" - echo " 1. Install foundryup if not present" - echo " 2. Install all specified Foundry versions (preprocessing step)" - echo " 3. Clone/update target repositories" - echo " 4. Switch between versions and run benchmarks in parallel" - echo " 5. Generate comparison tables in markdown format" - echo " 6. Save results to LATEST.md" - exit 0 - ;; - --version|-v) - exit 0 - ;; - *) - log_error "Unknown option: $1" - echo "Use --help for usage information" - exit 1 - ;; - esac - done -} - -# Handle command line arguments -if [[ $# -gt 0 ]]; then - parse_args "$@" -fi - -main \ No newline at end of file diff --git a/benches/commands/forge_build_no_cache.sh b/benches/commands/forge_build_no_cache.sh deleted file mode 100755 index 756528176ac4e..0000000000000 --- a/benches/commands/forge_build_no_cache.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -# Forge Build (No Cache) Benchmark Command -# This file contains the configuration and execution logic for benchmarking 'forge build' with no cache - -# Command configuration -FORGE_BUILD_NO_CACHE_RUNS=5 - -# Benchmark function for forge build (no cache) -benchmark_forge_build_no_cache() { - local repo_name=$1 - local version=$2 - local version_results_dir=$3 - local log_file=$4 - - echo "Running 'forge build' (no cache) benchmark..." 
>> "$log_file" - - if hyperfine \ - --runs "$FORGE_BUILD_NO_CACHE_RUNS" \ - --prepare 'forge clean' \ - --export-json "${version_results_dir}/build_no_cache_results.json" \ - "forge build" 2>>"$log_file.error"; then - echo "✓ forge build (no cache) completed" >> "$log_file" - return 0 - else - echo "✗ forge build (no cache) failed" >> "$log_file" - echo "FATAL: forge build (no cache) benchmark failed" >> "$log_file" - return 1 - fi -} - -# Get command description for reporting -get_forge_build_no_cache_description() { - echo "forge build (no cache) - Clean build without cache ($FORGE_BUILD_NO_CACHE_RUNS runs, cache cleaned after each run)" -} - -# Get JSON result filename -get_forge_build_no_cache_json_filename() { - echo "build_no_cache_results.json" -} - -# Get benchmark type identifier -get_forge_build_no_cache_type() { - echo "build_no_cache" -} \ No newline at end of file diff --git a/benches/commands/forge_build_with_cache.sh b/benches/commands/forge_build_with_cache.sh deleted file mode 100755 index 25fab9dbc190f..0000000000000 --- a/benches/commands/forge_build_with_cache.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -# Forge Build (With Cache) Benchmark Command -# This file contains the configuration and execution logic for benchmarking 'forge build' with cache - -# Command configuration -FORGE_BUILD_WITH_CACHE_RUNS=5 -FORGE_BUILD_WITH_CACHE_WARMUP=1 - -# Benchmark function for forge build (with cache) -benchmark_forge_build_with_cache() { - local repo_name=$1 - local version=$2 - local version_results_dir=$3 - local log_file=$4 - - echo "Running 'forge build' (with cache) benchmark..." >> "$log_file" - - if hyperfine \ - --runs "$FORGE_BUILD_WITH_CACHE_RUNS" \ - --prepare 'forge build' \ - --warmup "$FORGE_BUILD_WITH_CACHE_WARMUP" \ - --export-json "${version_results_dir}/build_with_cache_results.json" \ - "forge build" 2>>"$log_file.error"; then - echo "✓ forge build (with cache) completed" >> "$log_file" - return 0 - else - echo "✗ forge build (with cache) failed" >> "$log_file" - echo "FATAL: forge build (with cache) benchmark failed" >> "$log_file" - return 1 - fi -} - -# Get command description for reporting -get_forge_build_with_cache_description() { - echo "forge build (with cache) - Build with warm cache ($FORGE_BUILD_WITH_CACHE_RUNS runs, $FORGE_BUILD_WITH_CACHE_WARMUP warmup)" -} - -# Get JSON result filename -get_forge_build_with_cache_json_filename() { - echo "build_with_cache_results.json" -} - -# Get benchmark type identifier -get_forge_build_with_cache_type() { - echo "build_with_cache" -} \ No newline at end of file diff --git a/benches/commands/forge_test.sh b/benches/commands/forge_test.sh deleted file mode 100755 index 9427d18d3c30c..0000000000000 --- a/benches/commands/forge_test.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -# Forge Test Benchmark Command -# This file contains the configuration and execution logic for benchmarking 'forge test' - -# Command configuration -FORGE_TEST_RUNS=5 -FORGE_TEST_WARMUP=1 - -# Benchmark function for forge test -benchmark_forge_test() { - local repo_name=$1 - local version=$2 - local version_results_dir=$3 - local log_file=$4 - - echo "Running 'forge test' benchmark..." 
>> "$log_file" - - if hyperfine \ - --runs "$FORGE_TEST_RUNS" \ - --prepare 'forge build' \ - --warmup "$FORGE_TEST_WARMUP" \ - --export-json "${version_results_dir}/test_results.json" \ - "forge test" 2>>"$log_file.error"; then - echo "✓ forge test completed" >> "$log_file" - return 0 - else - echo "✗ forge test failed" >> "$log_file" - echo "FATAL: forge test benchmark failed" >> "$log_file" - return 1 - fi -} - -# Get command description for reporting -get_forge_test_description() { - echo "forge test - Running the test suite ($FORGE_TEST_RUNS runs, $FORGE_TEST_WARMUP warmup)" -} - -# Get JSON result filename -get_forge_test_json_filename() { - echo "test_results.json" -} - -# Get benchmark type identifier -get_forge_test_type() { - echo "test" -} \ No newline at end of file diff --git a/benches/repos_and_versions.sh b/benches/repos_and_versions.sh deleted file mode 100755 index 07da480b05d2d..0000000000000 --- a/benches/repos_and_versions.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -# Foundry Multi-Version Benchmarking Configuration -# This file contains the configuration for repositories and Foundry versions to benchmark - -# Foundry versions to benchmark -# Supported formats: -# - stable, nightly (special tags) -# - v1.0.0, v1.1.0, etc. (specific versions) -# - nightly- (specific nightly builds) -# - Any format supported by foundryup -FOUNDRY_VERSIONS=( - "stable" - "nightly" -) - -# Repository configurations -# Add new repositories by adding entries to both arrays -REPO_NAMES=( - "ithacaxyz-account" - # "v4-core" - "solady" - # "morpho-blue" - # "spark-psm" -) - -REPO_URLS=( - "https://github.com/ithacaxyz/account" - # "https://github.com/Uniswap/v4-core" - "https://github.com/Vectorized/solady" - # "https://github.com/morpho-org/morpho-blue" - # "https://github.com/sparkdotfi/spark-psm" -) - -# Verify arrays have the same length -if [ ${#REPO_NAMES[@]} -ne ${#REPO_URLS[@]} ]; then - echo "ERROR: REPO_NAMES and REPO_URLS arrays must have the same length" - exit 1 -fi - -# Export variables for use in other scripts -export FOUNDRY_VERSIONS -export REPO_NAMES -export REPO_URLS \ No newline at end of file