diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml new file mode 100644 index 0000000000000..d5c6e0cecf501 --- /dev/null +++ b/.github/workflows/benchmarks.yml @@ -0,0 +1,108 @@ +name: Foundry Benchmarks + +on: + workflow_dispatch: + inputs: + pr_number: + description: "PR number to comment on (optional)" + required: false + type: string + +permissions: + contents: write + pull-requests: write + +jobs: + benchmark: + name: Run Foundry Benchmarks + runs-on: ubuntu-latest + timeout-minutes: 60 + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Setup Rust toolchain + uses: dtolnay/rust-toolchain@stable + + - name: Cache Rust dependencies + uses: Swatinem/rust-cache@v2 + with: + workspaces: | + ./ + + - name: Install foundryup + run: | + curl -L https://foundry.paradigm.xyz | bash + echo "$HOME/.foundry/bin" >> $GITHUB_PATH + + - name: Install benchmark dependencies + run: | + cargo install cargo-criterion + cargo install criterion-table + + - name: Run benchmarks + working-directory: ./benches + run: | + chmod +x run_benchmarks.sh + ./run_benchmarks.sh + + - name: Commit benchmark results + run: | + git config --local user.email "action@github.com" + git config --local user.name "GitHub Action" + git add benches/LATEST.md + if git diff --staged --quiet; then + echo "No changes to commit" + else + git commit -m "Update benchmark results + + 🤖 Generated with [Foundry Benchmarks](https://github.com/${{ github.repository }}/actions) + + Co-Authored-By: github-actions " + git push + fi + + - name: Read benchmark results + id: benchmark_results + run: | + if [ -f "benches/LATEST.md" ]; then + { + echo 'results<> $GITHUB_OUTPUT + else + echo 'results=No benchmark results found.' 
>> $GITHUB_OUTPUT + fi + + - name: Comment on PR + if: github.event.inputs.pr_number != '' + uses: actions/github-script@v7 + with: + script: | + const prNumber = ${{ github.event.inputs.pr_number }}; + const benchmarkResults = `${{ steps.benchmark_results.outputs.results }}`; + + const comment = `## 📊 Foundry Benchmark Results + +
+ Click to view detailed benchmark results + + ${benchmarkResults} + +
+ + --- + + 🤖 This comment was automatically generated by the [Foundry Benchmarks workflow](https://github.com/${{ github.repository }}/actions). + + To run benchmarks manually: Go to [Actions](https://github.com/${{ github.repository }}/actions/workflows/foundry-benchmarks.yml) → "Run workflow"`; + + github.rest.issues.createComment({ + issue_number: prNumber, + owner: context.repo.owner, + repo: context.repo.repo, + body: comment + }); + diff --git a/Cargo.lock b/Cargo.lock index 5b92cb3acb23a..9a469d70d4a4c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -588,7 +588,7 @@ dependencies = [ "alloy-rlp", "alloy-serde", "alloy-sol-types", - "itertools 0.13.0", + "itertools 0.14.0", "serde", "serde_json", "thiserror 2.0.12", @@ -946,6 +946,12 @@ dependencies = [ "libc", ] +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "annotate-snippets" version = "0.11.5" @@ -2361,6 +2367,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53" +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + [[package]] name = "cast" version = "1.2.3" @@ -2503,8 +2515,10 @@ checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" dependencies = [ "android-tzdata", "iana-time-zone", + "js-sys", "num-traits", "serde", + "wasm-bindgen", "windows-link", ] @@ -2966,6 +2980,42 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f" +dependencies = [ + "anes", + "cast 0.3.0", + "ciborium", + "clap", + 
"criterion-plot", + "is-terminal", + "itertools 0.10.5", + "num-traits", + "once_cell", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast 0.3.0", + "itertools 0.10.5", +] + [[package]] name = "crossbeam-channel" version = "0.5.15" @@ -4047,6 +4097,23 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "foundry-bench" +version = "0.1.0" +dependencies = [ + "chrono", + "criterion", + "eyre", + "foundry-compilers", + "foundry-config", + "foundry-test-utils", + "rayon", + "serde", + "serde_json", + "tempfile", + "tokio", +] + [[package]] name = "foundry-block-explorers" version = "0.18.0" @@ -4265,7 +4332,7 @@ dependencies = [ "fs_extra", "futures-util", "home", - "itertools 0.13.0", + "itertools 0.14.0", "path-slash", "rand 0.8.5", "rayon", @@ -6483,6 +6550,12 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "oorandom" +version = "11.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e" + [[package]] name = "op-alloy-consensus" version = "0.17.2" @@ -6945,6 +7018,34 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "plotters" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.7" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" + +[[package]] +name = "plotters-svg" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" +dependencies = [ + "plotters-backend", +] + [[package]] name = "portable-atomic" version = "1.11.1" @@ -7162,7 +7263,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools 0.13.0", + "itertools 0.14.0", "proc-macro2", "quote", "syn 2.0.103", @@ -8694,7 +8795,7 @@ dependencies = [ "derive_builder", "derive_more 2.0.1", "dunce", - "itertools 0.13.0", + "itertools 0.14.0", "itoa", "lasso", "match_cfg", @@ -8706,7 +8807,7 @@ dependencies = [ "solar-config", "solar-data-structures", "solar-macros", - "thiserror 1.0.69", + "thiserror 2.0.12", "tracing", "unicode-width 0.2.0", ] @@ -8731,7 +8832,7 @@ dependencies = [ "alloy-primitives", "bitflags 2.9.1", "bumpalo", - "itertools 0.13.0", + "itertools 0.14.0", "memchr", "num-bigint", "num-rational", @@ -9042,7 +9143,7 @@ dependencies = [ "serde_json", "sha2 0.10.9", "tempfile", - "thiserror 1.0.69", + "thiserror 2.0.12", "url", "zip", ] @@ -9322,6 +9423,16 @@ dependencies = [ "crunchy", ] +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tinyvec" version = "1.9.0" diff --git a/Cargo.toml b/Cargo.toml index 30e2b3d0f1603..e6c5bce09b194 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,6 @@ [workspace] members = [ + "benches/", "crates/anvil/", "crates/anvil/core/", "crates/anvil/rpc/", diff --git a/benches/Cargo.toml b/benches/Cargo.toml new 
file mode 100644 index 0000000000000..81c3c71e3a829 --- /dev/null +++ b/benches/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "foundry-bench" +version = "0.1.0" +edition = "2021" + +[[bench]] +name = "forge_test" +path = "forge_test.rs" +harness = false + +[[bench]] +name = "forge_build_no_cache" +path = "forge_build_no_cache.rs" +harness = false + +[[bench]] +name = "forge_build_with_cache" +path = "forge_build_with_cache.rs" +harness = false + +[dependencies] +criterion = { version = "0.5", features = ["html_reports"] } +foundry-test-utils.workspace = true +foundry-config.workspace = true +foundry-compilers = { workspace = true, features = ["project-util"] } +eyre.workspace = true +serde.workspace = true +serde_json.workspace = true +tempfile.workspace = true +tokio = { workspace = true, features = ["full"] } +chrono = { version = "0.4", features = ["serde"] } +rayon.workspace = true + +[dev-dependencies] +foundry-test-utils.workspace = true diff --git a/benches/LATEST.md b/benches/LATEST.md index f276dc5fd6133..a2c4e59b5931b 100644 --- a/benches/LATEST.md +++ b/benches/LATEST.md @@ -1,84 +1,69 @@ -# Forge Benchmarking Results +# Foundry Benchmarking Results -**Generated on:** Wed 18 Jun 2025 17:46:19 BST -**Hyperfine Version:** hyperfine 1.19.0 +**Generated on:** Fri 27 Jun 2025 15:51:19 IST **Foundry Versions Tested:** stable nightly -**Repositories Tested:** ithacaxyz-account solady -## Summary - -This report contains comprehensive benchmarking results comparing different Foundry versions across multiple projects. -The following benchmarks were performed: - -1. **forge test - Running the test suite (5 runs, 1 warmup)** -2. **forge build (no cache) - Clean build without cache (5 runs, cache cleaned after each run)** -3. 
**forge build (with cache) - Build with warm cache (5 runs, 1 warmup)** - ---- - -## Performance Comparison Tables +## Repositories Tested -### forge test - -Mean execution time in seconds (lower is better): - -| Project | stable (s) | nightly (s) | -|------|--------:|--------:| -| **ithacaxyz-account** | 5.791 | 3.875 | -| **solady** | 3.578 | 2.966 | +1. [ithacaxyz-account](https://github.com/ithacaxyz/main) +2. [solady](https://github.com/Vectorized/main) +3. [v4-core](https://github.com/Uniswap/main) +4. [morpho-blue](https://github.com/morpho-org/main) +5. [spark-psm](https://github.com/marsfoundation/master) +## Summary -### forge build no cache +This report contains comprehensive benchmarking results comparing different Foundry versions across multiple projects using Criterion.rs for precise performance measurements. -Mean execution time in seconds (lower is better): +The following benchmarks were performed: -| Project | stable (s) | nightly (s) | -|------|--------:|--------:| -| **ithacaxyz-account** | 19.079 | 16.177 | -| **solady** | 27.408 | 22.745 | +1. **forge-test** - Running the test suite (10 samples each) +2. **forge-build-no-cache** - Clean build without cache (10 samples each) +3. 
**forge-build-with-cache** - Build with warm cache (10 samples each) +--- -### forge build with cache +# Benchmarks -Mean execution time in seconds (lower is better): +## Table of Contents -| Project | stable (s) | nightly (s) | -|------|--------:|--------:| -| **ithacaxyz-account** | 0.181 | 0.158 | -| **solady** | 0.091 | 0.103 | +- [Benchmark Results](#benchmark-results) + - [forge-test](#forge-test) + - [forge-build-no-cache](#forge-build-no-cache) + - [forge-build-with-cache](#forge-build-with-cache) +## Benchmark Results -## Foundry Version Details +### forge-test -### stable +| | `stable` | `nightly` | +| :---------------------- | :---------------------- | :----------------------------- | +| **`ithacaxyz-account`** | `3.75 s` (✅ **1.00x**) | `3.27 s` (✅ **1.15x faster**) | -``` -forge Version: 1.2.3-stable -``` +### forge-build-no-cache -### nightly +| | `stable` | `nightly` | +| :---------------------- | :----------------------- | :------------------------------ | +| **`ithacaxyz-account`** | `14.23 s` (✅ **1.00x**) | `14.25 s` (✅ **1.00x slower**) | -``` -forge Version: 1.2.3-nightly -``` +### forge-build-with-cache +| | `stable` | `nightly` | +| :---------------------- | :------------------------- | :-------------------------------- | +| **`ithacaxyz-account`** | `163.53 ms` (✅ **1.00x**) | `168.00 ms` (✅ **1.03x slower**) | -## Notes +--- -- All benchmarks were run with hyperfine in parallel mode -- **forge test - Running the test suite (5 runs, 1 warmup)** -- **forge build (no cache) - Clean build without cache (5 runs, cache cleaned after each run)** -- **forge build (with cache) - Build with warm cache (5 runs, 1 warmup)** -- Results show mean execution time in seconds -- N/A indicates benchmark failed or data unavailable +Made with [criterion-table](https://github.com/nu11ptr/criterion-table) ## System Information - **OS:** Darwin - **Architecture:** arm64 -- **Date:** Wed 18 Jun 2025 17:46:19 BST +- **Date:** Fri 27 Jun 2025 15:51:19 IST ## Raw Data 
-Raw JSON benchmark data is available in: `/Users/yash/dev/paradigm/foundry-rs/foundry/benches/benchmark_results/json_20250618_174101` +Detailed benchmark data and HTML reports are available in: +- `target/criterion/` - Individual benchmark reports diff --git a/benches/README.md b/benches/README.md new file mode 100644 index 0000000000000..b3663831853b1 --- /dev/null +++ b/benches/README.md @@ -0,0 +1,104 @@ +# Foundry Benchmarks + +This directory contains performance benchmarks for Foundry commands across multiple repositories and Foundry versions. + +## Prerequisites + +Before running the benchmarks, ensure you have the following installed: + +1. **Rust and Cargo** - Required for building and running the benchmarks + + ```bash + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh + ``` + +2. **Foundryup** - The Foundry toolchain installer + + ```bash + curl -L https://foundry.paradigm.xyz | bash + foundryup + ``` + +3. **Git** - For cloning benchmark repositories + +4. **npm** - Some repositories require npm dependencies + + ```bash + # Install Node.js and npm from https://nodejs.org/ + ``` + +5. **Benchmark tools** - Required for generating reports + ```bash + cargo install cargo-criterion + cargo install criterion-table + ``` + +## Running Benchmarks + +### Run the complete benchmark suite + +```bash +cargo run +``` + +This will: + +1. Check and install required Foundry versions +2. Run all benchmark suites (forge_test, forge_build_no_cache, forge_build_with_cache) +3. Generate comparison tables using criterion-table +4. 
Create the final LATEST.md report + +### Run individual benchmark suites + +```bash +./run_benchmarks.sh +``` + +### Run specific benchmark + +```bash +cargo criterion --bench forge_test +cargo criterion --bench forge_build_no_cache +cargo criterion --bench forge_build_with_cache +``` + +## Benchmark Structure + +- `forge_test` - Benchmarks `forge test` command across repos +- `forge_build_no_cache` - Benchmarks `forge build` with clean cache +- `forge_build_with_cache` - Benchmarks `forge build` with existing cache + +## Configuration + +### Repositories + +Edit `src/lib.rs` to modify the list of repositories to benchmark: + +```rust +pub static BENCHMARK_REPOS: &[RepoConfig] = &[ + RepoConfig { name: "account", org: "ithacaxyz", repo: "account", rev: "main" }, + // Add more repositories here +]; +``` + +### Foundry Versions + +Edit `src/lib.rs` to modify the list of Foundry versions: + +```rust +pub static FOUNDRY_VERSIONS: &[&str] = &["stable", "nightly"]; +``` + +## Results + +Benchmark results are displayed in the terminal and saved as HTML reports. The reports show: + +- Execution time statistics (mean, median, standard deviation) +- Comparison between different Foundry versions +- Performance trends across repositories + +## Troubleshooting + +1. **Foundry version not found**: Ensure the version is installed with `foundryup --install ` +2. **Repository clone fails**: Check network connectivity and repository access +3. 
**Build failures**: Some repositories may have specific dependencies - check their README files diff --git a/benches/benchmark.sh b/benches/benchmark.sh deleted file mode 100755 index 3d1e92aa62d9d..0000000000000 --- a/benches/benchmark.sh +++ /dev/null @@ -1,577 +0,0 @@ -#!/bin/bash - -# Foundry Multi-Version Benchmarking Suite -# This script benchmarks forge test and forge build commands across multiple repositories -# and multiple Foundry versions for comprehensive performance comparison - -set -e - -# Main execution -main() { - log_info "Starting Foundry Multi-Version Benchmarking Suite..." - log_info "Testing Foundry versions: ${FOUNDRY_VERSIONS[*]}" - log_info "Testing repositories: ${REPO_NAMES[*]}" - - # Setup - check_dependencies - setup_directories - - # Ensure cleanup on exit - trap cleanup EXIT - - # Install all Foundry versions upfront - install_all_foundry_versions - - # Clone/update repositories - for i in "${!REPO_NAMES[@]}"; do - clone_or_update_repo "${REPO_NAMES[$i]}" "${REPO_URLS[$i]}" - install_dependencies "${BENCHMARK_DIR}/${REPO_NAMES[$i]}" "${REPO_NAMES[$i]}" - done - - # Run benchmarks in parallel - benchmark_all_repositories_parallel - - # Compile results - compile_results - - log_success "Benchmarking complete!" 
- log_success "Results saved to: $RESULTS_FILE" - log_success "Latest results: $LATEST_RESULTS_FILE" - log_success "Raw JSON data saved to: $JSON_RESULTS_DIR" - log_info "You can view the results with: cat $LATEST_RESULTS_FILE" -} - -# Get the directory where this script is located -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" - -# Configuration -BENCHMARK_DIR="${SCRIPT_DIR}/benchmark_repos" -RESULTS_DIR="${SCRIPT_DIR}/benchmark_results" -TIMESTAMP=$(date +"%Y%m%d_%H%M%S") -RESULTS_FILE="${RESULTS_DIR}/foundry_multi_version_benchmark_${TIMESTAMP}.md" -LATEST_RESULTS_FILE="${SCRIPT_DIR}/LATEST.md" -JSON_RESULTS_DIR="${RESULTS_DIR}/json_${TIMESTAMP}" - -# Load configuration -source "${SCRIPT_DIR}/repos_and_versions.sh" - -# Load benchmark commands -source "${SCRIPT_DIR}/commands/forge_test.sh" -source "${SCRIPT_DIR}/commands/forge_build_no_cache.sh" -source "${SCRIPT_DIR}/commands/forge_build_with_cache.sh" - -# Colors for output -RED='\033[0;31m' -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' # No Color - -# Helper functions -log_info() { - echo -e "${BLUE}[INFO]${NC} $1" -} - -log_success() { - echo -e "${GREEN}[SUCCESS]${NC} $1" -} - -log_warn() { - echo -e "${YELLOW}[WARN]${NC} $1" -} - -log_error() { - echo -e "${RED}[ERROR]${NC} $1" -} - -# Install foundryup if not present -install_foundryup() { - if ! command -v foundryup &> /dev/null; then - log_info "Installing foundryup..." 
- curl -L https://foundry.paradigm.xyz | bash - # Source the bashrc/profile to get foundryup in PATH - export PATH="$HOME/.foundry/bin:$PATH" - fi -} - -# Install a specific Foundry version -install_foundry_version() { - local version=$1 - log_info "Installing Foundry version: $version" - - # Let foundryup handle any version format and determine validity - if foundryup --install "$version"; then - # Verify installation - local installed_version=$(forge --version | head -n1 || echo "unknown") - log_success "Installed Foundry: $installed_version" - return 0 - else - log_error "Failed to install Foundry $version" - return 1 - fi -} - -# Switch to a specific installed Foundry version -use_foundry_version() { - local version=$1 - log_info "Switching to Foundry version: $version" - - if foundryup --use "$version"; then - # Verify switch - local current_version=$(forge --version | head -n1 || echo "unknown") - log_success "Now using Foundry: $current_version" - return 0 - else - log_error "Failed to switch to Foundry $version" - return 1 - fi -} - -# Install all required Foundry versions upfront -install_all_foundry_versions() { - log_info "Installing all required Foundry versions as preprocessing step..." - - local failed_versions=() - - for version in "${FOUNDRY_VERSIONS[@]}"; do - if ! install_foundry_version "$version"; then - failed_versions+=("$version") - fi - done - - if [ ${#failed_versions[@]} -ne 0 ]; then - log_error "Failed to install the following Foundry versions: ${failed_versions[*]}" - log_error "Please check the version names and try again" - exit 1 - fi - - log_success "All Foundry versions installed successfully!" - - # List all installed versions for verification - log_info "Available installed versions:" - foundryup --list || log_warn "Could not list installed versions" -} - -# Check if required tools are installed -check_dependencies() { - local missing_deps=() - - if ! 
command -v hyperfine &> /dev/null; then - missing_deps+=("hyperfine") - fi - - if ! command -v git &> /dev/null; then - missing_deps+=("git") - fi - - if ! command -v curl &> /dev/null; then - missing_deps+=("curl") - fi - - if [ ${#missing_deps[@]} -ne 0 ]; then - log_error "Missing required dependencies: ${missing_deps[*]}" - log_info "Install hyperfine: https://github.com/sharkdp/hyperfine#installation" - exit 1 - fi - - # Install foundryup if needed - install_foundryup -} - -# Setup directories -setup_directories() { - log_info "Setting up benchmark directories..." - mkdir -p "$BENCHMARK_DIR" - mkdir -p "$RESULTS_DIR" - mkdir -p "$JSON_RESULTS_DIR" -} - -# Clone or update repository -clone_or_update_repo() { - local name=$1 - local url=$2 - local repo_dir="${BENCHMARK_DIR}/${name}" - - if [ -d "$repo_dir" ]; then - log_info "Updating existing repository: $name" - cd "$repo_dir" - git pull origin main 2>/dev/null || git pull origin master 2>/dev/null || true - cd - > /dev/null - else - log_info "Cloning repository: $name" - git clone "$url" "$repo_dir" - fi -} - -# Install dependencies for a repository -install_dependencies() { - local repo_dir=$1 - local repo_name=$2 - - log_info "Installing dependencies for $repo_name..." 
- cd "$repo_dir" - - # Install forge dependencies - if [ -f "foundry.toml" ]; then - forge install 2>/dev/null || true - fi - - # Install npm dependencies if package.json exists - if [ -f "package.json" ]; then - if command -v npm &> /dev/null; then - npm install 2>/dev/null || true - fi - fi - - cd - > /dev/null -} - -# Run benchmarks for a single repository with a specific Foundry version -benchmark_repository_for_version() { - local repo_name=$1 - local version=$2 - local repo_dir="${BENCHMARK_DIR}/${repo_name}" - - # Create a unique log file for this repo+version combination - local log_file="${JSON_RESULTS_DIR}/${repo_name}_${version//[^a-zA-Z0-9]/_}_benchmark.log" - - { - echo "$(date): Starting benchmark for $repo_name with Foundry $version" - - if [ ! -d "$repo_dir" ]; then - echo "ERROR: Repository directory not found: $repo_dir" - return 1 - fi - - cd "$repo_dir" - - # Check if it's a valid Foundry project - if [ ! -f "foundry.toml" ]; then - echo "WARN: No foundry.toml found in $repo_name, skipping..." - cd - > /dev/null - return 0 - fi - - # Clean version string for filenames (remove 'v' prefix, replace '.' with '_') - local clean_version="${version//v/}" - clean_version="${clean_version//\./_}" - - local version_results_dir="${JSON_RESULTS_DIR}/${repo_name}_${clean_version}" - mkdir -p "$version_results_dir" - - echo "Running benchmarks for $repo_name with Foundry $version..." 
- - # Run all benchmark commands - fail fast if any command fails - benchmark_forge_test "$repo_name" "$version" "$version_results_dir" "$log_file" || { - echo "FATAL: forge test benchmark failed for $repo_name with Foundry $version" >> "$log_file" - exit 1 - } - benchmark_forge_build_no_cache "$repo_name" "$version" "$version_results_dir" "$log_file" || { - echo "FATAL: forge build (no cache) benchmark failed for $repo_name with Foundry $version" >> "$log_file" - exit 1 - } - benchmark_forge_build_with_cache "$repo_name" "$version" "$version_results_dir" "$log_file" || { - echo "FATAL: forge build (with cache) benchmark failed for $repo_name with Foundry $version" >> "$log_file" - exit 1 - } - - # Store version info for this benchmark - forge --version | head -n1 > "${version_results_dir}/forge_version.txt" 2>/dev/null || echo "unknown" > "${version_results_dir}/forge_version.txt" - - cd - > /dev/null - echo "$(date): Completed benchmark for $repo_name with Foundry $version" - - } > "$log_file" 2>&1 -} - -# Run benchmarks for all repositories in parallel for each Foundry version -benchmark_all_repositories_parallel() { - for version in "${FOUNDRY_VERSIONS[@]}"; do - log_info "Switching to Foundry version: $version" - - # Switch to the pre-installed version - use_foundry_version "$version" || { - log_warn "Failed to switch to Foundry $version, skipping all repositories for this version..." - continue - } - - log_info "Starting parallel benchmarks for all repositories with Foundry $version" - - # Launch all repositories in parallel - local pids=() - - for repo_name in "${REPO_NAMES[@]}"; do - # Check if repo directory exists and is valid before starting background process - local repo_dir="${BENCHMARK_DIR}/${repo_name}" - if [ ! -d "$repo_dir" ]; then - log_warn "Repository directory not found: $repo_dir, skipping..." - continue - fi - - if [ ! -f "${repo_dir}/foundry.toml" ]; then - log_warn "No foundry.toml found in $repo_name, skipping..." 
- continue - fi - - log_info "Launching background benchmark for $repo_name..." - benchmark_repository_for_version "$repo_name" "$version" & - local pid=$! - pids+=($pid) - echo "$repo_name:$pid" >> "${JSON_RESULTS_DIR}/parallel_pids_${version//[^a-zA-Z0-9]/_}.txt" - done - - # Wait for all repositories to complete - log_info "Waiting for ${#pids[@]} parallel benchmarks to complete for Foundry $version..." - local completed=0 - local total=${#pids[@]} - - for pid in "${pids[@]}"; do - if wait "$pid"; then - completed=$((completed + 1)) - log_info "Progress: $completed/$total repositories completed for Foundry $version" - else - log_error "Benchmark process failed (PID: $pid) for Foundry $version" - exit 1 - fi - done - - log_success "All repositories completed for Foundry $version ($completed/$total successful)" - - # Show summary of log files created - log_info "Individual benchmark logs available in: ${JSON_RESULTS_DIR}/*_${version//[^a-zA-Z0-9]/_}_benchmark.log" - done -} - -# Extract mean time from JSON result file -extract_mean_time() { - local json_file=$1 - if [ -f "$json_file" ]; then - # Extract mean time in seconds, format to 3 decimal places - python3 -c " -import json, sys -try: - with open('$json_file') as f: - data = json.load(f) - mean_time = data['results'][0]['mean'] - print(f'{mean_time:.3f}') -except: - print('N/A') -" 2>/dev/null || echo "N/A" - else - echo "N/A" - fi -} - -# Get Foundry version string from file -get_forge_version() { - local version_file=$1 - if [ -f "$version_file" ]; then - cat "$version_file" | sed 's/forge //' | sed 's/ (.*//' - else - echo "unknown" - fi -} - -# Compile results into markdown with comparison tables -compile_results() { - log_info "Compiling benchmark results..." 
- - cat > "$RESULTS_FILE" << EOF -# Forge Benchmarking Results - -**Generated on:** $(date) -**Hyperfine Version:** $(hyperfine --version) -**Foundry Versions Tested:** ${FOUNDRY_VERSIONS[*]} -**Repositories Tested:** ${REPO_NAMES[*]} - -## Summary - -This report contains comprehensive benchmarking results comparing different Foundry versions across multiple projects. -The following benchmarks were performed: - -1. **$(get_forge_test_description)** -2. **$(get_forge_build_no_cache_description)** -3. **$(get_forge_build_with_cache_description)** - ---- - -## Performance Comparison Tables - -EOF - - # Create unified comparison tables for each benchmark type - local benchmark_commands=("forge_test" "forge_build_no_cache" "forge_build_with_cache") - - for cmd in "${benchmark_commands[@]}"; do - local bench_name="${cmd//_/ }" - local bench_type=$(get_${cmd}_type) - local json_filename=$(get_${cmd}_json_filename) - - echo "### $bench_name" >> "$RESULTS_FILE" - echo "" >> "$RESULTS_FILE" - echo "Mean execution time in seconds (lower is better):" >> "$RESULTS_FILE" - echo "" >> "$RESULTS_FILE" - - # Create table header with proper column names - local header_row="| Project" - for version in "${FOUNDRY_VERSIONS[@]}"; do - header_row+=" | $version (s)" - done - header_row+=" |" - echo "$header_row" >> "$RESULTS_FILE" - - # Create table separator with proper alignment - local separator_row="|------" - for version in "${FOUNDRY_VERSIONS[@]}"; do - separator_row+="|--------:" - done - separator_row+="|" - echo "$separator_row" >> "$RESULTS_FILE" - - # Add data rows - for repo_name in "${REPO_NAMES[@]}"; do - local data_row="| **$repo_name**" - - for version in "${FOUNDRY_VERSIONS[@]}"; do - local clean_version="${version//v/}" - clean_version="${clean_version//\./_}" - local version_results_dir="${JSON_RESULTS_DIR}/${repo_name}_${clean_version}" - local json_file="${version_results_dir}/${json_filename}" - - local mean_time=$(extract_mean_time "$json_file") - data_row+=" | 
$mean_time" - done - data_row+=" |" - echo "$data_row" >> "$RESULTS_FILE" - done - echo "" >> "$RESULTS_FILE" - echo "" >> "$RESULTS_FILE" - done - - # Add detailed version information - echo "## Foundry Version Details" >> "$RESULTS_FILE" - echo "" >> "$RESULTS_FILE" - - for version in "${FOUNDRY_VERSIONS[@]}"; do - echo "### $version" >> "$RESULTS_FILE" - echo "" >> "$RESULTS_FILE" - - # Find any version file to get the detailed version info - local clean_version="${version//v/}" - clean_version="${clean_version//\./_}" - - for repo_name in "${REPO_NAMES[@]}"; do - local version_file="${JSON_RESULTS_DIR}/${repo_name}_${clean_version}/forge_version.txt" - if [ -f "$version_file" ]; then - echo "\`\`\`" >> "$RESULTS_FILE" - cat "$version_file" >> "$RESULTS_FILE" - echo "\`\`\`" >> "$RESULTS_FILE" - break - fi - done - echo "" >> "$RESULTS_FILE" - done - - # Add notes and system info - cat >> "$RESULTS_FILE" << EOF - -## Notes - -- All benchmarks were run with hyperfine in parallel mode -- **$(get_forge_test_description)** -- **$(get_forge_build_no_cache_description)** -- **$(get_forge_build_with_cache_description)** -- Results show mean execution time in seconds -- N/A indicates benchmark failed or data unavailable - -## System Information - -- **OS:** $(uname -s) -- **Architecture:** $(uname -m) -- **Date:** $(date) - -## Raw Data - -Raw JSON benchmark data is available in: \`$JSON_RESULTS_DIR\` - -EOF - - # Copy to LATEST.md - cp "$RESULTS_FILE" "$LATEST_RESULTS_FILE" - log_success "Latest results also saved to: $LATEST_RESULTS_FILE" -} - -# Cleanup temporary files -cleanup() { - log_info "Cleanup completed" -} - -# Parse command line arguments -parse_args() { - while [[ $# -gt 0 ]]; do - case $1 in - --versions) - shift - if [[ $# -eq 0 ]]; then - log_error "--versions requires a space-separated list of versions" - exit 1 - fi - # Read versions until next flag or end of args - FOUNDRY_VERSIONS=() - while [[ $# -gt 0 && ! 
"$1" =~ ^-- ]]; do - FOUNDRY_VERSIONS+=("$1") - shift - done - ;; - --help|-h) - echo "Foundry Benchmarking Suite" - echo "" - echo "Usage: $0 [OPTIONS]" - echo "" - echo "OPTIONS:" - echo " --help, -h Show this help message" - echo " --version, -v Show version information" - echo " --versions ... Specify Foundry versions to benchmark" - echo " (default: from repos_and_versions.sh)" - echo "" - echo "EXAMPLES:" - echo " $0 # Use default versions (parallel)" - echo " $0 --versions stable nightly # Benchmark stable and nightly only" - echo " $0 --versions v1.0.0 v1.1.0 v1.2.0 # Benchmark specific versions" - echo "" - echo "This script benchmarks forge test and forge build commands across" - echo "multiple Foundry repositories and versions using hyperfine." - - echo "Supported version formats:" - echo " - stable, nightly (special tags)" - echo " - v1.0.0, v1.1.0, etc. (specific versions)" - echo " - nightly- (specific nightly builds)" - echo " - Any format supported by foundryup" - echo "" - echo "The script will:" - echo " 1. Install foundryup if not present" - echo " 2. Install all specified Foundry versions (preprocessing step)" - echo " 3. Clone/update target repositories" - echo " 4. Switch between versions and run benchmarks in parallel" - echo " 5. Generate comparison tables in markdown format" - echo " 6. 
Save results to LATEST.md" - exit 0 - ;; - --version|-v) - exit 0 - ;; - *) - log_error "Unknown option: $1" - echo "Use --help for usage information" - exit 1 - ;; - esac - done -} - -# Handle command line arguments -if [[ $# -gt 0 ]]; then - parse_args "$@" -fi - -main \ No newline at end of file diff --git a/benches/commands/forge_build_no_cache.sh b/benches/commands/forge_build_no_cache.sh deleted file mode 100755 index 756528176ac4e..0000000000000 --- a/benches/commands/forge_build_no_cache.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash - -# Forge Build (No Cache) Benchmark Command -# This file contains the configuration and execution logic for benchmarking 'forge build' with no cache - -# Command configuration -FORGE_BUILD_NO_CACHE_RUNS=5 - -# Benchmark function for forge build (no cache) -benchmark_forge_build_no_cache() { - local repo_name=$1 - local version=$2 - local version_results_dir=$3 - local log_file=$4 - - echo "Running 'forge build' (no cache) benchmark..." >> "$log_file" - - if hyperfine \ - --runs "$FORGE_BUILD_NO_CACHE_RUNS" \ - --prepare 'forge clean' \ - --export-json "${version_results_dir}/build_no_cache_results.json" \ - "forge build" 2>>"$log_file.error"; then - echo "✓ forge build (no cache) completed" >> "$log_file" - return 0 - else - echo "✗ forge build (no cache) failed" >> "$log_file" - echo "FATAL: forge build (no cache) benchmark failed" >> "$log_file" - return 1 - fi -} - -# Get command description for reporting -get_forge_build_no_cache_description() { - echo "forge build (no cache) - Clean build without cache ($FORGE_BUILD_NO_CACHE_RUNS runs, cache cleaned after each run)" -} - -# Get JSON result filename -get_forge_build_no_cache_json_filename() { - echo "build_no_cache_results.json" -} - -# Get benchmark type identifier -get_forge_build_no_cache_type() { - echo "build_no_cache" -} \ No newline at end of file diff --git a/benches/commands/forge_build_with_cache.sh b/benches/commands/forge_build_with_cache.sh deleted file mode 
100755 index 25fab9dbc190f..0000000000000 --- a/benches/commands/forge_build_with_cache.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -# Forge Build (With Cache) Benchmark Command -# This file contains the configuration and execution logic for benchmarking 'forge build' with cache - -# Command configuration -FORGE_BUILD_WITH_CACHE_RUNS=5 -FORGE_BUILD_WITH_CACHE_WARMUP=1 - -# Benchmark function for forge build (with cache) -benchmark_forge_build_with_cache() { - local repo_name=$1 - local version=$2 - local version_results_dir=$3 - local log_file=$4 - - echo "Running 'forge build' (with cache) benchmark..." >> "$log_file" - - if hyperfine \ - --runs "$FORGE_BUILD_WITH_CACHE_RUNS" \ - --prepare 'forge build' \ - --warmup "$FORGE_BUILD_WITH_CACHE_WARMUP" \ - --export-json "${version_results_dir}/build_with_cache_results.json" \ - "forge build" 2>>"$log_file.error"; then - echo "✓ forge build (with cache) completed" >> "$log_file" - return 0 - else - echo "✗ forge build (with cache) failed" >> "$log_file" - echo "FATAL: forge build (with cache) benchmark failed" >> "$log_file" - return 1 - fi -} - -# Get command description for reporting -get_forge_build_with_cache_description() { - echo "forge build (with cache) - Build with warm cache ($FORGE_BUILD_WITH_CACHE_RUNS runs, $FORGE_BUILD_WITH_CACHE_WARMUP warmup)" -} - -# Get JSON result filename -get_forge_build_with_cache_json_filename() { - echo "build_with_cache_results.json" -} - -# Get benchmark type identifier -get_forge_build_with_cache_type() { - echo "build_with_cache" -} \ No newline at end of file diff --git a/benches/commands/forge_test.sh b/benches/commands/forge_test.sh deleted file mode 100755 index 9427d18d3c30c..0000000000000 --- a/benches/commands/forge_test.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -# Forge Test Benchmark Command -# This file contains the configuration and execution logic for benchmarking 'forge test' - -# Command configuration -FORGE_TEST_RUNS=5 -FORGE_TEST_WARMUP=1 - -# 
Benchmark function for forge test -benchmark_forge_test() { - local repo_name=$1 - local version=$2 - local version_results_dir=$3 - local log_file=$4 - - echo "Running 'forge test' benchmark..." >> "$log_file" - - if hyperfine \ - --runs "$FORGE_TEST_RUNS" \ - --prepare 'forge build' \ - --warmup "$FORGE_TEST_WARMUP" \ - --export-json "${version_results_dir}/test_results.json" \ - "forge test" 2>>"$log_file.error"; then - echo "✓ forge test completed" >> "$log_file" - return 0 - else - echo "✗ forge test failed" >> "$log_file" - echo "FATAL: forge test benchmark failed" >> "$log_file" - return 1 - fi -} - -# Get command description for reporting -get_forge_test_description() { - echo "forge test - Running the test suite ($FORGE_TEST_RUNS runs, $FORGE_TEST_WARMUP warmup)" -} - -# Get JSON result filename -get_forge_test_json_filename() { - echo "test_results.json" -} - -# Get benchmark type identifier -get_forge_test_type() { - echo "test" -} \ No newline at end of file diff --git a/benches/forge_build_no_cache.rs b/benches/forge_build_no_cache.rs new file mode 100644 index 0000000000000..f7b4a2d13f527 --- /dev/null +++ b/benches/forge_build_no_cache.rs @@ -0,0 +1,47 @@ +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; +use foundry_bench::{ + get_benchmark_versions, switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, SAMPLE_SIZE, +}; +use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; + +fn benchmark_forge_build_no_cache(c: &mut Criterion) { + let mut group = c.benchmark_group("forge-build-no-cache"); + group.sample_size(SAMPLE_SIZE); + + // Setup all projects once - clone repos in parallel + let projects: Vec<_> = BENCHMARK_REPOS + .par_iter() + .map(|repo_config| { + // Setup: prepare project (clone repo) + let project = BenchmarkProject::setup(repo_config).expect("Failed to setup project"); + (repo_config, project) + }) + .collect(); + + // Get versions from environment variable or default + let versions = 
get_benchmark_versions(); + + for version in versions { + // Switch foundry version + switch_foundry_version(&version).expect("Failed to switch foundry version"); + + // Run benchmarks for each project + for (repo_config, project) in &projects { + // Format: table_name/column_name/row_name + // This creates: forge-build-no-cache/{version}/{repo_name} + let bench_id = BenchmarkId::new(&version, repo_config.name); + + group.bench_function(bench_id, |b| { + b.iter(|| { + let output = project.run_forge_build(true).expect("forge build failed"); + black_box(output); + }); + }); + } + } + + group.finish(); +} + +criterion_group!(benches, benchmark_forge_build_no_cache); +criterion_main!(benches); diff --git a/benches/forge_build_with_cache.rs b/benches/forge_build_with_cache.rs new file mode 100644 index 0000000000000..154dd8a7f6100 --- /dev/null +++ b/benches/forge_build_with_cache.rs @@ -0,0 +1,50 @@ +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; +use foundry_bench::{ + get_benchmark_versions, switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, SAMPLE_SIZE, +}; +use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; + +fn benchmark_forge_build_with_cache(c: &mut Criterion) { + let mut group = c.benchmark_group("forge-build-with-cache"); + group.sample_size(SAMPLE_SIZE); + + // Setup all projects once - clone repos in parallel + let projects: Vec<_> = BENCHMARK_REPOS + .par_iter() + .map(|repo_config| { + let project = BenchmarkProject::setup(repo_config).expect("Failed to setup project"); + (repo_config, project) + }) + .collect(); + + // Get versions from environment variable or default + let versions = get_benchmark_versions(); + + for version in versions { + // Switch foundry version once per version + switch_foundry_version(&version).expect("Failed to switch foundry version"); + + projects.par_iter().for_each(|(_repo_config, project)| { + let _ = project.run_forge_build(false); + }); + + // Run benchmarks for each 
project + for (repo_config, project) in &projects { + // Format: table_name/column_name/row_name + // This creates: forge-build-with-cache/{version}/{repo_name} + let bench_id = BenchmarkId::new(&version, repo_config.name); + group.bench_function(bench_id, |b| { + b.iter(|| { + println!("Benching: forge-build-with-cache/{}/{}", version, repo_config.name); + let output = project.run_forge_build(false).expect("forge build failed"); + black_box(output); + }); + }); + } + } + + group.finish(); +} + +criterion_group!(benches, benchmark_forge_build_with_cache); +criterion_main!(benches); diff --git a/benches/forge_test.rs b/benches/forge_test.rs new file mode 100644 index 0000000000000..b12b30a067b47 --- /dev/null +++ b/benches/forge_test.rs @@ -0,0 +1,52 @@ +use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion}; +use foundry_bench::{ + get_benchmark_versions, switch_foundry_version, BenchmarkProject, BENCHMARK_REPOS, SAMPLE_SIZE, +}; +use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; + +fn benchmark_forge_test(c: &mut Criterion) { + let mut group = c.benchmark_group("forge-test"); + group.sample_size(SAMPLE_SIZE); + + // Setup all projects once - clone repos in parallel + let projects: Vec<_> = BENCHMARK_REPOS + .par_iter() + .map(|repo_config| { + // Setup: prepare project (clone repo) + let project = BenchmarkProject::setup(repo_config).expect("Failed to setup project"); + (repo_config, project) + }) + .collect(); + + // Get versions from environment variable or default + let versions = get_benchmark_versions(); + + for version in versions { + // Switch foundry version once per version + switch_foundry_version(&version).expect("Failed to switch foundry version"); + + // Build all projects in parallel for this foundry version + projects.par_iter().for_each(|(_repo_config, project)| { + project.run_forge_build(false).expect("forge build failed"); + }); + + // Run benchmarks for each project + for (repo_config, project) in 
&projects { + // Format: table_name/column_name/row_name + // This creates: forge-test/{version}/{repo_name} + let bench_id = BenchmarkId::new(&version, repo_config.name); + + group.bench_function(bench_id, |b| { + b.iter(|| { + let output = project.run_forge_test().expect("forge test failed"); + black_box(output); + }); + }); + } + } + + group.finish(); +} + +criterion_group!(benches, benchmark_forge_test); +criterion_main!(benches); diff --git a/benches/repos_and_versions.sh b/benches/repos_and_versions.sh deleted file mode 100755 index 07da480b05d2d..0000000000000 --- a/benches/repos_and_versions.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -# Foundry Multi-Version Benchmarking Configuration -# This file contains the configuration for repositories and Foundry versions to benchmark - -# Foundry versions to benchmark -# Supported formats: -# - stable, nightly (special tags) -# - v1.0.0, v1.1.0, etc. (specific versions) -# - nightly- (specific nightly builds) -# - Any format supported by foundryup -FOUNDRY_VERSIONS=( - "stable" - "nightly" -) - -# Repository configurations -# Add new repositories by adding entries to both arrays -REPO_NAMES=( - "ithacaxyz-account" - # "v4-core" - "solady" - # "morpho-blue" - # "spark-psm" -) - -REPO_URLS=( - "https://github.com/ithacaxyz/account" - # "https://github.com/Uniswap/v4-core" - "https://github.com/Vectorized/solady" - # "https://github.com/morpho-org/morpho-blue" - # "https://github.com/sparkdotfi/spark-psm" -) - -# Verify arrays have the same length -if [ ${#REPO_NAMES[@]} -ne ${#REPO_URLS[@]} ]; then - echo "ERROR: REPO_NAMES and REPO_URLS arrays must have the same length" - exit 1 -fi - -# Export variables for use in other scripts -export FOUNDRY_VERSIONS -export REPO_NAMES -export REPO_URLS \ No newline at end of file diff --git a/benches/run_benchmarks.sh b/benches/run_benchmarks.sh new file mode 100755 index 0000000000000..a4b343a0ac226 --- /dev/null +++ b/benches/run_benchmarks.sh @@ -0,0 +1,359 @@ 
#!/bin/bash

# Foundry Benchmark Runner with Criterion Table Output
# Runs the criterion-based benchmarks and generates a markdown report (LATEST.md).

set -euo pipefail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Logging helpers
log_info() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

log_success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

log_warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

log_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Always operate from the directory containing this script.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

# Verify the external tools this script shells out to are installed.
check_dependencies() {
    if ! command -v criterion-table &> /dev/null; then
        log_error "criterion-table is not installed. Please install it with:"
        echo "cargo install criterion-table"
        exit 1
    fi

    if ! cargo criterion --help &> /dev/null; then
        log_error "cargo-criterion is not installed. Please install it with:"
        echo "cargo install cargo-criterion"
        exit 1
    fi
}

# Install the requested Foundry versions via foundryup.
# No-op unless --force-install was given.
install_foundry_versions() {
    if [[ "$FORCE_INSTALL" != "true" ]]; then
        return
    fi

    local versions

    # Use custom versions if provided, otherwise scrape the defaults from lib.rs.
    if [[ -n "$CUSTOM_VERSIONS" ]]; then
        versions=$(echo "$CUSTOM_VERSIONS" | tr ',' ' ')
        log_info "Installing custom Foundry versions: $versions"
    else
        # NOTE: this grep is fragile — it assumes FOUNDRY_VERSIONS and its
        # quoted entries stay within 10 lines in src/lib.rs.
        versions=$(grep -A 10 'pub static FOUNDRY_VERSIONS' src/lib.rs | grep -o '"[^"]*"' | tr -d '"')
        log_info "Installing default Foundry versions from lib.rs: $versions"
    fi

    if ! command -v foundryup &> /dev/null; then
        log_error "foundryup not found. Please install Foundry first:"
        echo "curl -L https://foundry.paradigm.xyz | bash"
        exit 1
    fi

    for version in $versions; do
        log_info "Installing Foundry version: $version"
        if foundryup --install "$version"; then
            log_success "✓ Successfully installed version $version"
        else
            log_error "Failed to install Foundry version: $version"
            exit 1
        fi
    done

    log_success "All Foundry versions installed successfully"
}

# Print system information as markdown bullet points.
get_system_info() {
    local os_name=$(uname -s)
    local arch=$(uname -m)
    local date=$(date)

    echo "- **OS:** $os_name"
    echo "- **Architecture:** $arch"
    echo "- **Date:** $date"
}

# Run a single cargo-criterion benchmark target, honoring $VERBOSE.
#   $1 - bench target name (e.g. forge_test)
#   $2 - output path for the criterion JSON message stream
run_single_bench() {
    local bench_name="$1"
    local json_out="$2"

    log_info "Running ${bench_name} benchmark..."
    if [[ "${VERBOSE:-false}" == "true" ]]; then
        cargo criterion --bench "$bench_name" --message-format=json > "$json_out" || {
            log_error "${bench_name} benchmark failed"
            exit 1
        }
    else
        cargo criterion --bench "$bench_name" --message-format=json > "$json_out" 2>/dev/null || {
            log_error "${bench_name} benchmark failed"
            exit 1
        }
    fi
}

# Run all benchmarks and produce the markdown report.
run_benchmarks() {
    log_info "Running Foundry benchmarks..."

    # Propagate custom versions to the Rust benchmarks via the environment.
    if [[ -n "$CUSTOM_VERSIONS" ]]; then
        export FOUNDRY_BENCH_VERSIONS="$CUSTOM_VERSIONS"
        log_info "Set FOUNDRY_BENCH_VERSIONS=$CUSTOM_VERSIONS"
    fi

    local temp_dir=$(mktemp -d)
    local forge_test_json="$temp_dir/forge_test.json"
    local forge_build_no_cache_json="$temp_dir/forge_build_no_cache.json"
    local forge_build_with_cache_json="$temp_dir/forge_build_with_cache.json"

    # Run benchmarks in this specific order — the first becomes the
    # criterion-table baseline column.
    run_single_bench forge_test "$forge_test_json"
    run_single_bench forge_build_no_cache "$forge_build_no_cache_json"
    run_single_bench forge_build_with_cache "$forge_build_with_cache_json"

    # Combine all results and generate markdown tables.
    log_info "Generating markdown report with criterion-table..."
    if ! cat "$forge_test_json" "$forge_build_no_cache_json" "$forge_build_with_cache_json" | criterion-table > "$temp_dir/tables.md"; then
        log_error "criterion-table failed to process benchmark data"
        exit 1
    fi

    generate_report "$temp_dir/tables.md"

    rm -rf "$temp_dir"

    log_success "Benchmark report generated in LATEST.md"
}

# Assemble the final LATEST.md from the criterion-table output.
#   $1 - path to the generated tables markdown
generate_report() {
    local tables_file="$1"
    local report_file="LATEST.md"

    log_info "Generating final report..."

    local timestamp=$(date)

    # Versions actually tested (custom or scraped from lib.rs).
    local versions
    if [[ -n "$CUSTOM_VERSIONS" ]]; then
        versions=$(echo "$CUSTOM_VERSIONS" | tr ',' ' ')
    else
        versions=$(grep -A 10 'pub static FOUNDRY_VERSIONS' src/lib.rs | grep -o '"[^"]*"' | tr -d '"' | tr '\n' ' ')
    fi

    # Build a numbered, linked list of benchmarked repositories by parsing
    # the BENCHMARK_REPOS entries out of src/lib.rs.
    local repo_list=""
    local counter=1

    while IFS= read -r line; do
        if [[ $line =~ RepoConfig.*name:.*\"([^\"]+)\".*org:.*\"([^\"]+)\".*repo:.*\"([^\"]+)\" ]]; then
            local name="${BASH_REMATCH[1]}"
            local org="${BASH_REMATCH[2]}"
            local repo="${BASH_REMATCH[3]}"
            repo_list+="$counter. [$name](https://github.com/$org/$repo)\n"
            # Arithmetic assignment instead of ((counter++)): a post-increment
            # that evaluates to 0 would trip `set -e`.
            counter=$((counter + 1))
        fi
    done < <(grep -A 20 'pub static BENCHMARK_REPOS' src/lib.rs | grep 'RepoConfig')

    cat > "$report_file" << EOF
# Foundry Benchmarking Results

**Generated on:** $timestamp
**Foundry Versions Tested:** $versions

## Repositories Tested

$(echo -e "$repo_list")

## Summary

This report contains comprehensive benchmarking results comparing different Foundry versions across multiple projects using Criterion.rs for precise performance measurements.

The following benchmarks were performed:

1. **forge-test** - Running the test suite (10 samples each)
2. **forge-build-no-cache** - Clean build without cache (10 samples each)
3. **forge-build-with-cache** - Build with warm cache (10 samples each)

---

EOF

    # Append the criterion-table generated tables.
    cat "$tables_file" >> "$report_file"

    # Append system info and pointers to raw data.
    cat >> "$report_file" << EOF

## System Information

$(get_system_info)

## Raw Data

Detailed benchmark data and HTML reports are available in:
- \`target/criterion/\` - Individual benchmark reports

EOF

    log_success "Report written to $report_file"
}

# Main entry point.
main() {
    log_info "Starting Foundry benchmark suite..."

    check_dependencies

    # Install Foundry versions if --force-install is used.
    install_foundry_versions

    run_benchmarks

    log_success "Benchmark suite completed successfully!"
    echo ""
    echo "View the results:"
    echo "  - Text report: cat LATEST.md"
}

# Print usage information.
show_help() {
    cat << EOF
Foundry Benchmark Runner

This script runs Criterion-based benchmarks for Foundry commands and generates
a markdown report using criterion-table.

USAGE:
    $0 [OPTIONS]

OPTIONS:
    -h, --help          Show this help message
    -v, --version       Show version information
    --verbose           Show benchmark output (by default output is suppressed)
    --versions          Comma-separated list of Foundry versions to test
                        (e.g. stable,nightly,v1.2.0)
                        If not specified, uses versions from src/lib.rs
    --force-install     Force installation of Foundry versions
                        By default, assumes versions are already installed

REQUIREMENTS:
    - criterion-table: cargo install criterion-table
    - cargo-criterion: cargo install cargo-criterion
    - Foundry versions must be installed (or use --force-install)

EXAMPLES:
    $0                                    # Run with default versions
    $0 --verbose                          # Show full output
    $0 --versions stable,nightly          # Test specific versions
    $0 --versions stable,nightly --force-install  # Install and test versions

The script will:
1. Run forge_test, forge_build_no_cache, and forge_build_with_cache benchmarks
2. Generate comparison tables using criterion-table
3. Include system information and Foundry version details
4. Save the complete report to LATEST.md

EOF
}

# Default values
VERBOSE=false
FORCE_INSTALL=false
CUSTOM_VERSIONS=""

# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        -h|--help)
            show_help
            exit 0
            ;;
        -v|--version)
            echo "Foundry Benchmark Runner v1.0.0"
            exit 0
            ;;
        --verbose)
            VERBOSE=true
            shift
            ;;
        --force-install)
            FORCE_INSTALL=true
            shift
            ;;
        --versions)
            # ${2:-} so a trailing --versions doesn't abort with
            # "unbound variable" under `set -u`.
            if [[ -z "${2:-}" ]] || [[ "${2:-}" == --* ]]; then
                log_error "--versions requires a comma-separated list of versions"
                exit 1
            fi
            CUSTOM_VERSIONS="$2"
            shift 2
            ;;
        *)
            log_error "Unknown option: $1"
            echo "Use -h or --help for usage information"
            exit 1
            ;;
    esac
done

main
+]; + +/// Sample size for benchmark measurements +/// +/// This controls how many times each benchmark is run for statistical analysis. +/// Higher values provide more accurate results but take longer to complete. +pub const SAMPLE_SIZE: usize = 10; + +/// Foundry versions to benchmark +/// +/// To add more versions for comparison, install them first: +/// ```bash +/// foundryup --install stable +/// foundryup --install nightly +/// foundryup --install v0.2.0 # Example specific version +/// ``` +/// +/// Then add the version strings to this array. Supported formats: +/// - "stable" - Latest stable release +/// - "nightly" - Latest nightly build +/// - "v0.2.0" - Specific version tag +/// - "commit-hash" - Specific commit hash +/// - "nightly-" - Nightly build with specific revision +pub static FOUNDRY_VERSIONS: &[&str] = &["stable", "nightly"]; + +/// A benchmark project that represents a cloned repository ready for testing +pub struct BenchmarkProject { + pub name: String, + pub temp_project: TempProject, + pub root_path: PathBuf, +} + +impl BenchmarkProject { + /// Set up a benchmark project by cloning the repository + pub fn setup(config: &RepoConfig) -> Result { + let temp_project = + TempProject::dapptools().wrap_err("Failed to create temporary project")?; + + // Get root path before clearing + let root_path = temp_project.root().to_path_buf(); + let root = root_path.to_str().unwrap(); + + // Remove all files in the directory + for entry in std::fs::read_dir(&root_path)? 
{ + let entry = entry?; + let path = entry.path(); + if path.is_dir() { + std::fs::remove_dir_all(&path).ok(); + } else { + std::fs::remove_file(&path).ok(); + } + } + + // Clone the repository + let repo_url = format!("https://github.com/{}/{}.git", config.org, config.repo); + clone_remote(&repo_url, root); + + // Checkout specific revision if provided + if !config.rev.is_empty() && config.rev != "main" && config.rev != "master" { + let status = Command::new("git") + .current_dir(root) + .args(["checkout", config.rev]) + .status() + .wrap_err("Failed to checkout revision")?; + + if !status.success() { + eyre::bail!("Git checkout failed for {}", config.name); + } + } + + // Git submodules are already cloned via --recursive flag + // But npm dependencies still need to be installed + Self::install_npm_dependencies(&root_path)?; + + println!(" ✅ Project {} setup complete at {}", config.name, root); + Ok(BenchmarkProject { name: config.name.to_string(), root_path, temp_project }) + } + + /// Install npm dependencies if package.json exists + fn install_npm_dependencies(root: &Path) -> Result<()> { + if root.join("package.json").exists() { + println!(" 📦 Running npm install..."); + let status = Command::new("npm") + .current_dir(root) + .args(["install"]) + .stdout(std::process::Stdio::inherit()) + .stderr(std::process::Stdio::inherit()) + .status() + .wrap_err("Failed to run npm install")?; + + if !status.success() { + println!(" ⚠️ Warning: npm install failed with exit code: {:?}", status.code()); + } else { + println!(" ✅ npm install completed successfully"); + } + } + Ok(()) + } + + /// Run forge test command and return the output + pub fn run_forge_test(&self) -> Result { + Command::new("forge") + .current_dir(&self.root_path) + .args(["test"]) + .output() + .wrap_err("Failed to run forge test") + } + + /// Run forge build command and return the output + pub fn run_forge_build(&self, clean_cache: bool) -> Result { + if clean_cache { + // Clean first + let _ = 
Command::new("forge").current_dir(&self.root_path).args(["clean"]).output(); + } + + Command::new("forge") + .current_dir(&self.root_path) + .args(["build"]) + .output() + .wrap_err("Failed to run forge build") + } + + /// Get the root path of the project + pub fn root(&self) -> &Path { + &self.root_path + } +} + +/// Switch to a specific foundry version +pub fn switch_foundry_version(version: &str) -> Result<()> { + let output = Command::new("foundryup") + .args(["--use", version]) + .output() + .wrap_err("Failed to run foundryup")?; + + // Check if the error is about forge --version failing + let stderr = String::from_utf8_lossy(&output.stderr); + if stderr.contains("command failed") && stderr.contains("forge --version") { + eyre::bail!("Foundry binaries maybe corrupted. Please reinstall, please run `foundryup` and install the required versions."); + } + + if !output.status.success() { + eprintln!("foundryup stderr: {}", stderr); + eyre::bail!("Failed to switch to foundry version: {}", version); + } + + println!(" Successfully switched to version: {}", version); + Ok(()) +} + +/// Get the current forge version +pub fn get_forge_version() -> Result { + let output = Command::new("forge") + .args(["--version"]) + .output() + .wrap_err("Failed to get forge version")?; + + if !output.status.success() { + eyre::bail!("forge --version failed"); + } + + let version = + String::from_utf8(output.stdout).wrap_err("Invalid UTF-8 in forge version output")?; + + Ok(version.lines().next().unwrap_or("unknown").to_string()) +} + +/// Get Foundry versions to benchmark from environment variable or default +/// +/// Reads from FOUNDRY_BENCH_VERSIONS environment variable if set, +/// otherwise returns the default versions from FOUNDRY_VERSIONS constant. 
+/// +/// The environment variable should be a comma-separated list of versions, +/// e.g., "stable,nightly,v1.2.0" +pub fn get_benchmark_versions() -> Vec { + if let Ok(versions_env) = env::var("FOUNDRY_BENCH_VERSIONS") { + versions_env + .split(',') + .map(|s| s.trim().to_string()) + .filter(|s| !s.is_empty()) + .collect() + } else { + FOUNDRY_VERSIONS.iter().map(|&s| s.to_string()).collect() + } +}