diff --git a/.circleci/config.yml b/.circleci/config.yml index dce7b45a00bde..f5ad47fbf7aa4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -47,9 +47,21 @@ parameters: contracts_coverage_dispatch: type: boolean default: false + heavy_fuzz_dispatch: + type: boolean + default: false acceptance_tests_dispatch: type: boolean default: false + kurtosis_acceptance_tests_dispatch: + type: boolean + default: false + sync_test_op_node_dispatch: + type: boolean + default: false + ai_contracts_test_dispatch: + type: boolean + default: false github-event-type: type: string default: "__not_set__" @@ -62,89 +74,27 @@ parameters: devnet-metrics-collect: type: boolean default: false + flake-shake-dispatch: + type: boolean + default: false + flake-shake-iterations: + type: integer + default: 100 + flake-shake-workers: + type: integer + default: 10 orbs: go: circleci/go@1.8.0 gcp-cli: circleci/gcp-cli@3.0.1 - slack: circleci/slack@5.1.1 + slack: circleci/slack@6.0.0 shellcheck: circleci/shellcheck@3.2.0 codecov: codecov/codecov@5.0.3 + utils: ethereum-optimism/circleci-utils@1.0.20 docker: circleci/docker@2.8.2 github-cli: circleci/github-cli@2.7.0 commands: - checkout-with-mise: - description: "Checkout then initialize the mise environment" - parameters: - mise_data_dir: - type: string - default: "~/.mise-data" - steps: - - checkout - - run: - name: "Initialize mise environment" - command: | - user=$(whoami) - echo "$user" > .executor-user - echo "Set executor user to $user." - echo "Mise data dir: << parameters.mise_data_dir >>." - if [[ "$user" == "root" ]]; then - rm -rf << parameters.mise_data_dir >> - echo "Cleaned up cache data." - mkdir -p << parameters.mise_data_dir >> - echo "Created Mise data dir." - mkdir -p ~/.cache - echo "Created Mise cache dir." - else - rm -rf << parameters.mise_data_dir >> - echo "Cleaned up cache data." - mkdir -p << parameters.mise_data_dir >> - chown -R "$user:$user" << parameters.mise_data_dir >> - echo "Created Mise data dir." 
- mkdir -p ~/.cache - chown -R "$user:$user" ~/.cache - echo "Created Mise cache dir." - fi - - run: - name: Detect glibc version - command: | - if command -v ldd >/dev/null 2>&1; then - # Use process substitution to avoid SIGPIPE in pipefail - awk 'NR==1 {print $NF}' <(ldd --version) > .glibc-version - else - echo "musl" > .glibc-version - fi - echo "Detected glibc/musl version: $(cat .glibc-version)" - - restore_cache: - name: "Restore mise cache" - keys: - - "mise-v5-cache-{{ .Environment.CACHE_VERSION }}-{{ checksum \".executor-user\" }}-{{ checksum \"mise.toml\" }}-{{ checksum \".glibc-version\" }}" - - run: - name: "Install mise" - command: | - if command -v mise &> /dev/null; then - echo "mise already installed at $(command -v mise)" - else - curl https://mise.run | sh - fi - echo "export PATH=\"$HOME/.local/bin:\$PATH\"" >> "$BASH_ENV" - echo "export MISE_DATA_DIR=<< parameters.mise_data_dir >>" >> "$BASH_ENV" - echo "export MISE_JOBS=$(nproc)" >> "$BASH_ENV" - echo "eval \"\$($HOME/.local/bin/mise activate --shims)\"" >> "$BASH_ENV" - - run: - name: "Install mise deps" - command: | - mise install -v -y - - save_cache: - name: "Save mise cache" - key: "mise-v5-cache-{{ .Environment.CACHE_VERSION }}-{{ checksum \".executor-user\" }}-{{ checksum \"mise.toml\" }}-{{ checksum \".glibc-version\" }}" - paths: - - << parameters.mise_data_dir >> - - run: - name: "Clean up temp files" - command: | - rm -f .executor-user .glibc-version - gcp-oidc-authenticate: description: "Authenticate with GCP using a CircleCI OIDC token." parameters: @@ -290,12 +240,68 @@ commands: DISCORD_MESSAGE="${DISCORD_MESSAGE}\n\n**Attention:** << parameters.mentions >>" fi + # Add extra mentions from environment (e.g. 
owners from flake-shake) + if [ -n "${EXTRA_DISCORD_MENTIONS:-}" ]; then + DISCORD_MESSAGE="${DISCORD_MESSAGE}\n\n**Owners:** ${EXTRA_DISCORD_MENTIONS}" + fi + # Post to Discord webhook curl -X POST -H "Content-Type: application/json" \ -d "{\"content\": \"${DISCORD_MESSAGE}\"}" "${notify_ci}" fi when: on_fail + get-target-branch: + description: "Determine the PR target branch and export TARGET_BRANCH for subsequent steps" + steps: + - run: + name: Determine target branch for this pipeline + command: | + TARGET_BRANCH="" + if [ -n "${CIRCLE_PULL_REQUEST:-}" ]; then + TARGET_BRANCH=$(curl -s "https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/pulls/${CIRCLE_PULL_REQUEST##*/}" | jq -r .base.ref) + fi + + # Fallbacks when not a PR or API did not return a branch + if [ -z "$TARGET_BRANCH" ] || [ "$TARGET_BRANCH" = "null" ]; then + TARGET_BRANCH="<< pipeline.git.branch >>" + fi + + echo "Resolved TARGET_BRANCH=$TARGET_BRANCH" + echo "export TARGET_BRANCH=$TARGET_BRANCH" >> "$BASH_ENV" + + setup-dev-features: + description: "Set up dev feature environment variables from comma-separated list" + parameters: + dev_features: + description: "Comma-separated list of dev features to enable" + type: string + default: "" + steps: + - run: + name: Set dev feature environment variables + command: | + # Set dev feature environment variables if provided + if [ -n "<>" ]; then + DEV_FEATURES_STRING="<>" + + # Check if this is just "main" (baseline with no dev features) + if [ "$(echo "$DEV_FEATURES_STRING" | tr '[:upper:]' '[:lower:]')" = "main" ]; then + echo "Running with baseline configuration (no dev features enabled)" + else + echo "Enabling dev features: <>" + IFS=',' + for feature in $DEV_FEATURES_STRING; do + feature=$(echo "$feature" | xargs) # trim whitespace + if [ -n "$feature" ] && [ "$(echo "$feature" | tr '[:upper:]' '[:lower:]')" != "main" ]; then + env_var="DEV_FEATURE__${feature}" + echo "Setting ${env_var}=true" + echo "export 
${env_var}=true" >> $BASH_ENV + fi + done + unset IFS + fi + fi run-contracts-check: parameters: @@ -316,7 +322,147 @@ commands: environment: FOUNDRY_PROFILE: ci + checkout-from-workspace: + steps: + - attach_workspace: + at: "." + - utils/install-mise + jobs: + # Kurtosis-based acceptance tests + op-acceptance-tests-kurtosis: + parameters: + devnet: + description: | + The name of the pre-defined Kurtosis devnet to run the acceptance tests against + (e.g. 'simple', 'interop', 'jovian'). Empty string uses in-process testing (sysgo orchestrator). + type: string + default: "interop" + gate: + description: The gate to run the acceptance tests against. Must be defined in op-acceptance-tests/acceptance-tests.yaml. + type: string + default: "interop" + no_output_timeout: + description: Timeout for when CircleCI kills the job if there's no output + type: string + default: 30m + use_circleci_runner: + description: Whether to use CircleCI runners (with Docker) instead of self-hosted runners + type: boolean + default: false + machine: + image: <<# parameters.use_circleci_runner >>ubuntu-2404:current<><<^ parameters.use_circleci_runner >>true<> + docker_layer_caching: <> + resource_class: <<# parameters.use_circleci_runner >>xlarge<><<^ parameters.use_circleci_runner >>ethereum-optimism/latitude-1<> + steps: + - checkout-from-workspace + - run: + name: Lint/Vet/Build op-acceptance-tests/cmd + working_directory: op-acceptance-tests + command: | + just cmd-check + - run: + name: Setup Kurtosis + command: | + echo "Setting up Kurtosis for external devnet testing..." + echo "Using Kurtosis from: $(which kurtosis || echo 'not found')" + kurtosis version || true + echo "Starting Kurtosis engine..." + kurtosis engine start || true + echo "Cleaning old instances..." 
+ kurtosis clean -a || true + kurtosis engine status || true + echo "Kurtosis setup complete" + - run: + name: Dump kurtosis logs (pre-run) + command: | + # Best-effort: show engine status and existing enclaves before the test run + kurtosis engine status || true + kurtosis enclave ls || true + - run: + name: Run acceptance tests (devnet=<>, gate=<>) + working_directory: op-acceptance-tests + no_output_timeout: 1h + environment: + GOFLAGS: "-mod=mod" + GO111MODULE: "on" + GOGC: "0" + command: | + LOG_LEVEL=info just acceptance-test "<>" "<>" + - run: + name: Dump kurtosis logs + when: on_fail + command: | + # Dump logs & specs + kurtosis dump ./.kurtosis-dump + + # Remove spec.json files + rm -rf ./.kurtosis-dump/enclaves/**/*.json + + # Remove all unnecessary logs + rm -rf ./.kurtosis-dump/enclaves/*/kurtosis-api--* + rm -rf ./.kurtosis-dump/enclaves/*/kurtosis-logs-collector--* + rm -rf ./.kurtosis-dump/enclaves/*/task-* + + # Print enclaves and try to show service logs for the most recent devnet + kurtosis enclave ls || true + # Dump logs for all enclaves to aid debugging + for e in $(kurtosis enclave ls --output json 2>/dev/null | jq -r '.[].identifier' 2>/dev/null); do + echo "\n==== Kurtosis logs for enclave: $e ====" + kurtosis enclave inspect "$e" || true + kurtosis service logs "$e" --all-services --follow=false || true + done + - run: + name: Print results (summary) + working_directory: op-acceptance-tests + command: | + LOG_DIR=$(ls -td -- logs/* | head -1) + cat "$LOG_DIR/summary.log" || true + - run: + name: Print results (failures) + working_directory: op-acceptance-tests + command: | + LOG_DIR=$(ls -td -- logs/* | head -1) + cat "$LOG_DIR/failed/*.log" || true + when: on_fail + - run: + name: Print results (all) + working_directory: op-acceptance-tests + command: | + LOG_DIR=$(ls -td -- logs/* | head -1) + cat "$LOG_DIR/all.log" || true + - run: + name: Generate JUnit XML test report for CircleCI + working_directory: op-acceptance-tests + when: 
always + command: | + LOG_DIR=$(ls -td -- logs/* | head -1) + gotestsum --junitfile results/results.xml --raw-command cat $LOG_DIR/raw_go_events.log || true + - when: + condition: always + steps: + - store_test_results: + path: ./op-acceptance-tests/results + - when: + condition: always + steps: + - store_artifacts: + path: ./op-acceptance-tests/logs + - discord-notification-failures-on-develop: + mentions: "Platforms (<@&1346448413172170807>)" # Protocol DevX Pod + message: "Kurtosis acceptance tests failed for devnet <> gate <>" + initialize: + docker: + - image: <> + resource_class: large + steps: + - utils/checkout-with-mise + - install-contracts-dependencies + - persist_to_workspace: + root: "." + paths: + - "." + cannon-go-lint-and-test: machine: true resource_class: qkc/ax101 @@ -333,11 +479,9 @@ jobs: type: boolean default: false steps: - - checkout-with-mise + - checkout-from-workspace - check-changed: patterns: cannon,packages/contracts-bedrock/src/cannon,op-preimage,go.mod - - attach_workspace: - at: "." - run: name: prep Cannon results dir command: | @@ -383,7 +527,7 @@ jobs: machine: true resource_class: qkc/ax101 steps: - - checkout-with-mise + - checkout-from-workspace - run: name: Check `RISCV.sol` bytecode working_directory: packages/contracts-bedrock @@ -431,8 +575,7 @@ jobs: type: string default: ci steps: - - checkout-with-mise - - install-contracts-dependencies + - checkout-from-workspace - run: name: Print forge version command: forge --version @@ -446,12 +589,18 @@ jobs: environment: FOUNDRY_PROFILE: <> working_directory: packages/contracts-bedrock + - run: + name: "Copy artifacts into deployer" + command: | + just copy-contract-artifacts + working_directory: op-deployer - persist_to_workspace: root: "." 
paths: - "packages/contracts-bedrock/cache" - "packages/contracts-bedrock/artifacts" - "packages/contracts-bedrock/forge-artifacts" + - "op-deployer/pkg/deployer/artifacts/forge-artifacts" - notify-failures-on-develop check-kontrol-build: @@ -459,7 +608,7 @@ jobs: - image: <> resource_class: xlarge steps: - - checkout-with-mise + - utils/checkout-with-mise - attach_workspace: { at: "." } - install-contracts-dependencies - check-changed: @@ -520,11 +669,7 @@ jobs: resource_class: "<>" docker_layer_caching: true # we rely on this for faster builds, and actively warm it up for builds with common stages steps: - - checkout-with-mise - - run: - command: git submodule update --init - - attach_workspace: - at: /tmp/docker_images + - checkout-from-workspace - run: command: mkdir -p /tmp/docker_images - when: @@ -715,48 +860,6 @@ jobs: docker pull $image_name || exit 1 docker run $image_name <> --version || exit 1 - contracts-bedrock-frozen-code: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - checkout-with-mise - - attach_workspace: { at: "." 
} - - install-contracts-dependencies - - check-changed: - patterns: contracts-bedrock - - run: - name: Check if target branch is develop - command: | - # Get PR number from CIRCLE_PULL_REQUEST - PR_NUMBER=$(echo $CIRCLE_PULL_REQUEST | rev | cut -d/ -f1 | rev) - - # Use GitHub API to get target branch - TARGET_BRANCH=$(curl -s "https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/pulls/${PR_NUMBER}" | jq -r .base.ref) - - # If the target branch is not develop, do not run this check - if [ "$TARGET_BRANCH" != "develop" ]; then - echo "Target branch is not develop, skipping frozen files check" - circleci-agent step halt - fi - - run: - name: Check if PR has exempt label - command: | - # Get PR number from CIRCLE_PULL_REQUEST - PR_NUMBER=$(echo $CIRCLE_PULL_REQUEST | rev | cut -d/ -f1 | rev) - - # Use GitHub API to get labels - LABELS=$(curl -s "https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/pulls/${PR_NUMBER}" | jq -r .labels) - - # If the PR has the "M-exempt-frozen-files" label, do not run this check - if echo $LABELS | jq -e 'any(.[]; .name == "M-exempt-frozen-files")' > /dev/null; then - echo "Skipping frozen files check, PR has exempt label" - circleci-agent step halt - fi - - run: - name: Check frozen files - command: just check-frozen-code - working_directory: packages/contracts-bedrock - contracts-bedrock-tests: circleci_ip_ranges: true docker: @@ -782,10 +885,12 @@ jobs: description: Profile to use for testing type: string default: ci + check_changed_patterns: + description: List of changed files to run tests on + type: string + default: contracts-bedrock steps: - - checkout-with-mise - - attach_workspace: { at: "." 
} - - install-contracts-dependencies + - checkout-from-workspace - run: name: Check if test list is empty command: | @@ -796,7 +901,7 @@ jobs: fi working_directory: packages/contracts-bedrock - check-changed: - patterns: contracts-bedrock,op-node + patterns: <> - run: name: Print dependencies command: just dep-status @@ -846,10 +951,76 @@ jobs: - "/root/.cache/go-build" - notify-failures-on-develop + contracts-bedrock-heavy-fuzz-nightly: + circleci_ip_ranges: true + docker: + - image: <> + resource_class: xlarge + steps: + - checkout-from-workspace + - run: + name: Print dependencies + command: just dep-status + working_directory: packages/contracts-bedrock + - run: + name: Print forge version + command: forge --version + working_directory: packages/contracts-bedrock + - run: + name: Pull artifacts + command: bash scripts/ops/pull-artifacts.sh + working_directory: packages/contracts-bedrock + - run: + name: Build go-ffi + command: just build-go-ffi + working_directory: packages/contracts-bedrock + - run: + name: Run heavy fuzz tests + command: just test + environment: + FOUNDRY_PROFILE: ciheavy + working_directory: packages/contracts-bedrock + no_output_timeout: 90m + - run: + name: Print failed test traces + command: just test-rerun + environment: + FOUNDRY_PROFILE: ciheavy + working_directory: packages/contracts-bedrock + when: on_fail + - save_cache: + name: Save Go build cache + key: golang-build-cache-contracts-bedrock-heavy-fuzz-{{ checksum "go.sum" }} + paths: + - "/root/.cache/go-build" + - notify-failures-on-develop + + # AI Contracts Test Maintenance System + # Runbook: https://github.com/ethereum-optimism/optimism/blob/develop/ops/ai-eng/contracts-test-maintenance/docs/runbook.md + ai-contracts-test: + circleci_ip_ranges: true + docker: + - image: <> + resource_class: xlarge + steps: + - checkout-from-workspace + - run: + name: Check Python version + command: python3 --version + - run: + name: Run AI Contracts Test System + command: just 
ai-contracts-test + working_directory: ops/ai-eng + no_output_timeout: 60m + - store_artifacts: + path: ops/ai-eng/contracts-test-maintenance/log.jsonl + destination: log.jsonl + - notify-failures-on-develop + contracts-bedrock-coverage: circleci_ip_ranges: true docker: - - image: <> + - image: <> resource_class: 2xlarge parameters: test_flags: @@ -864,12 +1035,14 @@ jobs: description: Profile to use for testing type: string default: ci + dev_features: + description: Comma-separated list of dev features to enable (e.g., "OPTIMISM_PORTAL_INTEROP,ANOTHER_FEATURE") + type: string + default: "" steps: - - checkout-with-mise - - attach_workspace: { at: "." } - - install-contracts-dependencies + - checkout-from-workspace - check-changed: - patterns: contracts-bedrock,op-node + patterns: contracts-bedrock - run: name: Print dependencies command: just dep-status @@ -896,6 +1069,8 @@ jobs: - restore_cache: name: Restore forked state key: forked-state-contracts-bedrock-tests-upgrade-{{ checksum "packages/contracts-bedrock/pinnedBlockNumber.txt" }} + - setup-dev-features: + dev_features: <> - run: name: Run coverage tests command: just coverage-lcov-all <> @@ -906,8 +1081,7 @@ jobs: no_output_timeout: <> - run: name: Print failed test traces - command: | - just test-rerun | tee failed-test-traces.log + command: just test-rerun | tee failed-test-traces.log environment: FOUNDRY_PROFILE: <> ETH_RPC_URL: https://ci-mainnet-l1-archive.optimism.io @@ -940,11 +1114,9 @@ jobs: - image: <> resource_class: xlarge steps: - - checkout-with-mise - - attach_workspace: { at: "." } - - install-contracts-dependencies + - checkout-from-workspace - check-changed: - patterns: contracts-bedrock,op-node + patterns: contracts-bedrock - run: name: Print dependencies command: just dep-status @@ -1013,11 +1185,10 @@ jobs: machine: true resource_class: qkc/ax101 steps: - - checkout-with-mise - - attach_workspace: { at: "." 
} - - install-contracts-dependencies + - checkout-from-workspace - check-changed: - patterns: contracts-bedrock,op-node + patterns: contracts-bedrock + - get-target-branch - run: name: print forge version command: forge --version @@ -1058,7 +1229,7 @@ jobs: machine: image: <> steps: - - checkout-with-mise + - utils/checkout-with-mise - run: name: Install ripgrep command: sudo apt-get install -y ripgrep @@ -1082,7 +1253,7 @@ jobs: machine: true resource_class: qkc/ax101 steps: - - checkout-with-mise + - checkout-from-workspace - check-changed: patterns: "<>" - attach_workspace: @@ -1108,12 +1279,17 @@ jobs: machine: true resource_class: qkc/ax101 steps: - - checkout-with-mise + - checkout-from-workspace + - restore_cache: + key: golangci-v1-{{ checksum ".golangci.yaml" }} - run: name: run Go linter command: | make lint-go - working_directory: . + - save_cache: + key: golangci-v1-{{ checksum ".golangci.yaml" }} + paths: + - "/home/circleci/.cache/golangci-lint" go-tests: parameters: @@ -1145,30 +1321,36 @@ jobs: description: Rule to run the tests type: string default: "go-tests-short-ci" + parallelism: + description: Number machines to distribute the tests across + type: integer + default: 1 machine: true resource_class: <> + circleci_ip_ranges: true + parallelism: <> steps: - - checkout-with-mise - - attach_workspace: - at: "." 
+ - checkout-from-workspace - run: - name: Run all Go tests via Makefile + name: Run Go tests via Makefile no_output_timeout: <> command: | <> export TEST_TIMEOUT=<> make <> - - codecov/upload: - disable_search: true - files: ./coverage.out - store_test_results: path: ./tmp/test-results - run: name: Compress test logs - command: tar -czf testlogs.tar.gz -C ./tmp testlogs + command: | + if [ -n "$CIRCLE_NODE_TOTAL" ] && [ "$CIRCLE_NODE_TOTAL" -gt 1 ]; then + tar -czf testlogs-${CIRCLE_NODE_INDEX}-of-${CIRCLE_NODE_TOTAL}.tar.gz -C ./tmp testlogs + else + tar -czf testlogs.tar.gz -C ./tmp testlogs + fi when: always - store_artifacts: - path: testlogs.tar.gz + path: testlogs*.tar.gz when: always - when: condition: "<>" @@ -1205,9 +1387,7 @@ jobs: machine: true resource_class: <> steps: - - checkout-with-mise - - attach_workspace: - at: "." + - checkout-from-workspace - run: name: build op-program-client command: make op-program-client @@ -1244,17 +1424,8 @@ jobs: - notify-failures-on-develop: mentions: "<>" - op-acceptance-tests: + op-acceptance-sync-tests-docker: parameters: - devnet: - description: | - The name of the pre-defined kurtosis devnet to run the acceptance tests against - (e.g. 'simple', 'isthmus', 'interop'). Empty string uses - in-process testing (sysgo orchestrator). Named devnets use - external testing (sysext orchestrator) and must have a - recipe defined in kurtosis-devnet/Justfile. - type: string - default: "" gate: description: The gate to run the acceptance tests against. This gate should be defined in op-acceptance-tests/acceptance-tests.yaml. 
type: string @@ -1263,56 +1434,21 @@ jobs: description: Timeout for when CircleCI kills the job if there's no output type: string default: 30m - machine: - image: ubuntu-2404:current - docker_layer_caching: true # Since we are building docker images for components, we'll cache the layers for faster builds + # Optional sync test configuration parameters + network_preset: + description: Network preset + type: string + default: "" + l2_cl_syncmode: + description: L2 CL Sync mode - can be EL Sync or CL Sync + type: string + default: "" resource_class: xlarge + docker: + - image: <> + circleci_ip_ranges: true steps: - - checkout-with-mise - - install-contracts-dependencies - # Attach workspace for cannon dependencies if using sysgo orchestrator (empty devnet) - - when: - condition: - equal: ["", << parameters.devnet >>] - steps: - - attach_workspace: - at: "." - - run: - name: Setup Kurtosis (if needed) - command: | - if [[ "<>" != "" ]]; then - echo "Setting up Kurtosis for external devnet testing..." - - # Print Kurtosis version - echo "Using Kurtosis from: $(which kurtosis || echo 'not found')" - kurtosis version - - # Start Kurtosis engine - echo "Starting Kurtosis engine..." - kurtosis engine start || true - - # Clean old instances - echo "Cleaning old instances..." 
- kurtosis clean -a || true - - # Check engine status - kurtosis engine status || true - - echo "Kurtosis setup complete" - else - echo "Using in-process testing (sysgo orchestrator) - no Kurtosis setup needed" - fi - # Notify us of a setup failure - - when: - condition: on_fail - steps: - - discord-notification-failures-on-develop: - mentions: "<@&1346448413172170807>" # Protocol DevX Pod - message: "Devnet <>-devnet failed to start" - - run: - name: Stop the job if the devnet failed to start - command: circleci-agent step halt - when: on_fail + - checkout-from-workspace # Restore cached Go modules - restore_cache: keys: @@ -1323,12 +1459,18 @@ jobs: name: Download Go dependencies working_directory: op-acceptance-tests command: go mod download - # Prepare the test environment - run: - name: Prepare test environment (compile tests and cache build results) + name: Lint/Vet/Build op-acceptance-tests/cmd working_directory: op-acceptance-tests - command: go test -v -c -o /dev/null $(go list -f '{{if .TestGoFiles}}{{.ImportPath}}{{end}}' ./tests/...) 
- # Run the acceptance tests (if the devnet is running) + command: | + just cmd-check + # Persist schedule name into env var + - run: + name: Persist schedule name into env var + command: | + echo 'export CIRCLECI_PIPELINE_SCHEDULE_NAME="<< pipeline.schedule.name >>"' >> $BASH_ENV + echo 'export CIRCLECI_PARAMETERS_SYNC_TEST_OP_NODE_DISPATCH="<< pipeline.parameters.sync_test_op_node_dispatch >>"' >> $BASH_ENV + # Run the acceptance tests - run: name: Run acceptance tests (gate=<>) working_directory: op-acceptance-tests @@ -1337,9 +1479,12 @@ jobs: GOFLAGS: "-mod=mod" GO111MODULE: "on" GOGC: "0" + # Optional sync test configuration environment variables (only set if parameters are provided) + NETWORK_PRESET: "<>" + L2_CL_SYNCMODE: "<>" command: | # Run the tests - LOG_LEVEL=debug just acceptance-test "<>" "<>" + LOG_LEVEL=debug just acceptance-test "" "<>" - run: name: Print results (summary) working_directory: op-acceptance-tests @@ -1382,49 +1527,280 @@ jobs: steps: - store_artifacts: path: ./op-acceptance-tests/logs - # Dump kurtosis logs if external devnet was used + - notify-failures-on-develop: + mentions: "@changwan <@U08L5U8070U>" # @changwan @Anton Evangelatov + + op-acceptance-tests: + parameters: + gate: + description: The gate to run the acceptance tests against. This gate should be defined in op-acceptance-tests/acceptance-tests.yaml. 
+ type: string + default: "" + no_output_timeout: + description: Timeout for when CircleCI kills the job if there's no output + type: string + default: 30m + use_circleci_runner: + description: Whether to use CircleCI runners (with Docker) instead of self-hosted runners + type: boolean + default: false + machine: + image: <<# parameters.use_circleci_runner >>ubuntu-2404:current<><<^ parameters.use_circleci_runner >>true<> + docker_layer_caching: <> + resource_class: <<# parameters.use_circleci_runner >>xlarge<><<^ parameters.use_circleci_runner >>ethereum-optimism/latitude-1<> + steps: + - checkout-from-workspace + # Restore cached Go modules + - restore_cache: + keys: + - go-mod-v1-{{ checksum "go.sum" }} + - go-mod-v1- + # Download Go dependencies - run: - name: Dump kurtosis logs (if external devnet was used) - when: on_fail + name: Download Go dependencies + working_directory: op-acceptance-tests + command: go mod download + - run: + name: Lint/Vet/Build op-acceptance-tests/cmd + working_directory: op-acceptance-tests command: | - if [[ "<>" != "" ]]; then - # Dump logs & specs - kurtosis dump ./.kurtosis-dump - - # Remove spec.json files - rm -rf ./.kurtosis-dump/enclaves/**/*.json - - # Remove all unnecessary logs - rm -rf ./.kurtosis-dump/enclaves/*/kurtosis-api--* - rm -rf ./.kurtosis-dump/enclaves/*/kurtosis-logs-collector--* - rm -rf ./.kurtosis-dump/enclaves/*/task-* + just cmd-check + # Prepare the test environment + - run: + name: Prepare test environment (compile tests and cache build results) + working_directory: op-acceptance-tests + command: go test -v -c -o /dev/null $(go list -f '{{if .TestGoFiles}}{{.ImportPath}}{{end}}' ./tests/...) + # Run the acceptance tests (if the devnet is running) + - run: + name: Run acceptance tests (gate=<>) + working_directory: op-acceptance-tests + no_output_timeout: 1h + command: | + if [[ "<>" == "" ]]; then + echo "Running in gateless mode - auto-discovering all tests in ./op-acceptance-tests/..." 
else - echo "In-process testing was used - no kurtosis logs to dump" + echo "Running in gate mode (gate=<>)" fi + LOG_LEVEL=info just acceptance-test "" "<>" + - run: + name: Print results (summary) + working_directory: op-acceptance-tests + command: | + LOG_DIR=$(ls -td -- logs/* | head -1) + cat "$LOG_DIR/summary.log" || true + - run: + name: Print results (failures) + working_directory: op-acceptance-tests + command: | + LOG_DIR=$(ls -td -- logs/* | head -1) + cat "$LOG_DIR/failed/*.log" || true + when: on_fail + - run: + name: Print results (all) + working_directory: op-acceptance-tests + command: | + LOG_DIR=$(ls -td -- logs/* | head -1) + cat "$LOG_DIR/all.log" || true + - run: + name: Generate JUnit XML test report for CircleCI + working_directory: op-acceptance-tests + when: always + command: | + LOG_DIR=$(ls -td -- logs/* | head -1) + gotestsum --junitfile results/results.xml --raw-command cat $LOG_DIR/raw_go_events.log || true + # Save the module cache for future runs + - save_cache: + key: go-mod-v1-{{ checksum "go.sum" }} + paths: + - "/go/pkg/mod" + # Store test results and artifacts + - when: + condition: always + steps: + - store_test_results: + path: ./op-acceptance-tests/results - when: condition: always steps: - store_artifacts: - path: ./.kurtosis-dump/enclaves - destination: op-acceptance-tests/kurtosis-logs + path: ./op-acceptance-tests/logs - when: condition: on_fail steps: - discord-notification-failures-on-develop: - mentions: "Platforms (<@225161927351992320>) & Protocol (<@590878816004603924>)" # stefano, changwan - message: "Acceptance tests failed for gate <> on devnet <>" + mentions: "Platforms (<@&1346448413172170807>) & Protocol (<@590878816004603924>)" # Protocol DevX Pod, changwan + message: "Acceptance tests failed for gate <>" + + op-acceptance-tests-flake-shake: + parameters: + gate: + type: string + default: "flake-shake" + machine: + image: ubuntu-2404:current + resource_class: large + parallelism: << 
pipeline.parameters.flake-shake-workers >> + steps: + - checkout-from-workspace + - restore_cache: + keys: + - go-mod-v1-{{ checksum "go.sum" }} + - go-mod-v1- + - run: + name: Download Go dependencies + working_directory: op-acceptance-tests + command: go mod download + - run: + name: Lint/Vet/Build op-acceptance-tests/cmd + working_directory: op-acceptance-tests + command: | + just cmd-check + - run: + name: Calculate iterations for worker + command: | + bash ./op-acceptance-tests/scripts/ci_flake_shake_calc_iterations.sh << pipeline.parameters.flake-shake-iterations >> + - run: + name: Run flake-shake iterations + no_output_timeout: 2h + working_directory: op-acceptance-tests + command: | + OUTPUT_DIR="logs/flake-shake-results-worker-${FLAKE_SHAKE_WORKER_ID}" + mkdir -p "$OUTPUT_DIR" + op-acceptor \ + --validators ./acceptance-tests.yaml \ + --gate << parameters.gate >> \ + --testdir tests \ + --flake-shake \ + --flake-shake-iterations "$FLAKE_SHAKE_ITERATIONS" \ + --orchestrator sysgo \ + --logdir "./$OUTPUT_DIR" + - persist_to_workspace: + root: op-acceptance-tests + paths: + - logs/flake-shake-results-worker-*/ + - store_artifacts: + path: ./op-acceptance-tests/logs/flake-shake-results-worker-* + destination: flake-shake-workers + + op-acceptance-tests-flake-shake-report: + machine: + image: ubuntu-2404:current + resource_class: large + steps: + - checkout-from-workspace + - attach_workspace: + at: . 
+ - run: + name: Lint/Vet/Build op-acceptance-tests/cmd + working_directory: op-acceptance-tests + command: | + just cmd-check + - run: + name: Build flake-shake aggregator + working_directory: op-acceptance-tests + command: | + go mod download + go build -o ../flake-shake-aggregator ./cmd/flake-shake-aggregator/main.go + - run: + name: Aggregate results + command: | + mkdir -p final-report + ./flake-shake-aggregator \ + --input-pattern "logs/flake-shake-results-worker-*/testrun-*/flake-shake-report.json" \ + --output-dir final-report \ + --verbose + - run: + name: Generate summary + command: | + bash ./op-acceptance-tests/scripts/ci_flake_shake_generate_summary.sh final-report/flake-shake-report.json final-report + - store_artifacts: + path: ./final-report + destination: flake-shake-report + + op-acceptance-tests-flake-shake-promote: + machine: + image: ubuntu-2404:current + resource_class: large + steps: + - checkout-from-workspace + - run: + name: Lint/Vet/Build op-acceptance-tests/cmd + working_directory: op-acceptance-tests + command: | + just cmd-check + - run: + name: Build flake-shake promoter + working_directory: op-acceptance-tests + command: | + go mod download + go build -o ../flake-shake-promoter ./cmd/flake-shake-promoter/main.go + - run: + name: Set GH_TOKEN + command: | + if [ -n "${GITHUB_TOKEN_GOVERNANCE:-}" ]; then + echo "export GH_TOKEN=${GITHUB_TOKEN_GOVERNANCE}" >> "$BASH_ENV" + fi + - run: + name: Validate GH_TOKEN is present + command: | + if [ -z "${GH_TOKEN:-}" ]; then + echo "GH_TOKEN is required for PR creation" >&2 + exit 1 + fi + - run: + name: Run flake-shake promoter + command: | + ./flake-shake-promoter \ + --org ethereum-optimism \ + --repo optimism \ + --branch "<< pipeline.git.branch >>" \ + --workflow scheduled-flake-shake \ + --report-job op-acceptance-tests-flake-shake-report \ + --days 3 \ + --gate flake-shake \ + --min-runs 300 \ + --max-failure-rate 0.01 \ + --min-age-days 3 \ + --dry-run=false \ + --require-clean-24h \ + 
--out ./final-promotion \ + --verbose + - store_artifacts: + path: ./final-promotion + destination: flake-shake-promotion + - run: + name: Prepare Slack message (promotion candidates) + command: | + bash ./op-acceptance-tests/scripts/ci_flake_shake_prepare_slack.sh ./final-promotion/promotion-ready.json + - run: + name: Slack - Sending Notification + command: | + set -euo pipefail + # The Slack orb conditionals evaluate at compile time; guard at runtime instead. + if [ -z "${SLACK_BLOCKS_PAYLOAD:-}" ] || [ "${SLACK_BLOCKS_PAYLOAD}" = "[]" ]; then + echo "SLACK_BLOCKS is empty or doesn't exist. Skipping it..." + exit 0 + fi + echo "$SLACK_BLOCKS_PAYLOAD" | jq '.' > /tmp/blocks.json + jq -c '{blocks: .}' /tmp/blocks.json > /tmp/slack_template.json + echo 'export SLACK_TEMPLATE=$(cat /tmp/slack_template.json)' >> $BASH_ENV + - slack/notify: + channel: notify-ci-failures + event: always + retries: 1 + retry_delay: 3 + template: SLACK_TEMPLATE sanitize-op-program: machine: true resource_class: qkc/ax101 steps: - - checkout-with-mise - # Already installed on qkc/ax101 - # - run: - # name: Install tools - # command: | - # sudo apt-get update - # sudo apt-get install -y binutils-mips-linux-gnu + - checkout-from-workspace + - run: + name: Install tools + command: | + sudo apt-get update + sudo apt-get install -y binutils-mips-linux-gnu - run: name: Build cannon command: make cannon @@ -1440,7 +1816,7 @@ jobs: machine: true resource_class: qkc/ax101 steps: - - checkout-with-mise + - checkout-from-workspace - restore_cache: name: Restore cannon prestate cache key: cannon-prestate-{{ checksum "./cannon/bin/cannon" }}-{{ checksum "op-program/bin/op-program-client.elf" }} @@ -1467,7 +1843,7 @@ jobs: docker: - image: <> steps: - - checkout-with-mise + - checkout-from-workspace - setup_remote_docker - run: name: Build prestates @@ -1482,7 +1858,7 @@ jobs: machine: true resource_class: qkc/ax101 steps: - - checkout-with-mise + - utils/checkout-with-mise - attach_workspace: at: "." 
- gcp-cli/install @@ -1490,6 +1866,7 @@ jobs: gcp_cred_config_file_path: /tmp/gcp_cred_config.json oidc_token_file_path: /tmp/oidc_token.json - run: + no_output_timeout: 30m name: Upload cannon prestates command: | # Use the actual hash for tags (hash can be found by reading releases.json) @@ -1502,12 +1879,20 @@ jobs: echo "Publishing ${PRESTATE_MT64_HASH}, ${PRESTATE_MT64NEXT_HASH}, ${PRESTATE_INTEROP_HASH}, ${PRESTATE_INTEROP_NEXT_HASH} as ${BRANCH_NAME}" if [[ "" != "<< pipeline.git.branch >>" ]] then + echo "Publishing commit hash data" + INFO_FILE=$(mktemp) # Upload the git commit info for each prestate since this won't be recorded in releases.json - (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate: ${PRESTATE_MT64_HASH}") | gsutil cp - "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}-mt64.bin.gz.txt" - (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate: ${PRESTATE_MT64NEXT_HASH}") | gsutil cp - "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}-mt64Next.bin.gz.txt" - (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate: ${PRESTATE_INTEROP_HASH}") | gsutil cp - "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}-interop.bin.gz.txt" - (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate: ${PRESTATE_INTEROP_NEXT_HASH}") | gsutil cp - "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}-interopNext.bin.gz.txt" - + (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate: ${PRESTATE_MT64_HASH}") > "${INFO_FILE}" + gsutil cp "${INFO_FILE}" "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}-mt64.bin.gz.txt" + echo "Published commit hash data successfully" # So we know if any uploads worked + (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate: ${PRESTATE_MT64NEXT_HASH}") > "${INFO_FILE}" + gsutil cp "${INFO_FILE}" "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}-mt64Next.bin.gz.txt" + (echo "Commit=<< pipeline.git.revision 
>>" && echo "Prestate: ${PRESTATE_INTEROP_HASH}") > "${INFO_FILE}" + gsutil cp "${INFO_FILE}" "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}-interop.bin.gz.txt" + (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate: ${PRESTATE_INTEROP_NEXT_HASH}") > "${INFO_FILE}" + gsutil cp "${INFO_FILE}" "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}-interopNext.bin.gz.txt" + rm "${INFO_FILE}" # keep things tidy + echo "All commit info published" # Use the branch name for branches to provide a consistent URL PRESTATE_MT64_HASH="${BRANCH_NAME}-mt64" @@ -1533,7 +1918,7 @@ jobs: docker: - image: <> steps: - - checkout-with-mise + - utils/checkout-with-mise - setup_remote_docker - run: name: Verify reproducibility @@ -1548,7 +1933,7 @@ jobs: docker: - image: <> steps: - - checkout-with-mise + - utils/checkout-with-mise - setup_remote_docker - run: name: Build cannon @@ -1615,7 +2000,7 @@ jobs: - image: <> resource_class: xlarge steps: - - checkout-with-mise + - checkout-from-workspace - setup_remote_docker - run: name: Run Analyzer @@ -1627,7 +2012,7 @@ jobs: machine: true resource_class: qkc/ax101 steps: - - checkout-with-mise + - checkout-from-workspace - run: name: Verify Compatibility command: | @@ -1638,7 +2023,7 @@ jobs: machine: true resource_class: qkc/ax101 steps: - - checkout-with-mise + - checkout-from-workspace - check-changed: patterns: op-node - run: @@ -1649,19 +2034,28 @@ jobs: machine: true resource_class: qkc/ax101 steps: - - checkout-with-mise + - checkout-from-workspace - check-changed: patterns: op-service - run: name: check-generated-mocks command: make generate-mocks-op-service && git diff --exit-code + op-deployer-forge-version: + docker: + - image: <> + steps: + - checkout-from-workspace + - run: + command: just check-forge-version + working_directory: op-deployer + kontrol-tests: docker: - image: <> resource_class: xlarge steps: - - checkout-with-mise + - utils/checkout-with-mise - 
install-contracts-dependencies - check-changed: no_go_deps: "true" @@ -1698,7 +2092,7 @@ jobs: oidc_token_file_path: /tmp/oidc_token.json project_id: GCP_TOOLS_ARTIFACTS_PROJECT_ID service_account_email: GCP_CONTRACTS_PUBLISHER_SERVICE_ACCOUNT_EMAIL - - checkout-with-mise + - utils/checkout-with-mise - install-contracts-dependencies - run: name: Pull artifacts @@ -1734,7 +2128,8 @@ jobs: - gcp-oidc-authenticate: gcp_cred_config_file_path: /tmp/gcp_cred_config.json oidc_token_file_path: /tmp/oidc_token.json - - checkout-with-mise + - utils/checkout-with-mise + - attach_workspace: { at: "." } - run: name: Configure Docker command: | @@ -1749,10 +2144,7 @@ jobs: - image: <> resource_class: medium steps: - - checkout-with-mise - - check-changed: - patterns: packages/contracts-bedrock - - install-contracts-dependencies + - checkout-from-workspace - run: name: Build contracts command: | @@ -1773,18 +2165,18 @@ jobs: echo "✅ Checked-in forge artifacts match the ci build" stale-check: - machine: - image: ubuntu-2204:2024.08.1 - steps: - - utils/github-stale: - stale-issue-message: 'This issue has been automatically marked as stale and will be closed in 5 days if no updates' - stale-pr-message: 'This pr has been automatically marked as stale and will be closed in 5 days if no updates' - close-issue-message: 'This issue was closed as stale. Please reopen if this is a mistake' - close-pr-message: 'This PR was closed as stale. Please reopen if this is a mistake' - days-before-issue-stale: 999 - days-before-pr-stale: 14 - days-before-issue-close: 5 - days-before-pr-close: 5 + machine: + image: ubuntu-2204:2024.08.1 + steps: + - utils/github-stale: + stale-issue-message: "This issue has been automatically marked as stale and will be closed in 5 days if no updates" + stale-pr-message: "This pr has been automatically marked as stale and will be closed in 5 days if no updates" + close-issue-message: "This issue was closed as stale. 
Please reopen if this is a mistake" + close-pr-message: "This PR was closed as stale. Please reopen if this is a mistake" + days-before-issue-stale: 999 + days-before-pr-stale: 14 + days-before-issue-close: 5 + days-before-pr-close: 5 close-issue: machine: @@ -1815,7 +2207,7 @@ jobs: docker: - image: <> steps: - - checkout-with-mise + - utils/checkout-with-mise - run: name: Collect devnet metrics for op-acceptance-tests command: | @@ -1838,7 +2230,7 @@ jobs: machine: true resource_class: medium steps: - - checkout-with-mise + - utils/checkout-with-mise - run: name: Generate flaky acceptance tests report command: | @@ -1861,25 +2253,89 @@ jobs: path: ./op-acceptance-tests/reports destination: flaky-test-reports - workflows: + # Nightly Kurtosis acceptance tests + scheduled-kurtosis-acceptance-tests: + when: + or: + - equal: [build_daily, <>] + - and: + - equal: [true, << pipeline.parameters.kurtosis_acceptance_tests_dispatch >>] + - equal: ["api", << pipeline.trigger_source >>] + jobs: + - initialize: + context: + - circleci-repo-readonly-authenticated-github-token + - contracts-bedrock-build: # needed for in-process tests that some suites may use + build_args: --skip test + context: + - circleci-repo-readonly-authenticated-github-token + requires: + - initialize + - cannon-prestate-quick: # needed for sysgo tests (if any package is in-memory) + context: + - circleci-repo-readonly-authenticated-github-token + requires: + - initialize + - op-acceptance-tests-kurtosis: + name: kurtosis-simple-nightly + devnet: simple + gate: base + use_circleci_runner: true + no_output_timeout: 60m + context: + - circleci-repo-readonly-authenticated-github-token + - discord + requires: + - contracts-bedrock-build + - cannon-prestate-quick + - op-acceptance-tests-kurtosis: + name: kurtosis-jovian-nightly + devnet: jovian + gate: jovian + use_circleci_runner: true + no_output_timeout: 60m + context: + - circleci-repo-readonly-authenticated-github-token + - discord + requires: + - 
contracts-bedrock-build + - cannon-prestate-quick + - op-acceptance-tests-kurtosis: + name: kurtosis-interop-nightly + devnet: interop + gate: interop + use_circleci_runner: true + no_output_timeout: 60m + context: + - circleci-repo-readonly-authenticated-github-token + - discord + requires: + - contracts-bedrock-build + - cannon-prestate-quick main: when: - and: - - or: - # Trigger on new commits - # - equal: [ webhook, << pipeline.trigger_source >> ] - # Trigger on manual triggers if explicitly requested - - equal: [true, << pipeline.parameters.main_dispatch >>] - - not: - equal: [scheduled_pipeline, << pipeline.trigger_source >>] + or: + - equal: ["webhook", << pipeline.trigger_source >>] + - and: + - equal: [true, <>] + - equal: ["api", << pipeline.trigger_source >>] + - equal: [ + << pipeline.parameters.github-event-type >>, + "__not_set__", + ] #this is to prevent triggering this workflow as the default value is always set for main_dispatch jobs: + - initialize: + context: + - circleci-repo-readonly-authenticated-github-token - contracts-bedrock-build: name: contracts-bedrock-build # Build with just core + script contracts. build_args: --deny-warnings --skip test context: - circleci-repo-readonly-authenticated-github-token + requires: + - initialize - check-kontrol-build: requires: - contracts-bedrock-build @@ -1891,12 +2347,17 @@ workflows: test_list: find test -name "*.t.sol" -not -name "PreimageOracle.t.sol" context: - circleci-repo-readonly-authenticated-github-token + requires: + - initialize + check_changed_patterns: contracts-bedrock,op-node - contracts-bedrock-tests: # PreimageOracle test is slow, run it separately to unblock CI. name: contracts-bedrock-tests-preimage-oracle test_list: find test -name "PreimageOracle.t.sol" context: - circleci-repo-readonly-authenticated-github-token + requires: + - initialize - contracts-bedrock-tests: # Heavily fuzz any fuzz tests within added or modified test files. 
name: contracts-bedrock-tests-heavy-fuzz-modified @@ -1905,75 +2366,71 @@ workflows: test_profile: ciheavy context: - circleci-repo-readonly-authenticated-github-token + requires: + - initialize - contracts-bedrock-coverage: # Generate coverage reports. - name: contracts-bedrock-coverage + name: contracts-bedrock-coverage <> test_timeout: 1h test_profile: cicoverage + dev_features: <> + matrix: + parameters: + dev_features: ["main", "OPTIMISM_PORTAL_INTEROP","CANNON_KONA","CANNON_KONA,DEPLOY_V2_DISPUTE_GAMES"] # need this requires to ensure that all FFI JSONs exist requires: - contracts-bedrock-build context: - circleci-repo-readonly-authenticated-github-token - contracts-bedrock-tests-upgrade: - name: contracts-bedrock-tests-upgrade - fork_op_chain: op + name: contracts-bedrock-tests-upgrade <>-mainnet + fork_op_chain: <> fork_base_chain: mainnet fork_base_rpc: https://ci-mainnet-l1-archive.optimism.io + matrix: + parameters: + fork_op_chain: ["op", "base", "ink", "unichain"] context: - circleci-repo-readonly-authenticated-github-token - - contracts-bedrock-tests-upgrade: - name: contracts-bedrock-tests-upgrade base-mainnet - fork_op_chain: base - fork_base_chain: mainnet - fork_base_rpc: https://ci-mainnet-l1-archive.optimism.io + requires: + - initialize + - contracts-bedrock-checks: + requires: + - contracts-bedrock-build context: - circleci-repo-readonly-authenticated-github-token - - contracts-bedrock-tests-upgrade: - name: contracts-bedrock-tests-upgrade ink-mainnet - fork_op_chain: ink - fork_base_chain: mainnet - fork_base_rpc: https://ci-mainnet-l1-archive.optimism.io + - diff-fetcher-forge-artifacts: context: - circleci-repo-readonly-authenticated-github-token - - contracts-bedrock-tests-upgrade: - name: contracts-bedrock-tests-upgrade unichain-mainnet - fork_op_chain: unichain - fork_base_chain: mainnet - fork_base_rpc: https://ci-mainnet-l1-archive.optimism.io + requires: + - contracts-bedrock-build + - op-deployer-forge-version: context: - 
circleci-repo-readonly-authenticated-github-token - - contracts-bedrock-checks: requires: - - contracts-bedrock-build - context: - - circleci-repo-readonly-authenticated-github-token - - discord - # Skip frozen check for op-es - # - contracts-bedrock-frozen-code: - # requires: - # - contracts-bedrock-build - # context: - # - circleci-repo-readonly-authenticated-github-token - - diff-fetcher-forge-artifacts: - context: - - circleci-repo-readonly-authenticated-github-token - - diff-asterisc-bytecode: + - initialize + - diff-asterisc-bytecode: context: - circleci-repo-readonly-authenticated-github-token + requires: + - initialize - semgrep-scan: name: semgrep-scan-local scan_command: semgrep scan --timeout=100 --config .semgrep/rules/ --error . context: + - slack - circleci-repo-readonly-authenticated-github-token - semgrep-scan: name: semgrep-test scan_command: semgrep scan --test --config .semgrep/rules/ .semgrep/tests/ context: + - slack - circleci-repo-readonly-authenticated-github-token - go-lint: context: - circleci-repo-readonly-authenticated-github-token + requires: + - initialize - fuzz-golang: name: fuzz-golang-<> on_changes: <> @@ -1986,43 +2443,50 @@ workflows: - op-chain-ops context: - circleci-repo-readonly-authenticated-github-token + requires: + - initialize - fuzz-golang: name: cannon-fuzz package_name: cannon on_changes: cannon,packages/contracts-bedrock/src/cannon uses_artifacts: true - requires: ["contracts-bedrock-build"] context: - circleci-repo-readonly-authenticated-github-token + requires: + - contracts-bedrock-build - fuzz-golang: name: op-e2e-fuzz package_name: op-e2e on_changes: op-e2e,packages/contracts-bedrock/src uses_artifacts: true - requires: ["contracts-bedrock-build"] context: - circleci-repo-readonly-authenticated-github-token - - go-tests: - name: go-tests-short - no_output_timeout: 19m - test_timeout: 20m requires: - contracts-bedrock-build - - cannon-prestate-quick - context: - - circleci-repo-readonly-authenticated-github-token 
- filters: - branches: - ignore: op-es # Run on all branches EXCEPT develop (PR branches only) + # Always run full tests on qkc + # - go-tests: + # name: go-tests-short + # parallelism: 4 + # no_output_timeout: 19m + # test_timeout: 20m + # requires: + # - contracts-bedrock-build + # - cannon-prestate-quick + # context: + # - circleci-repo-readonly-authenticated-github-token + # filters: + # branches: + # ignore: develop # Run on all branches EXCEPT develop (PR branches only) - go-tests: name: go-tests-full - rule: "go-tests-ci" # Run full test suite instead of short - no_output_timeout: 89m # Longer timeout for full tests + rule: "go-tests-ci" # Run full test suite instead of short + parallelism: 4 + no_output_timeout: 89m # Longer timeout for full tests test_timeout: 90m notify: true - filters: - branches: - only: op-es # Only runs on develop branch (post-merge) + # filters: + # branches: + # only: develop # Only runs on develop branch (post-merge) requires: - contracts-bedrock-build - cannon-prestate-quick @@ -2032,9 +2496,13 @@ workflows: - analyze-op-program-client: context: - circleci-repo-readonly-authenticated-github-token + requires: + - initialize - op-program-compat: context: - circleci-repo-readonly-authenticated-github-token + requires: + - initialize - bedrock-go-tests: requires: - go-lint @@ -2066,6 +2534,7 @@ workflows: - op-proposer - op-challenger - op-dispute-mon + - op-deployer - op-conductor - da-server - op-supervisor @@ -2075,9 +2544,13 @@ workflows: - op-interop-mon context: - circleci-repo-readonly-authenticated-github-token + requires: + - initialize - cannon-prestate-quick: context: - circleci-repo-readonly-authenticated-github-token + requires: + - initialize - sanitize-op-program: requires: - cannon-prestate-quick @@ -2086,9 +2559,13 @@ workflows: - check-generated-mocks-op-node: context: - circleci-repo-readonly-authenticated-github-token + requires: + - initialize - check-generated-mocks-op-service: context: - 
circleci-repo-readonly-authenticated-github-token + requires: + - initialize - cannon-go-lint-and-test: requires: - contracts-bedrock-build @@ -2109,8 +2586,28 @@ workflows: context: - circleci-repo-readonly-authenticated-github-token - # go-release-deployer: + # go-release-op-deployer: # jobs: + # - initialize: + # filters: + # tags: + # only: /^op-deployer.*/ + # branches: + # ignore: /.*/ + # context: + # - circleci-repo-readonly-authenticated-github-token + # - contracts-bedrock-build: + # name: build-contracts-go-release-op-deployer + # filters: + # tags: + # only: /^op-deployer.*/ + # branches: + # ignore: /.*/ + # build_args: --skip test + # context: + # - circleci-repo-readonly-authenticated-github-token + # requires: + # - initialize # - go-release: # filters: # tags: @@ -2120,15 +2617,53 @@ workflows: # module: op-deployer # context: # - oplabs-gcr-release + # - circleci-repo-readonly-authenticated-github-token + # requires: + # - build-contracts-go-release-op-deployer + + # go-release-op-up: + # jobs: + # - initialize: + # filters: + # tags: + # only: /^op-up.*/ + # branches: + # ignore: /.*/ + # context: + # - circleci-repo-readonly-authenticated-github-token + # - contracts-bedrock-build: + # name: build-contracts-go-release-op-up + # filters: + # tags: + # only: /^op-up.*/ + # branches: + # ignore: /.*/ + # build_args: --skip test + # context: + # - circleci-repo-readonly-authenticated-github-token + # requires: + # - initialize + # - go-release: + # filters: + # tags: + # only: /^op-up.*/ + # branches: + # ignore: /.*/ + # module: op-up + # context: + # - oplabs-gcr-release + # - circleci-repo-readonly-authenticated-github-token + # requires: + # - build-contracts-go-release-op-up # release: # when: # not: # equal: [scheduled_pipeline, << pipeline.trigger_source >>] # jobs: - # # Wait for approval on the release - # - hold: - # type: approval + # - initialize: + # context: + # - circleci-repo-readonly-authenticated-github-token # filters: # tags: # 
only: /^(da-server|cannon|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/ @@ -2167,7 +2702,7 @@ workflows: # context: # - oplabs-gcr-release # requires: - # - hold + # - initialize # # Checks for cross-platform images go here # - check-cross-platform: # matrix: @@ -2214,13 +2749,14 @@ workflows: # ignore: /.*/ # context: # - circleci-repo-readonly-authenticated-github-token + # requires: + # - initialize # - publish-cannon-prestates: # context: # - circleci-repo-readonly-authenticated-github-token # - slack # - oplabs-network-optimism-io-bucket # requires: - # - hold # - cannon-prestate # filters: # tags: @@ -2242,50 +2778,62 @@ workflows: # when: # or: # - and: - # - equal: ["develop", <>] - # - equal: ["webhook",<< pipeline.trigger_source >>] + # - equal: ["develop", <>] + # - equal: ["webhook", << pipeline.trigger_source >>] # - and: - # - equal: [true, <>] - # - equal: ["api",<< pipeline.trigger_source >>] + # - equal: + # [ + # true, + # <>, + # ] + # - equal: ["api", << pipeline.trigger_source >>] # jobs: # - publish-contract-artifacts: # context: # - circleci-repo-readonly-authenticated-github-token - develop-fault-proofs: - when: - and: - - or: - - equal: ["op-es", <>] - - equal: [true, <>] - - not: - equal: [scheduled_pipeline, << pipeline.trigger_source >>] - jobs: - - cannon-prestate: - context: - - circleci-repo-readonly-authenticated-github-token - - cannon-stf-verify: - context: - - slack - - circleci-repo-readonly-authenticated-github-token - - contracts-bedrock-build: - build_args: --deny-warnings --skip test - context: - - slack - - circleci-repo-readonly-authenticated-github-token - - go-tests-with-fault-proof-deps: - name: op-e2e-cannon-tests - notify: true - mentions: "@proofs-team" - no_output_timeout: 3h - test_timeout: 3h - resource_class: qkc/ax101 - context: - - slack - - circleci-repo-readonly-authenticated-github-token - requires: - - contracts-bedrock-build - - cannon-prestate + # develop-fault-proofs: + # when: + # or: + # - and: + # - equal: 
["develop", <>] + # - equal: ["webhook", << pipeline.trigger_source >>] + # - and: + # - equal: [true, <>] + # - equal: ["api", << pipeline.trigger_source >>] + # jobs: + # - initialize: + # context: + # - circleci-repo-readonly-authenticated-github-token + # - cannon-prestate: + # context: + # - circleci-repo-readonly-authenticated-github-token + # requires: + # - initialize + # - cannon-stf-verify: + # context: + # - slack + # - circleci-repo-readonly-authenticated-github-token + # - contracts-bedrock-build: + # build_args: --deny-warnings --skip test + # context: + # - slack + # - circleci-repo-readonly-authenticated-github-token + # requires: + # - initialize + # - go-tests-with-fault-proof-deps: + # name: op-e2e-cannon-tests + # notify: true + # mentions: "@proofs-team" + # no_output_timeout: 3h + # test_timeout: 3h + # resource_class: qkc/ax101 + # context: + # - slack + # - circleci-repo-readonly-authenticated-github-token + # requires: + # - contracts-bedrock-build + # - cannon-prestate # - publish-cannon-prestates: # context: # - slack @@ -2299,11 +2847,11 @@ workflows: # when: # or: # - and: - # - equal: ["develop", <>] - # - equal: ["webhook",<< pipeline.trigger_source >>] + # - equal: ["develop", <>] + # - equal: ["webhook", << pipeline.trigger_source >>] # - and: - # - equal: [true, <>] - # - equal: ["api",<< pipeline.trigger_source >>] + # - equal: [true, <>] + # - equal: ["api", << pipeline.trigger_source >>] # jobs: # - kontrol-tests: # context: @@ -2311,25 +2859,30 @@ workflows: # - runtimeverification # - circleci-repo-readonly-authenticated-github-token - scheduled-cannon-full-tests: - when: - or: - - equal: [build_four_hours, <>] - - equal: [true, << pipeline.parameters.cannon_full_test_dispatch >>] - jobs: - - contracts-bedrock-build: - build_args: --deny-warnings --skip test - context: - - circleci-repo-readonly-authenticated-github-token - - cannon-go-lint-and-test: - requires: - - contracts-bedrock-build - skip_slow_tests: false - 
no_output_timeout: 30m - notify: true - context: - - slack - - circleci-repo-readonly-authenticated-github-token + # scheduled-cannon-full-tests: + # when: + # or: + # - equal: [build_four_hours, <>] + # - equal: [true, << pipeline.parameters.cannon_full_test_dispatch >>] + # jobs: + # - initialize: + # context: + # - circleci-repo-readonly-authenticated-github-token + # - contracts-bedrock-build: + # build_args: --deny-warnings --skip test + # context: + # - circleci-repo-readonly-authenticated-github-token + # requires: + # - initialize + # - cannon-go-lint-and-test: + # requires: + # - contracts-bedrock-build + # skip_slow_tests: false + # no_output_timeout: 30m + # notify: true + # context: + # - slack + # - circleci-repo-readonly-authenticated-github-token # scheduled-docker-publish: # when: @@ -2338,6 +2891,9 @@ workflows: # # Trigger on manual triggers if explicitly requested # - equal: [true, << pipeline.parameters.docker_publish_dispatch >>] # jobs: + # - initialize: + # context: + # - circleci-repo-readonly-authenticated-github-token # - docker-build: # matrix: # parameters: @@ -2363,6 +2919,8 @@ workflows: # - oplabs-gcr # - slack # - circleci-repo-readonly-authenticated-github-token + # requires: + # - initialize # - check-cross-platform: # matrix: # parameters: @@ -2386,17 +2944,57 @@ workflows: # context: # - circleci-repo-readonly-authenticated-github-token - scheduled-preimage-reproducibility: - when: - or: - - equal: [build_daily, <>] - # Trigger on manual triggers if explicitly requested - - equal: [true, << pipeline.parameters.reproducibility_dispatch >>] - jobs: - - preimage-reproducibility: - context: - - slack - - circleci-repo-readonly-authenticated-github-token + # scheduled-flake-shake: + # when: + # or: + # - equal: [build_daily, << pipeline.schedule.name >>] + # - and: + # - equal: [true, << pipeline.parameters.flake-shake-dispatch >>] + # - equal: ["api", << pipeline.trigger_source >>] + # jobs: + # - initialize: + # context: + # - 
circleci-repo-readonly-authenticated-github-token + # - contracts-bedrock-build: + # build_args: --skip test + # context: + # - circleci-repo-readonly-authenticated-github-token + # requires: + # - initialize + # - cannon-prestate-quick: + # context: + # - circleci-repo-readonly-authenticated-github-token + # requires: + # - initialize + # - op-acceptance-tests-flake-shake: + # context: + # - circleci-repo-readonly-authenticated-github-token + # requires: + # - contracts-bedrock-build + # - cannon-prestate-quick + # - op-acceptance-tests-flake-shake-report: + # requires: + # - op-acceptance-tests-flake-shake + # - op-acceptance-tests-flake-shake-promote: + # requires: + # - op-acceptance-tests-flake-shake-report + # context: + # - circleci-repo-readonly-authenticated-github-token + # - circleci-repo-optimism + # - circleci-api-token + # - slack + + # scheduled-preimage-reproducibility: + # when: + # or: + # - equal: [build_daily, <>] + # # Trigger on manual triggers if explicitly requested + # - equal: [true, << pipeline.parameters.reproducibility_dispatch >>] + # jobs: + # - preimage-reproducibility: + # context: + # - slack + # - circleci-repo-readonly-authenticated-github-token # scheduled-stale-check: # when: @@ -2409,166 +3007,139 @@ workflows: # context: # - circleci-repo-optimism - # Acceptance tests (post-merge to develop) - acceptance-tests: - when: - or: - - and: - - equal: ["develop", <>] - - equal: ["webhook",<< pipeline.trigger_source >>] - - and: - - equal: [true, <>] - - equal: ["api",<< pipeline.trigger_source >>] - jobs: - - contracts-bedrock-build: # needed for sysgo tests - context: - - circleci-repo-readonly-authenticated-github-token - - cannon-prestate-quick: # needed for sysgo tests - context: - - circleci-repo-readonly-authenticated-github-token - # IN-PROCESS (base) - - op-acceptance-tests: - # Acceptance Testing params - name: memory-base - gate: base - # CircleCI params - no_output_timeout: 10m - context: - - 
circleci-repo-readonly-authenticated-github-token - - discord - requires: - - contracts-bedrock-build - - cannon-prestate-quick - # KURTOSIS (Simple) - - op-acceptance-tests: - # Acceptance Testing params - name: kurtosis-simple - devnet: simple - gate: base - # CircleCI params - no_output_timeout: 30m - context: - - circleci-repo-readonly-authenticated-github-token - - discord - # KURTOSIS (Isthmus) - - op-acceptance-tests: - # Acceptance Testing params - name: kurtosis-isthmus - devnet: isthmus - gate: isthmus - # CircleCI params - no_output_timeout: 30m - context: - - circleci-repo-readonly-authenticated-github-token - - discord - # KURTOSIS (Interop) - - op-acceptance-tests: - # Acceptance Testing params - name: kurtosis-interop - devnet: interop - gate: interop - # CircleCI params - no_output_timeout: 30m - context: - - circleci-repo-readonly-authenticated-github-token - - discord - # Generate flaky test report - - generate-flaky-report: - name: generate-flaky-tests-report - context: - - circleci-repo-readonly-authenticated-github-token - - circleci-api-token + # scheduled-sync-test-op-node: + # when: + # or: + # - equal: [build_daily, <>] + # # Trigger on manual triggers if explicitly requested + # - equal: [true, << pipeline.parameters.sync_test_op_node_dispatch >>] + # jobs: + # - initialize: + # context: + # - circleci-repo-readonly-authenticated-github-token + # - contracts-bedrock-build: # needed for sysgo tests + # build_args: --skip test + # context: + # - circleci-repo-readonly-authenticated-github-token + # requires: + # - initialize + # - cannon-prestate-quick: # needed for sysgo tests + # context: + # - circleci-repo-readonly-authenticated-github-token + # requires: + # - initialize + # - op-acceptance-sync-tests-docker: + # name: "sync-test-<>-daily-<>" + # gate: sync-test-op-node + # no_output_timeout: 30m + # context: + # - circleci-repo-readonly-authenticated-github-token + # - discord + # requires: + # - contracts-bedrock-build + # - 
cannon-prestate-quick + # matrix: + # parameters: + # network_preset: ["op-sepolia", "base-sepolia", "unichain-sepolia", "op-mainnet", "base-mainnet"] + # l2_cl_syncmode: ["consensus-layer", "execution-layer"] - # Acceptance tests (pre-merge to develop) - acceptance-tests-pr: - when: - not: - equal: [<< pipeline.git.branch >>, "develop"] - jobs: - - contracts-bedrock-build: # needed for sysgo tests - context: - - circleci-repo-readonly-authenticated-github-token - - cannon-prestate-quick: # needed for sysgo tests - context: - - circleci-repo-readonly-authenticated-github-token - # IN-PROCESS (base) - - op-acceptance-tests: - # Acceptance Testing params - name: memory-base - gate: base - # CircleCI params - no_output_timeout: 10m - context: - - circleci-repo-readonly-authenticated-github-token - - discord - requires: - - contracts-bedrock-build - - cannon-prestate-quick - # KURTOSIS (Simple) - - op-acceptance-tests: - # Acceptance Testing params - name: kurtosis-simple - devnet: simple - gate: base - # CircleCI params - no_output_timeout: 30m - context: - - circleci-repo-readonly-authenticated-github-token - - discord - # KURTOSIS (Isthmus) - - op-acceptance-tests: - # Acceptance Testing params - name: kurtosis-isthmus - devnet: isthmus - gate: isthmus - # CircleCI params - no_output_timeout: 30m - context: - - circleci-repo-readonly-authenticated-github-token - - discord - # KURTOSIS (Interop) - - op-acceptance-tests: - # Acceptance Testing params - name: kurtosis-interop - devnet: interop - gate: interop - # CircleCI params - no_output_timeout: 30m - context: - - circleci-repo-readonly-authenticated-github-token - - discord - # Generate flaky test report - - generate-flaky-report: - name: generate-flaky-tests-report - context: - - circleci-repo-readonly-authenticated-github-token - - circleci-api-token + # scheduled-heavy-fuzz-tests: + # when: + # or: + # - equal: [build_daily, <>] + # - equal: [true, << pipeline.parameters.heavy_fuzz_dispatch >>] + # jobs: + # - 
initialize: + # context: + # - circleci-repo-readonly-authenticated-github-token + # - contracts-bedrock-heavy-fuzz-nightly: + # context: + # - slack + # - circleci-repo-readonly-authenticated-github-token + # requires: + # - initialize - close-issue-workflow: - when: - and: - - equal: [<< pipeline.trigger_source >>, "api"] - - equal: [<< pipeline.parameters.github-event-type >>, "pull_request"] - - equal: [<< pipeline.parameters.github-event-action >>, "labeled"] - jobs: - - close-issue: - label_name: "auto-close-trivial-contribution" - message: "Thank you for your interest in contributing! - At this time, we are not accepting contributions that primarily fix spelling, stylistic, or grammatical errors in documentation, code, or elsewhere. - Please check our [contribution guidelines](https://github.com/ethereum-optimism/optimism/blob/develop/CONTRIBUTING.md#contributions-related-to-spelling-and-grammar) for more information. - This issue will be closed now." - context: - - circleci-repo-optimism + # # Acceptance tests + # acceptance-tests: + # when: + # or: + # - equal: ["webhook", << pipeline.trigger_source >>] + # # Manual dispatch + # - and: + # - equal: [true, <>] + # - equal: ["api", << pipeline.trigger_source >>] + # jobs: + # - initialize: + # context: + # - circleci-repo-readonly-authenticated-github-token + # - contracts-bedrock-build: # needed for sysgo tests + # build_args: --skip test + # context: + # - circleci-repo-readonly-authenticated-github-token + # requires: + # - initialize + # - cannon-prestate-quick: # needed for sysgo tests + # context: + # - circleci-repo-readonly-authenticated-github-token + # requires: + # - initialize + # # IN-MEMORY (all) + # - op-acceptance-tests: + # name: memory-all + # gate: "" # Empty gate = gateless mode + # no_output_timeout: 90m + # context: + # - circleci-repo-readonly-authenticated-github-token + # - discord + # requires: + # - contracts-bedrock-build + # - cannon-prestate-quick + # # Generate flaky test 
report + # - generate-flaky-report: + # name: generate-flaky-tests-report + # context: + # - circleci-repo-readonly-authenticated-github-token + # - circleci-api-token - devnet-metrics-collect: - when: - or: - - equal: [<< pipeline.trigger_source >>, "webhook"] - - and: - - equal: [true, << pipeline.parameters.devnet-metrics-collect >>] - - equal: [<< pipeline.trigger_source >>, "api"] - jobs: - - devnet-metrics-collect-authorship: - context: - - circleci-repo-readonly-authenticated-github-token - - oplabs-tools-data-public-metrics-bucket + # close-issue-workflow: + # when: + # and: + # - equal: [<< pipeline.trigger_source >>, "api"] + # - equal: [<< pipeline.parameters.github-event-type >>, "pull_request"] + # - equal: [<< pipeline.parameters.github-event-action >>, "labeled"] + # jobs: + # - close-issue: + # label_name: "auto-close-trivial-contribution" + # message: "Thank you for your interest in contributing! + # At this time, we are not accepting contributions that primarily fix spelling, stylistic, or grammatical errors in documentation, code, or elsewhere. + # Please check our [contribution guidelines](https://github.com/ethereum-optimism/optimism/blob/develop/CONTRIBUTING.md#contributions-related-to-spelling-and-grammar) for more information. + # This issue will be closed now." 
+ # context: + # - circleci-repo-optimism + + # devnet-metrics-collect: + # when: + # or: + # - equal: [<< pipeline.trigger_source >>, "webhook"] + # - and: + # - equal: [true, << pipeline.parameters.devnet-metrics-collect >>] + # - equal: [<< pipeline.trigger_source >>, "api"] + # jobs: + # - devnet-metrics-collect-authorship: + # context: + # - circleci-repo-readonly-authenticated-github-token + # - oplabs-tools-data-public-metrics-bucket + + # ai-contracts-test-workflow: + # when: + # equal: [true, << pipeline.parameters.ai_contracts_test_dispatch >>] + # jobs: + # - initialize: + # context: + # - circleci-repo-readonly-authenticated-github-token + # - ai-contracts-test: + # context: + # - circleci-repo-readonly-authenticated-github-token + # - devin-api + # requires: + # - initialize diff --git a/.cursor/rules/solidity-styles.mdc b/.cursor/rules/solidity-styles.mdc index be71010725287..31d243a9bc358 100644 --- a/.cursor/rules/solidity-styles.mdc +++ b/.cursor/rules/solidity-styles.mdc @@ -36,8 +36,8 @@ Applies to Solidity files. - Contracts should be built assuming upgradeability by default - Extend OpenZeppelin's `Initializable` or base contract - Use the `ReinitializableBase` contract -- Include an `initialize` function with the `reinitializer(initValue())` modifier -- If including an `upgrade` function, use the `reinitializer(initValue())` modifier +- Include an `initialize` function with the `reinitializer(initVersion())` modifier +- If including an `upgrade` function, use the `reinitializer(initVersion())` modifier - In the constructor: - Call `_disableInitializers()` - Set any immutables (though generally avoid immutables) @@ -63,7 +63,7 @@ Applies to Solidity files. 
- `method`: `test`, `testFuzz`, or `testDiff` - `status`: `succeeds`, `reverts`, `works`, `fails`, or `benchmark` - Test contract naming: - - `TargetContract_Init`: for basic setup contracts + - `TargetContract_TestInit`: for basic setup contracts - `TargetContract_FunctionName_Test`: for happy path tests - `TargetContract_FunctionName_TestFail`: for sad path tests - `TargetContract_Harness`: for harness contracts diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index c9cb8ad6f1d3d..27ead6d7544f1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -28,10 +28,6 @@ /op-e2e/faultproofs @ethereum-optimism/proofs @ethereum-optimism/go-reviewers # Ops -/.circleci @ethereum-optimism/monorepo-ops-reviewers -/.github @ethereum-optimism/monorepo-ops-reviewers -/ops @ethereum-optimism/monorepo-ops-reviewers -/docker-bake.hcl @ethereum-optimism/monorepo-ops-reviewers /.cursor/rules/solidity-styles.mdc @ethereum-optimism/contract-reviewers # Contracts diff --git a/.github/cliff.toml b/.github/cliff.toml new file mode 100644 index 0000000000000..ec78ce0504b8c --- /dev/null +++ b/.github/cliff.toml @@ -0,0 +1,82 @@ +# git-cliff ~ configuration file +# https://git-cliff.org/docs/configuration + +[remote.github] +owner = "ethereum-optimism" +repo = "optimism" +# token = "" # Set via the GITHUB_TOKEN environment variable. + +[changelog] +# A Tera template to be rendered for each release in the changelog. 
+# See https://keats.github.io/tera/docs/#introduction +body = """ +## What's Changed + +{%- if version %} in {{ version }}{%- endif -%} +{% for commit in commits %} + {% if commit.remote.pr_title -%} + {%- set commit_message = commit.remote.pr_title -%} + {%- else -%} + {%- set commit_message = commit.message -%} + {%- endif -%} + * {{ commit_message | split(pat="\n") | first | trim }}\ + {% if commit.remote.username %} by @{{ commit.remote.username }}{%- endif -%} + {% if commit.remote.pr_number %} in \ + [#{{ commit.remote.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.remote.pr_number }}) \ + {%- endif %} +{%- endfor -%} + +{%- if github -%} +{% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %} + {% raw %}\n{% endraw -%} + ## New Contributors +{%- endif %}\ +{% for contributor in github.contributors | filter(attribute="is_first_time", value=true) %} + * @{{ contributor.username }} made their first contribution + {%- if contributor.pr_number %} in \ + [#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \ + {%- endif %} +{%- endfor -%} +{%- endif -%} + +{% if version %} + {% if previous.version %} + **Full Changelog**: {{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }} + {% endif %} + 🚢 Docker Image: https://us-docker.pkg.dev/oplabs-tools-artifacts/images/{{ version | replace(from="/", to=":") }} +{% else -%} + {% raw %}\n{% endraw %} +{% endif %} + +{%- macro remote_url() -%} + https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }} +{%- endmacro -%} +""" +# Remove leading and trailing whitespaces from the changelog's body. +trim = true +# A Tera template to be rendered as the changelog's footer. +# See https://keats.github.io/tera/docs/#introduction +footer = """ + +""" +# An array of regex based postprocessors to modify the changelog. +# Replace the placeholder `` with a URL. 
+postprocessors = [] + +[git] +# Parse commits according to the conventional commits specification. +# See https://www.conventionalcommits.org +conventional_commits = false +# Exclude commits that do not match the conventional commits specification. +filter_unconventional = false +# Split commits on newlines, treating each line as an individual commit. +split_commits = false +# An array of regex based parsers to modify commit messages prior to further processing. +commit_preprocessors = [{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "" }] +# Exclude commits that are not matched by any commit parser. +filter_commits = false +# Order releases topologically instead of chronologically. +topo_order = false +# Order of commits in each group/release within the changelog. +# Allowed values: newest, oldest +sort_commits = "newest" diff --git a/.gitignore b/.gitignore index ea86d7c683480..bf22c3755f50c 100644 --- a/.gitignore +++ b/.gitignore @@ -22,7 +22,6 @@ cache !op-deployer/pkg/deployer/artifacts - packages/contracts-bedrock/deployments/anvil # vim @@ -45,8 +44,10 @@ packages/contracts-bedrock/deployments/anvil coverage.out - __pycache__ # Ignore echidna artifacts crytic-export + +# ignore local asdf config +.tool-versions diff --git a/.golangci.yaml b/.golangci.yaml new file mode 100644 index 0000000000000..9f4ca6b435a0f --- /dev/null +++ b/.golangci.yaml @@ -0,0 +1,36 @@ +linters: + enable: + - goimports + - sqlclosecheck + - bodyclose + - asciicheck + - misspell + - errorlint + + # Only enabled in specific cases. 
See settings and exclusions below + - exhaustruct + - err113 + + # Enabled by default but be explicit so it's easy to see what we're running + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - unused +linters-settings: + # Require specifying all fields in op-deployer's OPCM input and output structs + exhaustruct: + include: + - '.*op-deployer/pkg/deployer/opcm\..*(Input|Output)$' +issues: + exclude: + - 'errors.As' + - 'errors.Is' + exclude-rules: + # Only apply err113 to op-program/client + - path-except: 'op-program/client/.*' + linters: + - err113 +run: + timeout: 5m diff --git a/.semgrep/rules/sol-rules.yaml b/.semgrep/rules/sol-rules.yaml index d656881df9368..31cacaf334ee0 100644 --- a/.semgrep/rules/sol-rules.yaml +++ b/.semgrep/rules/sol-rules.yaml @@ -110,6 +110,7 @@ rules: exclude: - packages/contracts-bedrock/test - packages/contracts-bedrock/scripts + - ops/ai-eng/contracts-test-maintenance/prompt/prompt.md - id: sol-style-input-arg-fmt languages: [solidity] @@ -319,6 +320,7 @@ rules: exclude: - packages/contracts-bedrock/src/L1/OPContractsManager.sol - packages/contracts-bedrock/src/L1/OptimismPortal2.sol + - packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol - packages/contracts-bedrock/src/L2/FeeVault.sol - packages/contracts-bedrock/src/L2/OptimismMintableERC721.sol - packages/contracts-bedrock/src/L2/OptimismMintableERC721Factory.sol @@ -328,7 +330,9 @@ rules: - packages/contracts-bedrock/src/dispute/AnchorStateRegistry.sol - packages/contracts-bedrock/src/dispute/DelayedWETH.sol - packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol + - packages/contracts-bedrock/src/dispute/v2/FaultDisputeGameV2.sol - packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol + - packages/contracts-bedrock/src/dispute/v2/PermissionedDisputeGameV2.sol - packages/contracts-bedrock/src/dispute/SuperFaultDisputeGame.sol - packages/contracts-bedrock/src/dispute/SuperPermissionedDisputeGame.sol - 
packages/contracts-bedrock/src/governance/MintManager.sol @@ -362,3 +366,20 @@ rules: paths: exclude: - packages/contracts-bedrock/scripts/libraries/Config.sol + + - id: sol-style-event-param-fmt + languages: [solidity] + severity: ERROR + message: Event parameters must be named using camelCase and must not be prefixed with underscore + pattern-either: + # Match parameters with underscore prefix + - pattern-regex: event\s+\w+\s*\([^)]*\b(?:address|uint\d*|int\d*|bytes\d*|bool|string)\s+(?:indexed\s+)?_\w+ + # Match unnamed parameters (type with optional indexed but no parameter name before comma or closing paren) + - pattern-regex: event\s+\w+\s*\([^)]*\b(?:address|uint\d*|int\d*|bytes\d*|bool|string)\b(?:\s+indexed)?\s*[,)] + # Match parameters that are all uppercase (like NEW_OWNER) + - pattern-regex: event\s+\w+\s*\([^)]*\b(?:address|uint\d*|int\d*|bytes\d*|bool|string)\s+(?:indexed\s+)?[A-Z][A-Z0-9_]*\s*[,)] + paths: + exclude: + # LegacyMintableERC20 and the corresponding interface use legacy naming conventions. 
+ - packages/contracts-bedrock/src/legacy/LegacyMintableERC20.sol + - packages/contracts-bedrock/interfaces/legacy/ILegacyMintableERC20Full.sol diff --git a/.semgrep/tests/sol-rules.t.sol b/.semgrep/tests/sol-rules.t.sol index e98175d62a48c..9d0179318c978 100644 --- a/.semgrep/tests/sol-rules.t.sol +++ b/.semgrep/tests/sol-rules.t.sol @@ -712,3 +712,23 @@ contract SemgrepTest__sol_safety_try_catch_eip_150 { } } } + +contract SemgrepTest__sol_style_event_param_fmt { + // ok: sol-style-event-param-fmt + event OwnerChanged(address previousOwner, address newOwner); + + // ruleid: sol-style-event-param-fmt + event OwnerChanged(address _previousOwner, address _newOwner); + + // ruleid: sol-style-event-param-fmt + event OwnerChanged(address); + + // ruleid: sol-style-event-param-fmt + event OwnerChanged(address NEW_OWNER); + + // ok: sol-style-event-param-fmt + event SomethingWithMint(uint256 mint); + + // ruleid: sol-style-event-param-fmt + event SomethingWithMint(uint256 _mint); +} diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1e77ad3997cff..e851085a56e44 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -24,6 +24,34 @@ spelling, stylistic or grammatical errors in documentation, code or elsewhere. Pull Requests that ignore this guideline will be closed, and may be aggregated into new Pull Requests without attribution. +### Trivial Changes Discouraged + +We kindly ask contributors **not to submit pull requests that only make trivial changes**, such as: + +- Adjusting whitespace, punctuation, or formatting without substantive content changes +- Renaming variables or refactoring code without a clear functional or readability benefit +- Other one‑line or cosmetic edits that do not materially improve the project + +#### Why we discourage trivial PRs + +While we appreciate every attempt to contribute, **small, non‑substantive PRs create significant review and merge overhead** for maintainers without meaningfully improving the codebase or documentation. 
Reviewing, testing, and merging even a one‑word change takes real time and effort. + +To keep maintainers focused on impactful contributions, we will: + +- **Close trivial PRs** that fall into the above categories +- **Aggregate minor typo or formatting fixes internally** if needed, without attribution +- Encourage contributors to instead take on meaningful issues, such as those labeled [`D-good-first-issue`](https://github.com/ethereum-optimism/optimism/labels/D-good-first-issue) or [`S-confirmed`](https://github.com/ethereum-optimism/optimism/labels/S-confirmed) + +#### Better ways to help + +If you’d like to contribute in a small but meaningful way: + +- **Batch several fixes into one PR** instead of submitting many tiny ones +- Look for beginner‑friendly issues in our [`D-good-first-issue`](https://github.com/ethereum-optimism/optimism/labels/D-good-first-issue) label +- Improve documentation by adding missing explanations, examples, or clarifications rather than small grammar tweaks + +This ensures your effort has a **real impact** and increases the chance your PR will be reviewed and merged quickly. + ## Code of Conduct Interactions within this repository are subject to a [Code of Conduct](https://github.com/ethereum-optimism/.github/blob/master/CODE_OF_CONDUCT.md) adapted from the [Contributor Covenant](https://www.contributor-covenant.org/version/1/4/code-of-conduct/). diff --git a/Makefile b/Makefile index 96f03c4fa87f5..4604e65be9aee 100644 --- a/Makefile +++ b/Makefile @@ -21,13 +21,12 @@ build-contracts: .PHONY: build-contracts lint-go: ## Lints Go code with specific linters - golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./... - golangci-lint run -E err113 --timeout 5m -e "errors.As" -e "errors.Is" ./op-program/client/... + golangci-lint run ./... 
go mod tidy -diff .PHONY: lint-go lint-go-fix: ## Lints Go code with specific linters and fixes reported issues - golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./... --fix + golangci-lint run ./... --fix .PHONY: lint-go-fix golang-docker: ## Builds Docker images for Go components using buildx @@ -207,7 +206,6 @@ TEST_PKGS := \ ./packages/contracts-bedrock/scripts/checks/... \ ./op-dripper/... \ ./devnet-sdk/... \ - ./op-acceptance-tests/... \ ./kurtosis-devnet/... \ ./op-devstack/... \ ./op-deployer/pkg/deployer/artifacts/... \ @@ -231,6 +229,9 @@ RPC_TEST_PKGS := \ ./op-deployer/pkg/deployer/pipeline/... \ ./op-deployer/pkg/deployer/upgrade/... +# All test packages used by CI (combination of all package groups) +ALL_TEST_PACKAGES := $(TEST_PKGS) $(RPC_TEST_PKGS) $(FRAUD_PROOF_TEST_PKGS) + # Common test environment variables # For setting PARALLEL, nproc is for linux, sysctl for Mac and then fallback to 4 if neither is available define DEFAULT_TEST_ENV_VARS @@ -245,7 +246,9 @@ endef define CI_ENV_VARS export OP_TESTLOG_FILE_LOGGER_OUTDIR=$$(realpath ./tmp/testlogs) && \ export SEPOLIA_RPC_URL="https://ci-sepolia-l1-archive.optimism.io" && \ -export MAINNET_RPC_URL="https://ci-mainnet-l1-archive.optimism.io" +export MAINNET_RPC_URL="https://ci-mainnet-l1-archive.optimism.io" && \ +export NAT_INTEROP_LOADTEST_TARGET=10 && \ +export NAT_INTEROP_LOADTEST_TIMEOUT=30s endef # Test timeout (can be overridden via environment) @@ -261,34 +264,48 @@ go-tests-short: $(TEST_DEPS) ## Runs comprehensive Go tests with -short flag go test -short -parallel=$$PARALLEL -timeout=$(TEST_TIMEOUT) $(TEST_PKGS) .PHONY: go-tests-short -go-tests-short-ci: ## Runs short Go tests with gotestsum for CI (assumes deps built by CI) +# Internal target for running Go tests with gotestsum for CI +# Usage: make _go-tests-ci-internal GO_TEST_FLAGS="-short" +_go-tests-ci-internal: @echo "Setting up test directories..." 
mkdir -p ./tmp/test-results ./tmp/testlogs @echo "Running Go tests with gotestsum..." $(DEFAULT_TEST_ENV_VARS) && \ $(CI_ENV_VARS) && \ - gotestsum --format=testname \ - --junitfile=./tmp/test-results/results.xml \ - --jsonfile=./tmp/testlogs/log.json \ - --rerun-fails=3 \ - --rerun-fails-max-failures=50 \ - --packages="$(TEST_PKGS) $(RPC_TEST_PKGS) $(FRAUD_PROOF_TEST_PKGS)" \ - -- -parallel=$$PARALLEL -coverprofile=coverage.out -short -timeout=$(TEST_TIMEOUT) -tags="ci" + if [ -n "$$CIRCLE_NODE_TOTAL" ] && [ "$$CIRCLE_NODE_TOTAL" -gt 1 ]; then \ + export NODE_INDEX=$${CIRCLE_NODE_INDEX:-0} && \ + export NODE_TOTAL=$${CIRCLE_NODE_TOTAL:-1} && \ + export PARALLEL_PACKAGES=$$(echo "$(ALL_TEST_PACKAGES)" | tr ' ' '\n' | awk -v idx=$$NODE_INDEX -v total=$$NODE_TOTAL 'NR % total == idx' | tr '\n' ' ') && \ + if [ -n "$$PARALLEL_PACKAGES" ]; then \ + echo "Node $$NODE_INDEX/$$NODE_TOTAL running packages: $$PARALLEL_PACKAGES"; \ + gotestsum --format=testname \ + --junitfile=./tmp/test-results/results-$$NODE_INDEX.xml \ + --jsonfile=./tmp/testlogs/log-$$NODE_INDEX.json \ + --rerun-fails=3 \ + --rerun-fails-max-failures=50 \ + --packages="$$PARALLEL_PACKAGES" \ + -- -parallel=$$PARALLEL -coverprofile=coverage-$$NODE_INDEX.out $(GO_TEST_FLAGS) -timeout=$(TEST_TIMEOUT) -tags="ci"; \ + else \ + echo "ERROR: Node $$NODE_INDEX/$$NODE_TOTAL has no packages to run! Perhaps parallelism is set too high? 
(ALL_TEST_PACKAGES has $$(echo '$(ALL_TEST_PACKAGES)' | wc -w) packages)"; \ + exit 1; \ + fi; \ + else \ + gotestsum --format=testname \ + --junitfile=./tmp/test-results/results.xml \ + --jsonfile=./tmp/testlogs/log.json \ + --rerun-fails=3 \ + --rerun-fails-max-failures=50 \ + --packages="$(ALL_TEST_PACKAGES)" \ + -- -parallel=$$PARALLEL -coverprofile=coverage.out $(GO_TEST_FLAGS) -timeout=$(TEST_TIMEOUT) -tags="ci"; \ + fi +.PHONY: _go-tests-ci-internal + +go-tests-short-ci: ## Runs short Go tests with gotestsum for CI (assumes deps built by CI) + $(MAKE) _go-tests-ci-internal GO_TEST_FLAGS="-short" .PHONY: go-tests-short-ci go-tests-ci: ## Runs comprehensive Go tests with gotestsum for CI (assumes deps built by CI) - @echo "Setting up test directories..." - mkdir -p ./tmp/test-results ./tmp/testlogs - @echo "Running Go tests with gotestsum..." - $(DEFAULT_TEST_ENV_VARS) && \ - $(CI_ENV_VARS) && \ - gotestsum --format=testname \ - --junitfile=./tmp/test-results/results.xml \ - --jsonfile=./tmp/testlogs/log.json \ - --rerun-fails=3 \ - --rerun-fails-max-failures=50 \ - --packages="$(TEST_PKGS) $(RPC_TEST_PKGS) $(FRAUD_PROOF_TEST_PKGS)" \ - -- -parallel=$$PARALLEL -coverprofile=coverage.out -timeout=$(TEST_TIMEOUT) -tags="ci" + $(MAKE) _go-tests-ci-internal GO_TEST_FLAGS="" .PHONY: go-tests-ci go-tests-fraud-proofs-ci: ## Runs fraud proofs Go tests with gotestsum for CI (assumes deps built by CI) diff --git a/README.md b/README.md index e89b10785624a..ea9818766bcf4 100644 --- a/README.md +++ b/README.md @@ -63,22 +63,38 @@ The Optimism Immunefi program offers up to $2,000,042 for in-scope critical vuln ## Directory Structure
+├── cannon: Onchain MIPS instruction emulator for fault proofs
+├── devnet-sdk: Comprehensive toolkit for standardized devnet interactions
 ├── docs: A collection of documents including audits and post-mortems
 ├── kurtosis-devnet: OP-Stack Kurtosis devnet
+├── op-acceptance-tests: Acceptance tests and configuration for OP Stack
+├── op-alt-da: Alternative Data Availability mode (beta)
 ├── op-batcher: L2-Batch Submitter, submits bundles of batches to L1
 ├── op-chain-ops: State surgery utilities
 ├── op-challenger: Dispute game challenge agent
+├── op-conductor: High-availability sequencer service
+├── op-deployer: CLI tool for deploying and upgrading OP Stack smart contracts
+├── op-devstack: Flexible test frontend for integration and acceptance testing
+├── op-dispute-mon: Off-chain service to monitor dispute games
+├── op-dripper: Controlled token distribution service
 ├── op-e2e: End-to-End testing of all bedrock components in Go
-├── op-node: rollup consensus-layer client
+├── op-faucet: Dev-faucet with support for multiple chains
+├── op-fetcher: Data fetching utilities
+├── op-interop-mon: Interoperability monitoring service
+├── op-node: Rollup consensus-layer client
 ├── op-preimage: Go bindings for Preimage Oracle
 ├── op-program: Fault proof program
 ├── op-proposer: L2-Output Submitter, submits proposals to L1
 ├── op-service: Common codebase utilities
+├── op-supervisor: Service to monitor chains and determine cross-chain message safety
+├── op-sync-tester: Sync testing utilities
+├── op-test-sequencer: Test sequencer for development
+├── op-up: Deployment and management utilities
+├── op-validator: Tool for validating Optimism chain configurations and deployments
 ├── op-wheel: Database utilities
 ├── ops: Various operational packages
 ├── packages
 │   ├── contracts-bedrock: OP Stack smart contracts
-├── semgrep: Semgrep rules and tests
 
## Development and Release Process diff --git a/cannon/Makefile b/cannon/Makefile index 9097f07192773..4aeae11a06801 100644 --- a/cannon/Makefile +++ b/cannon/Makefile @@ -91,9 +91,9 @@ cannon-stf-verify: fuzz: printf "%s\n" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzStateConsistencyMulOp ./mipsevm/tests" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzStateConsistencyMultOp ./mipsevm/tests" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzStateConsistencyMultuOp ./mipsevm/tests" \ + "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzMulOp ./mipsevm/tests" \ + "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzMultOp ./mipsevm/tests" \ + "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzMultuOp ./mipsevm/tests" \ "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzStateSyscallBrk ./mipsevm/tests" \ "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzStateSyscallMmap ./mipsevm/tests" \ "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzStateSyscallExitGroup ./mipsevm/tests" \ diff --git a/cannon/cmd/run.go b/cannon/cmd/run.go index 0cad2107b1730..bd35f22f6e2e8 100644 --- a/cannon/cmd/run.go +++ b/cannon/cmd/run.go @@ -99,7 +99,7 @@ var ( RunInfoAtFlag = &cli.GenericFlag{ Name: "info-at", Usage: "step pattern to print info at: " + patternHelp, - Value: MustStepMatcherFlag("%100000"), + Value: MustStepMatcherFlag("%1000000000"), Required: false, } RunPProfCPU = &cli.BoolFlag{ @@ -395,7 +395,7 @@ func Run(ctx *cli.Context) error { } } - state, err := versions.LoadStateFromFile(ctx.Path(RunInputFlag.Name)) + state, err := versions.LoadStateFromFileWithLargeICache(ctx.Path(RunInputFlag.Name)) if err != nil { return fmt.Errorf("failed to 
load state: %w", err) } diff --git a/cannon/mipsevm/arch/arch64.go b/cannon/mipsevm/arch/arch64.go index 7169fc474e7fc..58f84c6927511 100644 --- a/cannon/mipsevm/arch/arch64.go +++ b/cannon/mipsevm/arch/arch64.go @@ -15,11 +15,12 @@ const ( ExtMask = 0x7 // Ensure virtual address is limited to 48-bits as many user programs assume such to implement packed pointers - // limit 0x00_00_FF_FF_FF_FF_FF_FF - HeapStart = 0x00_00_10_00_00_00_00_00 - HeapEnd = 0x00_00_60_00_00_00_00_00 - ProgramBreak = 0x00_00_40_00_00_00_00_00 - HighMemoryStart = 0x00_00_7F_FF_FF_FF_F0_00 + Limit = 0x00_00_FF_FF_FF_FF_FF_FF + ProgramHeapStart = 0x00_00_00_c0_00_00_00_00 + HeapStart = 0x00_00_10_00_00_00_00_00 + HeapEnd = 0x00_00_60_00_00_00_00_00 + ProgramBreak = 0x00_00_40_00_00_00_00_00 + HighMemoryStart = 0x00_00_7F_FF_FF_FF_F0_00 ) // MIPS64 syscall table - https://github.com/torvalds/linux/blob/3efc57369a0ce8f76bf0804f7e673982384e4ac9/arch/mips/kernel/syscalls/syscall_n64.tbl. Generate the syscall numbers using the Makefile in that directory. 
diff --git a/cannon/mipsevm/debug.go b/cannon/mipsevm/debug.go index 02c20ea248239..ffa6bfaaeb641 100644 --- a/cannon/mipsevm/debug.go +++ b/cannon/mipsevm/debug.go @@ -3,11 +3,13 @@ package mipsevm import "github.com/ethereum/go-ethereum/common/hexutil" type DebugInfo struct { - Pages int `json:"pages"` - MemoryUsed hexutil.Uint64 `json:"memory_used"` - NumPreimageRequests int `json:"num_preimage_requests"` - TotalPreimageSize int `json:"total_preimage_size"` - TotalSteps uint64 `json:"total_steps"` + Pages int `json:"pages"` + MemoryUsed hexutil.Uint64 `json:"memory_used"` + NumPreimageRequests int `json:"num_preimage_requests"` + TotalPreimageSize int `json:"total_preimage_size"` + TotalSteps uint64 `json:"total_steps"` + InstructionCacheMissCount uint64 `json:"instruction_cache_miss_count"` + HighestICacheMissPC hexutil.Uint64 `json:"highest_icache_miss_pc"` // Multithreading-related stats below RmwSuccessCount uint64 `json:"rmw_success_count"` RmwFailCount uint64 `json:"rmw_fail_count"` diff --git a/cannon/mipsevm/debug_test.go b/cannon/mipsevm/debug_test.go index e2b6b8e43ab69..df567b202bd2d 100644 --- a/cannon/mipsevm/debug_test.go +++ b/cannon/mipsevm/debug_test.go @@ -18,6 +18,8 @@ func TestDebugInfo_Serialization(t *testing.T) { NumPreimageRequests: 3, TotalPreimageSize: 4, TotalSteps: 123456, + InstructionCacheMissCount: 10, + HighestICacheMissPC: 11, RmwSuccessCount: 5, RmwFailCount: 6, MaxStepsBetweenLLAndSC: 7, diff --git a/cannon/mipsevm/exec/mips_instructions.go b/cannon/mipsevm/exec/mips_instructions.go index 8f0d42e09858d..7fc4954b82704 100644 --- a/cannon/mipsevm/exec/mips_instructions.go +++ b/cannon/mipsevm/exec/mips_instructions.go @@ -38,7 +38,7 @@ func GetInstructionDetails(pc Word, memory *memory.Memory) (insn, opcode, fun ui // ExecMipsCoreStepLogic executes a MIPS instruction that isn't a syscall nor a RMW operation // If a store operation occurred, then it returns the effective address of the store memory location. 
-func ExecMipsCoreStepLogic(cpu *mipsevm.CpuScalars, registers *[32]Word, memory *memory.Memory, insn, opcode, fun uint32, memTracker MemTracker, stackTracker StackTracker, features mipsevm.FeatureToggles) (memUpdated bool, effMemAddr Word, err error) { +func ExecMipsCoreStepLogic(cpu *mipsevm.CpuScalars, registers *[32]Word, memory *memory.Memory, insn, opcode, fun uint32, memTracker MemTracker, stackTracker StackTracker) (memUpdated bool, effMemAddr Word, err error) { // j-type j/jal if opcode == 2 || opcode == 3 { linkReg := Word(0) @@ -117,7 +117,7 @@ func ExecMipsCoreStepLogic(cpu *mipsevm.CpuScalars, registers *[32]Word, memory } // ALU - val := ExecuteMipsInstruction(insn, opcode, fun, rs, rt, mem, features) + val := ExecuteMipsInstruction(insn, opcode, fun, rs, rt, mem) funSel := uint32(0x1c) if !arch.IsMips32 { @@ -182,7 +182,7 @@ func assertMips64Fun(fun uint32) { } } -func ExecuteMipsInstruction(insn uint32, opcode uint32, fun uint32, rs, rt, mem Word, features mipsevm.FeatureToggles) Word { +func ExecuteMipsInstruction(insn uint32, opcode uint32, fun uint32, rs, rt, mem Word) Word { if opcode == 0 || (opcode >= 8 && opcode < 0xF) || (!arch.IsMips32 && (opcode == 0x18 || opcode == 0x19)) { // transform ArithLogI to SPECIAL switch opcode { @@ -350,7 +350,7 @@ func ExecuteMipsInstruction(insn uint32, opcode uint32, fun uint32, rs, rt, mem rs <<= 1 } return Word(i) - case features.SupportDclzDclo && (fun == 0x24 || fun == 0x25): // dclz, dclo + case fun == 0x24 || fun == 0x25: // dclz, dclo assertMips64Fun(insn) if fun == 0x24 { rs = ^rs diff --git a/cannon/mipsevm/iface.go b/cannon/mipsevm/iface.go index 3fe375270537f..f12197a617639 100644 --- a/cannon/mipsevm/iface.go +++ b/cannon/mipsevm/iface.go @@ -74,9 +74,6 @@ type Metadata interface { // Toggles here are temporary and should be removed once the newer state version is deployed widely. 
The older // version can then be supported via multicannon pulling in a specific build and support for it dropped in latest code. type FeatureToggles struct { - SupportMinimalSysEventFd2 bool - SupportDclzDclo bool - SupportNoopMprotect bool SupportWorkingSysGetRandom bool } diff --git a/cannon/mipsevm/memory/binary_tree.go b/cannon/mipsevm/memory/binary_tree.go index 4b5b0a2cc83b6..801c14a35cfa2 100644 --- a/cannon/mipsevm/memory/binary_tree.go +++ b/cannon/mipsevm/memory/binary_tree.go @@ -1,7 +1,15 @@ package memory import ( + "fmt" "math/bits" + + "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" +) + +const ( + defaultCodeSize = 128 * 1024 * 1024 // 128 MiB + defaultHeapSize = 512 * 1024 * 1024 // 512 MiB ) // BinaryTreeIndex is a representation of the state of the memory in a binary merkle tree. @@ -12,13 +20,39 @@ type BinaryTreeIndex struct { pageTable map[Word]*CachedPage } -func NewBinaryTreeMemory() *Memory { - pages := make(map[Word]*CachedPage) +func NewBinaryTreeMemory(codeSize, heapSize arch.Word) *Memory { + pages := make(map[arch.Word]*CachedPage) index := NewBinaryTreeIndex(pages) + + if codeSize == 0 { + codeSize = defaultCodeSize + } + if heapSize == 0 { + heapSize = defaultHeapSize + } + + // Defensive bounds: code region must not overlap heap start + if codeSize > arch.ProgramHeapStart { + panic(fmt.Sprintf("codeSize (0x%x) overlaps heap start (0x%x)", codeSize, arch.ProgramHeapStart)) + } + + indexedRegions := make([]MappedMemoryRegion, 2) + indexedRegions[0] = MappedMemoryRegion{ + startAddr: 0, + endAddr: codeSize, + Data: make([]byte, codeSize), + } + indexedRegions[1] = MappedMemoryRegion{ + startAddr: arch.ProgramHeapStart, + endAddr: arch.ProgramHeapStart + heapSize, + Data: make([]byte, heapSize), + } + return &Memory{ - merkleIndex: index, - pageTable: pages, - lastPageKeys: [2]Word{^Word(0), ^Word(0)}, // default to invalid keys, to not match any pages + merkleIndex: index, + pageTable: pages, + lastPageKeys: 
[2]arch.Word{^arch.Word(0), ^arch.Word(0)}, + MappedRegions: indexedRegions, } } diff --git a/cannon/mipsevm/memory/memory.go b/cannon/mipsevm/memory/memory.go index fe29db30acb55..e508d794ebb75 100644 --- a/cannon/mipsevm/memory/memory.go +++ b/cannon/mipsevm/memory/memory.go @@ -27,6 +27,20 @@ const ( type Word = arch.Word +type MappedMemoryRegion struct { + startAddr Word + endAddr Word + Data []byte +} + +func (m *MappedMemoryRegion) AddrInRegion(addr Word) bool { + return addr >= m.startAddr && addr < m.endAddr +} + +func (m *MappedMemoryRegion) PageIndexInRegion(pageIndex Word) bool { + return pageIndex >= m.startAddr>>PageAddrSize && pageIndex < m.endAddr>>PageAddrSize +} + type Memory struct { merkleIndex PageIndex // Note: since we don't de-alloc Pages, we don't do ref-counting. @@ -38,6 +52,8 @@ type Memory struct { // this prevents map lookups each instruction lastPageKeys [2]Word lastPage [2]*CachedPage + + MappedRegions []MappedMemoryRegion } type PageIndex interface { @@ -50,8 +66,51 @@ type PageIndex interface { New(pages map[Word]*CachedPage) PageIndex } +func NewMemoryWithLargeRegions() *Memory { + return NewBinaryTreeMemory(defaultCodeSize, defaultHeapSize) +} + func NewMemory() *Memory { - return NewBinaryTreeMemory() + return NewBinaryTreeMemory(4096, 4096) +} + +// start end size gap +func (m *Memory) GetAllocatedRanges() [][4]Word { + var ranges [][4]Word + if len(m.pageTable) == 0 { + return ranges + } + + // Extract and sort page addresses + keys := make([]Word, 0, len(m.pageTable)) + for key := range m.pageTable { + keys = append(keys, key) + } + sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + + // Find contiguous ranges and gaps + start := keys[0] + prev := start + var lastEnd Word = start - 1 + + for i := 1; i < len(keys); i++ { + if keys[i] != prev+1 { + gap := start - lastEnd - 1 // Gap is calculated from end of prev range to start of new one + ranges = append(ranges, [4]Word{start, prev, prev - start + 1, gap}) + 
lastEnd = prev + start = keys[i] + } + prev = keys[i] + } + + // Append last range + gap := start - lastEnd - 1 + ranges = append(ranges, [4]Word{start, prev, prev - start + 1, gap}) + for i := 0; i < len(ranges); i++ { + ranges[i][0] <<= PageAddrSize + ranges[i][1] <<= PageAddrSize + } + return ranges } func (m *Memory) MerkleRoot() [32]byte { @@ -66,7 +125,7 @@ func (m *Memory) PageCount() int { return len(m.pageTable) } -func (m *Memory) ForEachPage(fn func(pageIndex Word, page *Page) error) error { +func (m *Memory) ForEachPage(fn func(pageIndex Word, page Page) error) error { for pageIndex, cachedPage := range m.pageTable { if err := fn(pageIndex, cachedPage.Data); err != nil { return err @@ -74,7 +133,6 @@ func (m *Memory) ForEachPage(fn func(pageIndex Word, page *Page) error) error { } return nil } - func (m *Memory) MerkleizeSubtree(gindex uint64) [32]byte { return m.merkleIndex.MerkleizeSubtree(gindex) } @@ -155,7 +213,15 @@ func (m *Memory) GetWord(addr Word) Word { if addr&arch.ExtMask != 0 { panic(fmt.Errorf("unaligned memory access: %x", addr)) } + for _, region := range m.MappedRegions { + if ok := region.AddrInRegion(addr); ok { + offset := addr - region.startAddr + return arch.ByteOrderWord.Word(region.Data[offset : offset+arch.WordSizeBytes : offset+arch.WordSizeBytes]) + } + } + pageIndex := addr >> PageAddrSize + p, ok := m.PageLookup(pageIndex) if !ok { return 0 @@ -165,7 +231,17 @@ func (m *Memory) GetWord(addr Word) Word { } func (m *Memory) AllocPage(pageIndex Word) *CachedPage { - p := &CachedPage{Data: new(Page)} + p := new(CachedPage) + for _, region := range m.MappedRegions { + if region.PageIndexInRegion(pageIndex) { + indexAdjusted := pageIndex - region.startAddr>>PageAddrSize + p.Data = region.Data[indexAdjusted*PageSize : (indexAdjusted+1)*PageSize : (indexAdjusted+1)*PageSize] + break + } + } + if p.Data == nil { + p.Data = make(Page, PageSize) + } m.pageTable[pageIndex] = p m.merkleIndex.AddPage(pageIndex) return p @@ -237,8 +313,9 
@@ func (m *Memory) Copy() *Memory { } for k, page := range m.pageTable { - data := new(Page) - *data = *page.Data + data := make(Page, PageSize) + // *data = *page.Data + copy(data, page.Data) out.AllocPage(k).Data = data } return out @@ -287,20 +364,23 @@ func (m *Memory) Deserialize(in io.Reader) error { return err } } + return nil } type pageEntry struct { - Index Word `json:"index"` - Data *Page `json:"data"` + Index Word `json:"index"` + Data *[PageSize]byte `json:"data"` } func (m *Memory) MarshalJSON() ([]byte, error) { // nosemgrep pages := make([]pageEntry, 0, len(m.pageTable)) for k, p := range m.pageTable { + data := new([PageSize]byte) + copy(data[:], p.Data) pages = append(pages, pageEntry{ Index: k, - Data: p.Data, + Data: data, }) } sort.Slice(pages, func(i, j int) bool { @@ -318,7 +398,8 @@ func (m *Memory) UnmarshalJSON(data []byte) error { if _, ok := m.pageTable[p.Index]; ok { return fmt.Errorf("cannot load duplicate page, entry %d, page index %d", i, p.Index) } - m.AllocPage(p.Index).Data = p.Data + page := m.AllocPage(p.Index) + copy(page.Data, p.Data[:]) } return nil } diff --git a/cannon/mipsevm/memory/memory64_benchmark_test.go b/cannon/mipsevm/memory/memory64_benchmark_test.go index 784d13785a412..1754cffef5556 100644 --- a/cannon/mipsevm/memory/memory64_benchmark_test.go +++ b/cannon/mipsevm/memory/memory64_benchmark_test.go @@ -9,9 +9,11 @@ import ( ) const ( - smallDataset = 12_500_000 - mediumDataset = 100_000_000 - largeDataset = 400_000_000 + smallDataset = 12_500_000 + mediumDataset = 100_000_000 + largeDataset = 400_000_000 + testDefaultCodeRegionSize = 4096 + testDefaultHeapSize = 4096 ) func BenchmarkMemoryOperations(b *testing.B) { @@ -36,7 +38,7 @@ func BenchmarkMemoryOperations(b *testing.B) { for _, bm := range benchmarks { b.Run("BinaryTree", func(b *testing.B) { b.Run(bm.name, func(b *testing.B) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) b.ResetTimer() bm.fn(b, 
m) }) diff --git a/cannon/mipsevm/memory/memory64_binary_tree_test.go b/cannon/mipsevm/memory/memory64_binary_tree_test.go index 90c83d92dbbcf..f382c5b63ad7f 100644 --- a/cannon/mipsevm/memory/memory64_binary_tree_test.go +++ b/cannon/mipsevm/memory/memory64_binary_tree_test.go @@ -17,7 +17,7 @@ import ( func TestMemory64BinaryTreeMerkleProof(t *testing.T) { t.Run("nearly empty tree", func(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) m.SetWord(0x10000, 0xAABBCCDD_EEFF1122) proof := m.MerkleProof(0x10000) require.Equal(t, uint64(0xAABBCCDD_EEFF1122), binary.BigEndian.Uint64(proof[:8])) @@ -26,7 +26,7 @@ func TestMemory64BinaryTreeMerkleProof(t *testing.T) { } }) t.Run("fuller tree", func(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) m.SetWord(0x10000, 0xaabbccdd) m.SetWord(0x80008, 42) m.SetWord(0x13370000, 123) @@ -50,38 +50,38 @@ func TestMemory64BinaryTreeMerkleProof(t *testing.T) { func TestMemory64BinaryTreeMerkleRoot(t *testing.T) { t.Run("empty", func(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) root := m.MerkleRoot() require.Equal(t, zeroHashes[64-5], root, "fully zeroed memory should have expected zero hash") }) t.Run("empty page", func(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) m.SetWord(0xF000, 0) root := m.MerkleRoot() require.Equal(t, zeroHashes[64-5], root, "fully zeroed memory should have expected zero hash") }) t.Run("single page", func(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) m.SetWord(0xF000, 1) root := m.MerkleRoot() require.NotEqual(t, zeroHashes[64-5], root, "non-zero memory") }) t.Run("repeat zero", func(t *testing.T) { - m := NewBinaryTreeMemory() + m := 
NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) m.SetWord(0xF000, 0) m.SetWord(0xF008, 0) root := m.MerkleRoot() require.Equal(t, zeroHashes[64-5], root, "zero still") }) t.Run("two empty pages", func(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) m.SetWord(PageSize*3, 0) m.SetWord(PageSize*10, 0) root := m.MerkleRoot() require.Equal(t, zeroHashes[64-5], root, "zero still") }) t.Run("random few pages", func(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) m.SetWord(PageSize*3, 1) m.SetWord(PageSize*5, 42) m.SetWord(PageSize*6, 123) @@ -103,7 +103,7 @@ func TestMemory64BinaryTreeMerkleRoot(t *testing.T) { require.Equal(t, r1, r2, "expecting manual page combination to match subtree merkle func") }) t.Run("invalidate page", func(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) m.SetWord(0xF000, 0) require.Equal(t, zeroHashes[64-5], m.MerkleRoot(), "zero at first") m.SetWord(0xF008, 1) @@ -115,7 +115,7 @@ func TestMemory64BinaryTreeMerkleRoot(t *testing.T) { func TestMemory64BinaryTreeReadWrite(t *testing.T) { t.Run("large random", func(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) data := make([]byte, 20_000) _, err := rand.Read(data[:]) require.NoError(t, err) @@ -128,7 +128,7 @@ func TestMemory64BinaryTreeReadWrite(t *testing.T) { }) t.Run("repeat range", func(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) data := []byte(strings.Repeat("under the big bright yellow sun ", 40)) require.NoError(t, m.SetMemoryRange(0x1337, bytes.NewReader(data))) res, err := io.ReadAll(m.ReadMemoryRange(0x1337-10, Word(len(data)+20))) @@ -139,7 +139,7 @@ func TestMemory64BinaryTreeReadWrite(t *testing.T) { }) 
t.Run("empty range", func(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) addr := Word(0xAABBCC00) r := bytes.NewReader(nil) pre := m.MerkleRoot() @@ -165,7 +165,7 @@ func TestMemory64BinaryTreeReadWrite(t *testing.T) { }) t.Run("range page overlap", func(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) data := bytes.Repeat([]byte{0xAA}, PageAddrSize) require.NoError(t, m.SetMemoryRange(0, bytes.NewReader(data))) for i := 0; i < PageAddrSize/arch.WordSizeBytes; i++ { @@ -183,7 +183,7 @@ func TestMemory64BinaryTreeReadWrite(t *testing.T) { }) t.Run("read-write", func(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) m.SetWord(16, 0xAABBCCDD_EEFF1122) require.Equal(t, Word(0xAABBCCDD_EEFF1122), m.GetWord(16)) m.SetWord(16, 0xAABB1CDD_EEFF1122) @@ -193,7 +193,7 @@ func TestMemory64BinaryTreeReadWrite(t *testing.T) { }) t.Run("unaligned read", func(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) m.SetWord(16, Word(0xAABBCCDD_EEFF1122)) m.SetWord(24, 0x11223344_55667788) for i := Word(17); i < 24; i++ { @@ -207,7 +207,7 @@ func TestMemory64BinaryTreeReadWrite(t *testing.T) { }) t.Run("unaligned write", func(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) m.SetWord(16, 0xAABBCCDD_EEFF1122) require.Panics(t, func() { m.SetWord(17, 0x11223344) @@ -235,17 +235,17 @@ func TestMemory64BinaryTreeReadWrite(t *testing.T) { } func TestMemory64BinaryTreeJSON(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) m.SetWord(8, 0xAABBCCDD_EEFF1122) dat, err := json.Marshal(m) require.NoError(t, err) - res := NewBinaryTreeMemory() + res := 
NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) require.NoError(t, json.Unmarshal(dat, &res)) require.Equal(t, Word(0xAABBCCDD_EEFF1122), res.GetWord(8)) } func TestMemory64BinaryTreeCopy(t *testing.T) { - m := NewBinaryTreeMemory() + m := NewBinaryTreeMemory(testDefaultCodeRegionSize, testDefaultHeapSize) m.SetWord(0xAABBCCDD_8000, 0x000000_AABB) mcpy := m.Copy() require.Equal(t, Word(0xAABB), mcpy.GetWord(0xAABBCCDD_8000)) diff --git a/cannon/mipsevm/memory/page.go b/cannon/mipsevm/memory/page.go index d9e560dc64a25..e44dd3c3a9524 100644 --- a/cannon/mipsevm/memory/page.go +++ b/cannon/mipsevm/memory/page.go @@ -18,9 +18,9 @@ var zlibWriterPool = sync.Pool{ }, } -type Page [PageSize]byte +type Page []byte -func (p *Page) MarshalJSON() ([]byte, error) { // nosemgrep +func (p Page) MarshalJSON() ([]byte, error) { // nosemgrep var out bytes.Buffer w := zlibWriterPool.Get().(*zlib.Writer) defer zlibWriterPool.Put(w) @@ -34,7 +34,7 @@ func (p *Page) MarshalJSON() ([]byte, error) { // nosemgrep return json.Marshal(out.Bytes()) } -func (p *Page) UnmarshalJSON(dat []byte) error { +func (p Page) UnmarshalJSON(dat []byte) error { // Strip off the `"` characters at the start & end. 
dat = dat[1 : len(dat)-1] // Decode b64 then decompress @@ -52,7 +52,7 @@ func (p *Page) UnmarshalJSON(dat []byte) error { } } -func (p *Page) UnmarshalText(dat []byte) error { +func (p Page) UnmarshalText(dat []byte) error { if len(dat) != PageSize*2 { return fmt.Errorf("expected %d hex chars, but got %d", PageSize*2, len(dat)) } @@ -65,7 +65,7 @@ func (p *Page) UnmarshalText(dat []byte) error { var _ [0]struct{} = [PageSize - 4096]struct{}{} type CachedPage struct { - Data *Page + Data Page // intermediate nodes only Cache [PageSize / 32][32]byte // bit set to 1 if the intermediate node is valid diff --git a/cannon/mipsevm/memory/page_test.go b/cannon/mipsevm/memory/page_test.go index e7a8167a9df49..6665579e013e5 100644 --- a/cannon/mipsevm/memory/page_test.go +++ b/cannon/mipsevm/memory/page_test.go @@ -8,7 +8,7 @@ import ( ) func TestCachedPage(t *testing.T) { - p := &CachedPage{Data: new(Page)} + p := &CachedPage{Data: make(Page, PageSize)} p.Data[42] = 0xab gindex := ((uint64(1) << PageAddrSize) | 42) >> 5 diff --git a/cannon/mipsevm/multithreaded/instrumented.go b/cannon/mipsevm/multithreaded/instrumented.go index 73138925569ea..a8ee4c0551e75 100644 --- a/cannon/mipsevm/multithreaded/instrumented.go +++ b/cannon/mipsevm/multithreaded/instrumented.go @@ -11,6 +11,9 @@ import ( "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" ) +type InstructionDetails struct { + insn, opcode, fun uint32 +} type InstrumentedState struct { state *State @@ -24,12 +27,23 @@ type InstrumentedState struct { preimageOracle *exec.TrackingPreimageOracleReader meta mipsevm.Metadata - features mipsevm.FeatureToggles + + cached_decode []InstructionDetails + features mipsevm.FeatureToggles } var _ mipsevm.FPVM = (*InstrumentedState)(nil) func NewInstrumentedState(state *State, po mipsevm.PreimageOracle, stdOut, stdErr io.Writer, log log.Logger, meta mipsevm.Metadata, features mipsevm.FeatureToggles) *InstrumentedState { + memLen := len(state.Memory.MappedRegions[0].Data) + 
cached_decode := make([]InstructionDetails, memLen/4) + + // Perform eager decode of all mapped code + for pc := Word(0); pc < Word(memLen); pc += 4 { + insn, opcode, fun := exec.GetInstructionDetails(pc, state.Memory) + cached_decode[pc/4] = InstructionDetails{insn, opcode, fun} + } + return &InstrumentedState{ state: state, log: log, @@ -40,6 +54,7 @@ func NewInstrumentedState(state *State, po mipsevm.PreimageOracle, stdOut, stdEr statsTracker: NoopStatsTracker(), preimageOracle: exec.NewTrackingPreimageOracleReader(po), meta: meta, + cached_decode: cached_decode, features: features, } } @@ -129,3 +144,11 @@ func (m *InstrumentedState) LookupSymbol(addr arch.Word) string { } return m.meta.LookupSymbol(addr) } + +func (m *InstrumentedState) UpdateInstructionCache(pc arch.Word) { + idx := pc / 4 + if int(idx) < len(m.cached_decode) { + insn, opcode, fun := exec.GetInstructionDetails(pc, m.state.Memory) + m.cached_decode[idx] = InstructionDetails{insn, opcode, fun} + } +} diff --git a/cannon/mipsevm/multithreaded/instrumented_test.go b/cannon/mipsevm/multithreaded/instrumented_test.go index f6c4839303858..57faafbb2dfc1 100644 --- a/cannon/mipsevm/multithreaded/instrumented_test.go +++ b/cannon/mipsevm/multithreaded/instrumented_test.go @@ -27,7 +27,7 @@ func TestInstrumentedState_Hello(t *testing.T) { var stdOutBuf, stdErrBuf bytes.Buffer us := vmFactory(state, nil, io.MultiWriter(&stdOutBuf, os.Stdout), io.MultiWriter(&stdErrBuf, os.Stderr), testutil.CreateLogger(), meta) - maxSteps := 450_000 + maxSteps := 500_000 for i := 0; i < maxSteps; i++ { if us.GetState().GetExited() { break @@ -129,7 +129,7 @@ func TestInstrumentedState_SyscallEventFdProgram(t *testing.T) { err := us.InitDebug() require.NoError(t, err) - for i := 0; i < 500_000; i++ { + for i := 0; i < 550_000; i++ { if us.GetState().GetExited() { break } @@ -418,7 +418,7 @@ func runTestsAcrossVms[T any](t *testing.T, testNamer TestNamer[T], testCases [] } variations := []VMVariations{ - {name: "Go 1.23 
VM", goTarget: testutil.Go1_23, features: mipsevm.FeatureToggles{SupportMinimalSysEventFd2: true, SupportDclzDclo: true}}, + {name: "Go 1.23 VM", goTarget: testutil.Go1_23, features: mipsevm.FeatureToggles{}}, {name: "Go 1.24 VM", goTarget: testutil.Go1_24, features: allFeaturesEnabled()}, } diff --git a/cannon/mipsevm/multithreaded/mips.go b/cannon/mipsevm/multithreaded/mips.go index 978cbe9013339..31cd20546b076 100644 --- a/cannon/mipsevm/multithreaded/mips.go +++ b/cannon/mipsevm/multithreaded/mips.go @@ -165,9 +165,6 @@ func (m *InstrumentedState) handleSyscall() error { // Otherwise, ignored (noop) case arch.SysMunmap: case arch.SysMprotect: - if !m.features.SupportNoopMprotect { - m.handleUnrecognizedSyscall(syscallNum) - } case arch.SysGetAffinity: case arch.SysMadvise: case arch.SysRtSigprocmask: @@ -198,10 +195,6 @@ func (m *InstrumentedState) handleSyscall() error { case arch.SysGetRLimit: case arch.SysLseek: case arch.SysEventFd2: - if !m.features.SupportMinimalSysEventFd2 { - m.handleUnrecognizedSyscall(syscallNum) - } - // a0 = initial value, a1 = flags // Validate flags if a1&exec.EFD_NONBLOCK == 0 { @@ -323,8 +316,21 @@ func (m *InstrumentedState) doMipsStep() error { } m.state.StepsSinceLastContextSwitch += 1 - //instruction fetch - insn, opcode, fun := exec.GetInstructionDetails(m.state.GetPC(), m.state.Memory) + pc := m.state.GetPC() + if pc&0x3 != 0 { + panic(fmt.Sprintf("unaligned instruction fetch: PC = 0x%x", pc)) + } + cacheIdx := pc / 4 + + var insn, opcode, fun uint32 + if int(cacheIdx) < len(m.cached_decode) { + decoded := m.cached_decode[cacheIdx] + insn, opcode, fun = decoded.insn, decoded.opcode, decoded.fun + } else { + // PC is outside eager region + m.statsTracker.trackInstructionCacheMiss(pc) + insn, opcode, fun = exec.GetInstructionDetails(pc, m.state.Memory) + } // Handle syscall separately // syscall (can read and write) @@ -344,7 +350,7 @@ func (m *InstrumentedState) doMipsStep() error { } // Exec the rest of the step logic - 
memUpdated, effMemAddr, err := exec.ExecMipsCoreStepLogic(m.state.getCpuRef(), m.state.GetRegistersRef(), m.state.Memory, insn, opcode, fun, m.memoryTracker, m.stackTracker, m.features) + memUpdated, effMemAddr, err := exec.ExecMipsCoreStepLogic(m.state.getCpuRef(), m.state.GetRegistersRef(), m.state.Memory, insn, opcode, fun, m.memoryTracker, m.stackTracker) if err != nil { return err } diff --git a/cannon/mipsevm/multithreaded/state.go b/cannon/mipsevm/multithreaded/state.go index 35ca34d1bc077..f2405f7c1bdce 100644 --- a/cannon/mipsevm/multithreaded/state.go +++ b/cannon/mipsevm/multithreaded/state.go @@ -70,6 +70,8 @@ type State struct { // LastHint is optional metadata, and not part of the VM state itself. LastHint hexutil.Bytes + + UseLargeICache bool } var _ mipsevm.FPVMState = (*State)(nil) @@ -333,7 +335,11 @@ func (s *State) Serialize(out io.Writer) error { func (s *State) Deserialize(in io.Reader) error { bin := serialize.NewBinaryReader(in) - s.Memory = memory.NewMemory() + if s.UseLargeICache { + s.Memory = memory.NewMemoryWithLargeRegions() + } else { + s.Memory = memory.NewMemory() + } if err := s.Memory.Deserialize(in); err != nil { return err } diff --git a/cannon/mipsevm/multithreaded/stats.go b/cannon/mipsevm/multithreaded/stats.go index 04de1516359bd..77185ea64c867 100644 --- a/cannon/mipsevm/multithreaded/stats.go +++ b/cannon/mipsevm/multithreaded/stats.go @@ -1,6 +1,7 @@ package multithreaded import ( + "github.com/ethereum/go-ethereum/common/hexutil" lru "github.com/hashicorp/golang-lru/v2/simplelru" "github.com/ethereum-optimism/optimism/cannon/mipsevm" @@ -14,6 +15,7 @@ type StatsTracker interface { trackReservationInvalidation() trackForcedPreemption() trackThreadActivated(tid Word, step uint64) + trackInstructionCacheMiss(pc Word) populateDebugInfo(debugInfo *mipsevm.DebugInfo) } @@ -31,6 +33,7 @@ func (s *noopStatsTracker) trackReservationInvalidation() {} func (s *noopStatsTracker) trackForcedPreemption() {} func (s *noopStatsTracker) 
trackThreadActivated(tid Word, step uint64) {} func (s *noopStatsTracker) populateDebugInfo(debugInfo *mipsevm.DebugInfo) {} +func (s *noopStatsTracker) trackInstructionCacheMiss(pc Word) {} var _ StatsTracker = (*noopStatsTracker)(nil) @@ -48,6 +51,8 @@ type statsTrackerImpl struct { reservationInvalidationCount uint64 forcedPreemptionCount uint64 idleStepCountThread0 uint64 + icacheMissCount uint64 + highestICacheMissPC Word } func (s *statsTrackerImpl) populateDebugInfo(debugInfo *mipsevm.DebugInfo) { @@ -57,6 +62,8 @@ func (s *statsTrackerImpl) populateDebugInfo(debugInfo *mipsevm.DebugInfo) { debugInfo.ReservationInvalidationCount = s.reservationInvalidationCount debugInfo.ForcedPreemptionCount = s.forcedPreemptionCount debugInfo.IdleStepCountThread0 = s.idleStepCountThread0 + debugInfo.InstructionCacheMissCount = s.icacheMissCount + debugInfo.HighestICacheMissPC = hexutil.Uint64(s.highestICacheMissPC) } func (s *statsTrackerImpl) trackLL(threadId Word, step uint64) { @@ -105,6 +112,13 @@ func (s *statsTrackerImpl) trackThreadActivated(tid Word, step uint64) { s.activeThreadId = tid } +func (s *statsTrackerImpl) trackInstructionCacheMiss(pc Word) { + s.icacheMissCount += 1 + if pc > s.highestICacheMissPC { + s.highestICacheMissPC = pc + } +} + func NewStatsTracker() StatsTracker { return newStatsTracker(5) } diff --git a/cannon/mipsevm/multithreaded/testutil/expectations.go b/cannon/mipsevm/multithreaded/testutil/expectations.go index 5afb773e42568..d9f02d1561e6f 100644 --- a/cannon/mipsevm/multithreaded/testutil/expectations.go +++ b/cannon/mipsevm/multithreaded/testutil/expectations.go @@ -29,18 +29,8 @@ type ExpectedState struct { Step uint64 LastHint hexutil.Bytes MemoryRoot common.Hash + threadExpectations *threadExpectations expectedMemory *memory.Memory - // Threading-related expectations - StepsSinceLastContextSwitch uint64 - TraverseRight bool - NextThreadId arch.Word - ThreadCount int - RightStackSize int - LeftStackSize int - prestateActiveThreadId 
arch.Word - prestateActiveThreadOrig ExpectedThreadState // Cached for internal use - ActiveThreadId arch.Word - threadExpectations map[arch.Word]*ExpectedThreadState // Remember some actions so we can analyze expectations memoryWrites []arch.Word } @@ -60,13 +50,6 @@ type ExpectedThreadState struct { func NewExpectedState(t require.TestingT, state mipsevm.FPVMState) *ExpectedState { fromState := ToMTState(t, state) - currentThread := fromState.GetCurrentThread() - - expectedThreads := make(map[arch.Word]*ExpectedThreadState) - for _, t := range GetAllThreads(fromState) { - expectedThreads[t.ThreadId] = newExpectedThreadState(t) - } - return &ExpectedState{ // General Fields PreimageKey: fromState.GetPreimageKey(), @@ -80,19 +63,8 @@ func NewExpectedState(t require.TestingT, state mipsevm.FPVMState) *ExpectedStat Step: fromState.GetStep(), LastHint: fromState.GetLastHint(), MemoryRoot: fromState.GetMemory().MerkleRoot(), - // Thread-related global fields - StepsSinceLastContextSwitch: fromState.StepsSinceLastContextSwitch, - TraverseRight: fromState.TraverseRight, - NextThreadId: fromState.NextThreadId, - ThreadCount: fromState.ThreadCount(), - RightStackSize: len(fromState.RightThreadStack), - LeftStackSize: len(fromState.LeftThreadStack), - // ThreadState expectations - prestateActiveThreadId: currentThread.ThreadId, - prestateActiveThreadOrig: *newExpectedThreadState(currentThread), // Cache prestate thread for internal use - ActiveThreadId: currentThread.ThreadId, - threadExpectations: expectedThreads, - expectedMemory: fromState.Memory.Copy(), + threadExpectations: newThreadExpectations(fromState), + expectedMemory: fromState.Memory.Copy(), } } @@ -119,7 +91,7 @@ func (e *ExpectedState) ExpectStep() { e.Step += 1 e.PrestateActiveThread().PC += 4 e.PrestateActiveThread().NextPC += 4 - e.StepsSinceLastContextSwitch += 1 + e.threadExpectations.StepsSinceLastContextSwitch += 1 } func (e *ExpectedState) ExpectMemoryReservationCleared() { @@ -151,45 +123,52 @@ func 
(e *ExpectedState) ExpectMemoryWrite(addr arch.Word, val arch.Word) { e.MemoryRoot = e.expectedMemory.MerkleRoot() } -func (e *ExpectedState) ExpectPreemption(preState *multithreaded.State) { - e.ActiveThreadId = FindNextThread(preState).ThreadId - e.StepsSinceLastContextSwitch = 0 - if preState.TraverseRight { - e.TraverseRight = e.RightStackSize > 1 - e.RightStackSize -= 1 - e.LeftStackSize += 1 - } else { - e.TraverseRight = e.LeftStackSize == 1 - e.LeftStackSize -= 1 - e.RightStackSize += 1 - } +func (e *ExpectedState) ExpectPreemption() { + e.threadExpectations.ExpectPreemption() } func (e *ExpectedState) ExpectNewThread() *ExpectedThreadState { - newThreadId := e.NextThreadId - e.NextThreadId += 1 - e.ThreadCount += 1 + return e.threadExpectations.ExpectNewThread() +} - // Clone expectations from prestate active thread's original state (bf changing any expectations) - newThread := &ExpectedThreadState{} - *newThread = e.prestateActiveThreadOrig +func (e *ExpectedState) ExpectPoppedThread() { + e.threadExpectations.ExpectPop() +} - newThread.ThreadId = newThreadId - e.threadExpectations[newThreadId] = newThread +func (e *ExpectedState) ExpectTraverseRight(traverseRight bool) { + e.threadExpectations.ExpectTraverseRight(traverseRight) +} - return newThread +func (e *ExpectedState) ExpectNoContextSwitch() { + e.threadExpectations.StepsSinceLastContextSwitch += 1 +} + +func (e *ExpectedState) ExpectContextSwitch() { + e.threadExpectations.StepsSinceLastContextSwitch = 0 } func (e *ExpectedState) ActiveThread() *ExpectedThreadState { - return e.threadExpectations[e.ActiveThreadId] + return e.threadExpectations.activeThread() +} + +func (e *ExpectedState) ActiveThreadId() arch.Word { + return e.threadExpectations.ActiveThreadId +} + +func (e *ExpectedState) ExpectActiveThreadId(expected arch.Word) { + e.threadExpectations.ActiveThreadId = expected +} + +func (e *ExpectedState) ExpectNextThreadId(expected arch.Word) { + e.threadExpectations.NextThreadId = expected } 
func (e *ExpectedState) PrestateActiveThread() *ExpectedThreadState { - return e.threadExpectations[e.prestateActiveThreadId] + return e.threadExpectations.PrestateActiveThread() } func (e *ExpectedState) Thread(threadId arch.Word) *ExpectedThreadState { - return e.threadExpectations[threadId] + return e.threadExpectations.ThreadById(threadId) } func (e *ExpectedState) Validate(t require.TestingT, state mipsevm.FPVMState) { @@ -207,40 +186,211 @@ func (e *ExpectedState) Validate(t require.TestingT, state mipsevm.FPVMState) { require.Equalf(t, e.LastHint, actualState.GetLastHint(), "Expect lastHint = %v", e.LastHint) require.Equalf(t, e.MemoryRoot, common.Hash(actualState.GetMemory().MerkleRoot()), "Expect memory root = %v", e.MemoryRoot) // Thread-related global fields + e.threadExpectations.Validate(t, actualState) +} + +type threadExpectations struct { + ActiveThreadId arch.Word + StepsSinceLastContextSwitch uint64 + NextThreadId arch.Word + prestateActiveThread *ExpectedThreadState + // Cache the original value of the prestate active thread, so we can keep the original values before any updates + prestateActiveThreadValue ExpectedThreadState + traverseRight bool + left []*ExpectedThreadState + right []*ExpectedThreadState + popped []*ExpectedThreadState +} + +func newThreadExpectations(state *multithreaded.State) *threadExpectations { + left := expectedThreadStack(state.LeftThreadStack) + right := expectedThreadStack(state.RightThreadStack) + var prestateActiveThread *ExpectedThreadState + if state.TraverseRight { + prestateActiveThread = right[len(right)-1] + } else { + prestateActiveThread = left[len(left)-1] + } + + return &threadExpectations{ + ActiveThreadId: prestateActiveThread.ThreadId, + StepsSinceLastContextSwitch: state.StepsSinceLastContextSwitch, + NextThreadId: state.NextThreadId, + prestateActiveThread: prestateActiveThread, + prestateActiveThreadValue: *prestateActiveThread, + traverseRight: state.TraverseRight, + left: left, + right: right, + 
popped: make([]*ExpectedThreadState, 0), + } +} + +func (e *threadExpectations) Validate(t require.TestingT, state *multithreaded.State) { + actualState := ToMTState(t, state) + require.Equalf(t, e.StepsSinceLastContextSwitch, actualState.StepsSinceLastContextSwitch, "Expect StepsSinceLastContextSwitch = %v", e.StepsSinceLastContextSwitch) - require.Equalf(t, e.TraverseRight, actualState.TraverseRight, "Expect TraverseRight = %v", e.TraverseRight) + require.Equalf(t, e.traverseRight, actualState.TraverseRight, "Expect TraverseRight = %v", e.traverseRight) require.Equalf(t, e.NextThreadId, actualState.NextThreadId, "Expect NextThreadId = %v", e.NextThreadId) - require.Equalf(t, e.ThreadCount, actualState.ThreadCount(), "Expect thread count = %v", e.ThreadCount) - require.Equalf(t, e.RightStackSize, len(actualState.RightThreadStack), "Expect right stack size = %v", e.RightStackSize) - require.Equalf(t, e.LeftStackSize, len(actualState.LeftThreadStack), "Expect right stack size = %v", e.LeftStackSize) + require.Equalf(t, e.threadCount(), actualState.ThreadCount(), "Expect thread count = %v", e.threadCount()) // Check active thread - activeThread := actualState.GetCurrentThread() - require.Equal(t, e.ActiveThreadId, activeThread.ThreadId) - // Check all threads - expectedThreadCount := 0 - for tid, exp := range e.threadExpectations { - actualThread := FindThread(actualState, tid) - isActive := tid == activeThread.ThreadId - if exp.Dropped { - require.Nil(t, actualThread, "Thread %v should have been dropped", tid) - } else { - require.NotNil(t, actualThread, "Could not find thread matching expected thread with id %v", tid) - e.validateThread(t, exp, actualThread, isActive) - expectedThreadCount++ + activeThreadId := actualState.GetCurrentThread().ThreadId + require.Equal(t, e.ActiveThreadId, activeThreadId) + + // Check stacks + e.assertStackMatchesExpectations(t, e.left, actualState.LeftThreadStack, "left", activeThreadId) + e.assertStackMatchesExpectations(t, e.right, 
actualState.RightThreadStack, "right", activeThreadId) +} + +func (e *threadExpectations) assertStackMatchesExpectations(t require.TestingT, expectedStack []*ExpectedThreadState, actualStack []*multithreaded.ThreadState, label string, activeThreadId arch.Word) { + require.Equalf(t, len(expectedStack), len(actualStack), "Expect %v stack size = %v", label, len(expectedStack)) + for i, expectedThread := range expectedStack { + if i >= len(actualStack) { + // Break to avoid unit test panics - should be unreachable for actual tests + require.FailNow(t, "Missing thread") + break } + actualThread := actualStack[i] + e.validateThread(t, expectedThread, actualThread, activeThreadId) + } +} + +func (e *threadExpectations) validateThread(t require.TestingT, expected *ExpectedThreadState, actual *multithreaded.ThreadState, activeThreadId arch.Word) { + threadInfo := fmt.Sprintf("tid = %v, active = %v", actual.ThreadId, actual.ThreadId == activeThreadId) + require.Equalf(t, expected.ThreadId, actual.ThreadId, "Expect ThreadId = 0x%x (%v)", expected.ThreadId, threadInfo) + require.Equalf(t, expected.PC, actual.Cpu.PC, "Expect PC = 0x%x (%v)", expected.PC, threadInfo) + require.Equalf(t, expected.NextPC, actual.Cpu.NextPC, "Expect nextPC = 0x%x (%v)", expected.NextPC, threadInfo) + require.Equalf(t, expected.HI, actual.Cpu.HI, "Expect HI = 0x%x (%v)", expected.HI, threadInfo) + require.Equalf(t, expected.LO, actual.Cpu.LO, "Expect LO = 0x%x (%v)", expected.LO, threadInfo) + require.Equalf(t, expected.Registers, actual.Registers, "Expect registers to match (%v)", threadInfo) + require.Equalf(t, expected.ExitCode, actual.ExitCode, "Expect exitCode = %v (%v)", expected.ExitCode, threadInfo) + require.Equalf(t, expected.Exited, actual.Exited, "Expect exited = %v (%v)", expected.Exited, threadInfo) + require.Equalf(t, expected.Dropped, false, "Thread should not be dropped") +} + +func (e *threadExpectations) ExpectPreemption() { + e.StepsSinceLastContextSwitch = 0 + if e.traverseRight 
{ + lastEl := len(e.right) - 1 + preempted := e.right[lastEl] + e.right = e.right[:lastEl] + e.left = append(e.left, preempted) + e.traverseRight = len(e.right) > 0 + } else { + lastEl := len(e.left) - 1 + preempted := e.left[lastEl] + e.left = e.left[:lastEl] + e.right = append(e.right, preempted) + e.traverseRight = len(e.left) == 0 } - require.Equal(t, expectedThreadCount, actualState.ThreadCount(), "Thread expectations do not match thread count") -} - -func (e *ExpectedState) validateThread(t require.TestingT, et *ExpectedThreadState, actual *multithreaded.ThreadState, isActive bool) { - threadInfo := fmt.Sprintf("tid = %v, active = %v", actual.ThreadId, isActive) - require.Equalf(t, et.ThreadId, actual.ThreadId, "Expect ThreadId = 0x%x (%v)", et.ThreadId, threadInfo) - require.Equalf(t, et.PC, actual.Cpu.PC, "Expect PC = 0x%x (%v)", et.PC, threadInfo) - require.Equalf(t, et.NextPC, actual.Cpu.NextPC, "Expect nextPC = 0x%x (%v)", et.NextPC, threadInfo) - require.Equalf(t, et.HI, actual.Cpu.HI, "Expect HI = 0x%x (%v)", et.HI, threadInfo) - require.Equalf(t, et.LO, actual.Cpu.LO, "Expect LO = 0x%x (%v)", et.LO, threadInfo) - require.Equalf(t, et.Registers, actual.Registers, "Expect registers to match (%v)", threadInfo) - require.Equalf(t, et.ExitCode, actual.ExitCode, "Expect exitCode = %v (%v)", et.ExitCode, threadInfo) - require.Equalf(t, et.Exited, actual.Exited, "Expect exited = %v (%v)", et.Exited, threadInfo) + e.updateActiveThreadId() +} + +func (e *threadExpectations) ExpectNewThread() *ExpectedThreadState { + e.StepsSinceLastContextSwitch = 0 + newThreadId := e.NextThreadId + e.NextThreadId += 1 + + // Copy expectations from prestate active thread's original value (before changing any expectations) + newThread := &ExpectedThreadState{} + *newThread = e.prestateActiveThreadValue + + newThread.ThreadId = newThreadId + if e.traverseRight { + e.right = append(e.right, newThread) + } else { + e.left = append(e.left, newThread) + } + + e.ActiveThreadId = 
newThreadId + return newThread +} + +func (e *threadExpectations) ExpectPop() { + e.StepsSinceLastContextSwitch = 0 + var popped *ExpectedThreadState + if e.traverseRight { + lastEl := len(e.right) - 1 + popped = e.right[lastEl] + popped.Dropped = true + e.right = e.right[:lastEl] + e.traverseRight = len(e.right) > 0 + } else { + lastEl := len(e.left) - 1 + popped = e.left[lastEl] + popped.Dropped = true + e.left = e.left[:lastEl] + e.traverseRight = len(e.left) == 0 + } + e.popped = append(e.popped, popped) + + e.updateActiveThreadId() +} + +func (e *threadExpectations) ExpectTraverseRight(traverseRight bool) { + e.traverseRight = traverseRight +} + +func (e *threadExpectations) PrestateActiveThread() *ExpectedThreadState { + return e.prestateActiveThread +} + +func (e *threadExpectations) ThreadById(threadId arch.Word) *ExpectedThreadState { + for _, thread := range e.allThreads() { + if thread.ThreadId == threadId { + return thread + } + } + return nil +} + +func (e *threadExpectations) allThreads() []*ExpectedThreadState { + var allThreads []*ExpectedThreadState + allThreads = append(allThreads, e.right...) + allThreads = append(allThreads, e.left...) + allThreads = append(allThreads, e.popped...) 
+ + return allThreads +} + +func (e *threadExpectations) updateActiveThreadId() { + activeStack := e.activeStack() + e.ActiveThreadId = activeStack[len(activeStack)-1].ThreadId +} + +func (e *threadExpectations) threadCount() int { + return e.leftStackSize() + e.rightStackSize() +} + +func (e *threadExpectations) rightStackSize() int { + return len(e.right) +} + +func (e *threadExpectations) leftStackSize() int { + return len(e.left) +} + +func (e *threadExpectations) activeStack() []*ExpectedThreadState { + if e.traverseRight { + return e.right + } else { + return e.left + } +} + +func (e *threadExpectations) activeThread() *ExpectedThreadState { + lastEl := len(e.activeStack()) - 1 + if lastEl < 0 { + return nil + } + return e.activeStack()[lastEl] +} + +func expectedThreadStack(threadStack []*multithreaded.ThreadState) []*ExpectedThreadState { + expectedThreads := make([]*ExpectedThreadState, 0, len(threadStack)) + for _, threadState := range threadStack { + expectedThreads = append(expectedThreads, newExpectedThreadState(threadState)) + } + + return expectedThreads } diff --git a/cannon/mipsevm/multithreaded/testutil/expectations_test.go b/cannon/mipsevm/multithreaded/testutil/expectations_test.go index a4c1115ebb58c..ec38cae6fa318 100644 --- a/cannon/mipsevm/multithreaded/testutil/expectations_test.go +++ b/cannon/mipsevm/multithreaded/testutil/expectations_test.go @@ -6,17 +6,16 @@ import ( "github.com/stretchr/testify/require" - "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" ) -type ExpectationMutator func(e *ExpectedState, st *multithreaded.State) +type ExpectationMutator func(t *testing.T, e *ExpectedState) func TestValidate_shouldCatchMutations(t *testing.T) { states := []*multithreaded.State{ - RandomState(0), - RandomState(1), - RandomState(2), + randomStateWithMultipleThreads(0), + randomStateWithMultipleThreads(1), + randomStateWithMultipleThreads(2), } var emptyHash 
[32]byte someThread := RandomThread(123) @@ -25,83 +24,97 @@ func TestValidate_shouldCatchMutations(t *testing.T) { name string mut ExpectationMutator }{ - {name: "PreimageKey", mut: func(e *ExpectedState, st *multithreaded.State) { e.PreimageKey = emptyHash }}, - {name: "PreimageOffset", mut: func(e *ExpectedState, st *multithreaded.State) { e.PreimageOffset += 1 }}, - {name: "Heap", mut: func(e *ExpectedState, st *multithreaded.State) { e.Heap += 1 }}, - {name: "LLReservationStatus", mut: func(e *ExpectedState, st *multithreaded.State) { e.LLReservationStatus = e.LLReservationStatus + 1 }}, - {name: "LLAddress", mut: func(e *ExpectedState, st *multithreaded.State) { e.LLAddress += 1 }}, - {name: "LLOwnerThread", mut: func(e *ExpectedState, st *multithreaded.State) { e.LLOwnerThread += 1 }}, - {name: "ExitCode", mut: func(e *ExpectedState, st *multithreaded.State) { e.ExitCode += 1 }}, - {name: "Exited", mut: func(e *ExpectedState, st *multithreaded.State) { e.Exited = !e.Exited }}, - {name: "Step", mut: func(e *ExpectedState, st *multithreaded.State) { e.Step += 1 }}, - {name: "LastHint", mut: func(e *ExpectedState, st *multithreaded.State) { e.LastHint = []byte{7, 8, 9, 10} }}, - {name: "MemoryRoot", mut: func(e *ExpectedState, st *multithreaded.State) { e.MemoryRoot = emptyHash }}, - {name: "StepsSinceLastContextSwitch", mut: func(e *ExpectedState, st *multithreaded.State) { e.StepsSinceLastContextSwitch += 1 }}, - {name: "TraverseRight", mut: func(e *ExpectedState, st *multithreaded.State) { e.TraverseRight = !e.TraverseRight }}, - {name: "NextThreadId", mut: func(e *ExpectedState, st *multithreaded.State) { e.NextThreadId += 1 }}, - {name: "ThreadCount", mut: func(e *ExpectedState, st *multithreaded.State) { e.ThreadCount += 1 }}, - {name: "RightStackSize", mut: func(e *ExpectedState, st *multithreaded.State) { e.RightStackSize += 1 }}, - {name: "LeftStackSize", mut: func(e *ExpectedState, st *multithreaded.State) { e.LeftStackSize += 1 }}, - {name: 
"ActiveThreadId", mut: func(e *ExpectedState, st *multithreaded.State) { e.ActiveThreadId += 1 }}, - {name: "Empty thread expectations", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations = map[arch.Word]*ExpectedThreadState{} + {name: "PreimageKey", mut: func(t *testing.T, e *ExpectedState) { e.PreimageKey = emptyHash }}, + {name: "PreimageOffset", mut: func(t *testing.T, e *ExpectedState) { e.PreimageOffset += 1 }}, + {name: "Heap", mut: func(t *testing.T, e *ExpectedState) { e.Heap += 1 }}, + {name: "LLReservationStatus", mut: func(t *testing.T, e *ExpectedState) { + e.LLReservationStatus = e.LLReservationStatus + 1 }}, - {name: "Mismatched thread expectations", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations = map[arch.Word]*ExpectedThreadState{someThread.ThreadId: newExpectedThreadState(someThread)} + {name: "LLAddress", mut: func(t *testing.T, e *ExpectedState) { e.LLAddress += 1 }}, + {name: "LLOwnerThread", mut: func(t *testing.T, e *ExpectedState) { e.LLOwnerThread += 1 }}, + {name: "ExitCode", mut: func(t *testing.T, e *ExpectedState) { e.ExitCode += 1 }}, + {name: "Exited", mut: func(t *testing.T, e *ExpectedState) { e.Exited = !e.Exited }}, + {name: "Step", mut: func(t *testing.T, e *ExpectedState) { e.Step += 1 }}, + {name: "LastHint", mut: func(t *testing.T, e *ExpectedState) { e.LastHint = []byte{7, 8, 9, 10} }}, + {name: "MemoryRoot", mut: func(t *testing.T, e *ExpectedState) { e.MemoryRoot = emptyHash }}, + {name: "StepsSinceLastContextSwitch", mut: func(t *testing.T, e *ExpectedState) { + e.threadExpectations.StepsSinceLastContextSwitch += 1 }}, - {name: "Active threadId", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[st.GetCurrentThread().ThreadId].ThreadId += 1 + {name: "TraverseRight", mut: func(t *testing.T, e *ExpectedState) { + e.threadExpectations.traverseRight = !e.threadExpectations.traverseRight }}, - {name: "Active thread exitCode", mut: func(e 
*ExpectedState, st *multithreaded.State) { - e.threadExpectations[st.GetCurrentThread().ThreadId].ExitCode += 1 + {name: "NextThreadId", mut: func(t *testing.T, e *ExpectedState) { e.threadExpectations.NextThreadId += 1 }}, + {name: "ActiveThreadId", mut: func(t *testing.T, e *ExpectedState) { + e.threadExpectations.ActiveThreadId += 1 }}, - {name: "Active thread exited", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[st.GetCurrentThread().ThreadId].Exited = !st.GetCurrentThread().Exited + {name: "Empty thread expectations", mut: func(t *testing.T, e *ExpectedState) { + e.threadExpectations.left = []*ExpectedThreadState{} + e.threadExpectations.right = []*ExpectedThreadState{} }}, - {name: "Active thread PC", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[st.GetCurrentThread().ThreadId].PC += 1 + {name: "Missing single thread expectation", mut: func(t *testing.T, e *ExpectedState) { + if len(e.threadExpectations.left) > 0 { + e.threadExpectations.left = e.threadExpectations.left[:len(e.threadExpectations.left)-1] + } else { + e.threadExpectations.right = e.threadExpectations.right[:len(e.threadExpectations.right)-1] + } }}, - {name: "Active thread NextPC", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[st.GetCurrentThread().ThreadId].NextPC += 1 + {name: "Extra thread expectation", mut: func(t *testing.T, e *ExpectedState) { + e.threadExpectations.left = append(e.threadExpectations.left, newExpectedThreadState(someThread)) }}, - {name: "Active thread HI", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[st.GetCurrentThread().ThreadId].HI += 1 + {name: "Active threadId", mut: func(t *testing.T, e *ExpectedState) { + e.ActiveThread().ThreadId += 1 }}, - {name: "Active thread LO", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[st.GetCurrentThread().ThreadId].LO += 1 + {name: "Active thread exitCode", mut: func(t 
*testing.T, e *ExpectedState) { + e.ActiveThread().ExitCode += 1 }}, - {name: "Active thread Registers", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[st.GetCurrentThread().ThreadId].Registers[0] += 1 + {name: "Active thread exited", mut: func(t *testing.T, e *ExpectedState) { + e.ActiveThread().Exited = !e.ActiveThread().Exited }}, - {name: "Active thread dropped", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[st.GetCurrentThread().ThreadId].Dropped = true + {name: "Active thread PC", mut: func(t *testing.T, e *ExpectedState) { + e.ActiveThread().PC += 1 }}, - {name: "Inactive threadId", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[FindNextThread(st).ThreadId].ThreadId += 1 + {name: "Active thread NextPC", mut: func(t *testing.T, e *ExpectedState) { + e.ActiveThread().NextPC += 1 }}, - {name: "Inactive thread exitCode", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[FindNextThread(st).ThreadId].ExitCode += 1 + {name: "Active thread HI", mut: func(t *testing.T, e *ExpectedState) { + e.ActiveThread().HI += 1 }}, - {name: "Inactive thread exited", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[FindNextThread(st).ThreadId].Exited = !FindNextThread(st).Exited + {name: "Active thread LO", mut: func(t *testing.T, e *ExpectedState) { + e.ActiveThread().LO += 1 }}, - {name: "Inactive thread PC", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[FindNextThread(st).ThreadId].PC += 1 + {name: "Active thread Registers", mut: func(t *testing.T, e *ExpectedState) { + e.ActiveThread().Registers[0] += 1 }}, - {name: "Inactive thread NextPC", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[FindNextThread(st).ThreadId].NextPC += 1 + {name: "Active thread dropped", mut: func(t *testing.T, e *ExpectedState) { + e.ActiveThread().Dropped = true }}, - {name: "Inactive thread 
HI", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[FindNextThread(st).ThreadId].HI += 1 + {name: "Inactive threadId", mut: func(t *testing.T, e *ExpectedState) { + findInactiveThread(t, e).ThreadId += 1 }}, - {name: "Inactive thread LO", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[FindNextThread(st).ThreadId].LO += 1 + {name: "Inactive thread exitCode", mut: func(t *testing.T, e *ExpectedState) { + findInactiveThread(t, e).ExitCode += 1 }}, - {name: "Inactive thread Registers", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[FindNextThread(st).ThreadId].Registers[0] += 1 + {name: "Inactive thread exited", mut: func(t *testing.T, e *ExpectedState) { + thread := findInactiveThread(t, e) + thread.Exited = !thread.Exited }}, - {name: "Inactive thread dropped", mut: func(e *ExpectedState, st *multithreaded.State) { - e.threadExpectations[FindNextThread(st).ThreadId].Dropped = true + {name: "Inactive thread PC", mut: func(t *testing.T, e *ExpectedState) { + findInactiveThread(t, e).PC += 1 + }}, + {name: "Inactive thread NextPC", mut: func(t *testing.T, e *ExpectedState) { + findInactiveThread(t, e).NextPC += 1 + }}, + {name: "Inactive thread HI", mut: func(t *testing.T, e *ExpectedState) { + findInactiveThread(t, e).HI += 1 + }}, + {name: "Inactive thread LO", mut: func(t *testing.T, e *ExpectedState) { + findInactiveThread(t, e).LO += 1 + }}, + {name: "Inactive thread Registers", mut: func(t *testing.T, e *ExpectedState) { + findInactiveThread(t, e).Registers[0] += 1 + }}, + {name: "Inactive thread dropped", mut: func(t *testing.T, e *ExpectedState) { + findInactiveThread(t, e).Dropped = true }}, } for _, c := range cases { @@ -109,7 +122,7 @@ func TestValidate_shouldCatchMutations(t *testing.T) { testName := fmt.Sprintf("%v (state #%v)", c.name, i) t.Run(testName, func(t *testing.T) { expected := NewExpectedState(t, state) - c.mut(expected, state) + c.mut(t, expected) // We 
should detect the change and fail mockT := &MockTestingT{} @@ -123,9 +136,9 @@ func TestValidate_shouldCatchMutations(t *testing.T) { func TestValidate_shouldPassUnchangedExpectations(t *testing.T) { states := []*multithreaded.State{ - RandomState(0), - RandomState(1), - RandomState(2), + RandomState(10), + RandomState(11), + RandomState(12), } for i, state := range states { @@ -140,6 +153,43 @@ func TestValidate_shouldPassUnchangedExpectations(t *testing.T) { } } +func TestExpectNewThread_DoesNotInheritChangedExpectations(t *testing.T) { + state := RandomState(123) + expected := NewExpectedState(t, state) + + // Make some changes to the active thread + origHI := expected.ActiveThread().HI + expected.ActiveThread().HI = 123 + + // Create a new thread + newThread := expected.ExpectNewThread() + + // New thread should not carry over changes to the original thread + require.Equal(t, origHI, newThread.HI) +} + +func findInactiveThread(t *testing.T, e *ExpectedState) *ExpectedThreadState { + threads := e.threadExpectations.allThreads() + activeThread := e.ActiveThread() + for _, thread := range threads { + if thread.ThreadId != activeThread.ThreadId { + return thread + } + } + t.Error("No inactive thread found") + t.FailNow() + return nil +} + +func randomStateWithMultipleThreads(seed int64) *multithreaded.State { + state := RandomState(int(seed)) + if state.ThreadCount() == 1 { + // Make sure we have at least 2 threads + SetupThreads(seed+100, state, state.TraverseRight, 1, 1) + } + return state +} + type MockTestingT struct { errCount int } diff --git a/cannon/mipsevm/tests/difftester.go b/cannon/mipsevm/tests/difftester.go index 26ed4e7669c74..19eb8790446f7 100644 --- a/cannon/mipsevm/tests/difftester.go +++ b/cannon/mipsevm/tests/difftester.go @@ -20,9 +20,67 @@ import ( type TestNamer[T any] func(testCase T) string -type InitializeStateFn[T any] func(testCase T, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) -type SetExpectationsFn[T 
any] func(testCase T, expect *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult -type PostStepCheckFn[T any] func(t require.TestingT, testCase T, vm VersionedVMTestCase, deps *TestDependencies) +func NoopTestNamer[T any](c T) string { + return "" +} + +type SimpleInitializeStateFn func(t require.TestingT, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) +type SimpleSetExpectationsFn func(t require.TestingT, expect *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult +type SimplePostStepCheckFn func(t require.TestingT, vm VersionedVMTestCase, deps *TestDependencies, witness *mipsevm.StepWitness) + +type soloTestCase struct { + name string +} + +type SimpleDiffTester struct { + diffTester DiffTester[soloTestCase] +} + +// NewSimpleDiffTester returns a DiffTester designed to run only a single default test case +func NewSimpleDiffTester() *SimpleDiffTester { + return &SimpleDiffTester{ + diffTester: *NewDiffTester(func(t soloTestCase) string { + return t.name + }), + } +} + +func (d *SimpleDiffTester) InitState(initStateFn SimpleInitializeStateFn, opts ...mtutil.StateOption) *SimpleDiffTester { + wrappedFn := func(t require.TestingT, _ soloTestCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + initStateFn(t, state, vm, r, goVm) + } + d.diffTester.InitState(wrappedFn, opts...) 
+ return d +} + +func (d *SimpleDiffTester) SetExpectations(setExpectationsFn SimpleSetExpectationsFn) *SimpleDiffTester { + wrappedFn := func(t require.TestingT, testCase soloTestCase, expect *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + return setExpectationsFn(t, expect, vm) + } + d.diffTester.SetExpectations(wrappedFn) + + return d +} + +func (d *SimpleDiffTester) PostCheck(postStepCheckFn SimplePostStepCheckFn) *SimpleDiffTester { + wrappedFn := func(t require.TestingT, testCase soloTestCase, vm VersionedVMTestCase, deps *TestDependencies, wit *mipsevm.StepWitness) { + postStepCheckFn(t, vm, deps, wit) + } + d.diffTester.PostCheck(wrappedFn) + + return d +} + +func (d *SimpleDiffTester) Run(t *testing.T, opts ...TestOption) { + singleTestCase := []soloTestCase{ + {name: "solo test case"}, + } + d.diffTester.run(wrapT(t), singleTestCase, opts...) +} + +type InitializeStateFn[T any] func(t require.TestingT, testCase T, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) +type SetExpectationsFn[T any] func(t require.TestingT, testCase T, expect *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult +type PostStepCheckFn[T any] func(t require.TestingT, testCase T, vm VersionedVMTestCase, deps *TestDependencies, witness *mipsevm.StepWitness) type DiffTester[T any] struct { testNamer TestNamer[T] @@ -70,34 +128,37 @@ func (d *DiffTester[T]) run(t testRunner, testCases []T, opts ...TestOption) { cfg := newTestConfig(t, opts...) 
for _, vm := range cfg.vms { for i, testCase := range testCases { - randSeed := randomSeed(t, d.testNamer(testCase), i) - mods := d.generateTestModifiers(t, testCase, vm, d.setExpectations, cfg, randSeed) + randSeed := cfg.randomSeed + if randSeed == 0 { + randSeed = randomSeed(t, d.testNamer(testCase), i) + } + mods := d.generateTestModifiers(t, testCase, vm, cfg, randSeed) for _, mod := range mods { testName := fmt.Sprintf("%v%v (%v)", d.testNamer(testCase), mod.name, vm.Name) t.Run(testName, func(t testcaseT) { t.Parallel() - testDeps := cfg.testDependencies() - stateOpts := []mtutil.StateOption{mtutil.WithRandomization(randSeed)} - stateOpts = append(stateOpts, d.stateOpts...) - goVm := vm.VMFactory(testDeps.po, testDeps.stdOut, testDeps.stdErr, testDeps.logger, stateOpts...) - state := mtutil.GetMtState(t, goVm) - - // Set up state - r := testutil.NewRandHelper(randSeed * 2) - d.initState(testCase, state, vm, r) - mod.stateMod(state) - - // Set up expectations - expect := d.expectedState(t, state) - execExpectation := d.setExpectations(testCase, expect, vm) - mod.expectMod(expect) + setup := mod.cachedSetup + if setup == nil { + setup = d.newTestSetup(t, testCase, vm, cfg, randSeed, mod) + } - execExpectation.assertExpectedResult(t, goVm, vm, expect, cfg) + expect := setup.expect + execExpectation := setup.expectedResult + var witness *mipsevm.StepWitness + for i := 0; i < cfg.steps; i++ { + if i > 0 { + // After the initial step, we need to set up our expectations again + expect = d.expectedState(t, setup.state) + execExpectation = d.setExpectations(t, testCase, expect, vm) + } + + witness = execExpectation.assertExpectedResult(t, setup.goVm, vm, expect, cfg) + } // Run post-step checks if d.postStepCheck != nil { - d.postStepCheck(t, testCase, vm, testDeps) + d.postStepCheck(t, testCase, vm, setup.deps, witness) } }) } @@ -105,6 +166,34 @@ func (d *DiffTester[T]) run(t testRunner, testCases []T, opts ...TestOption) { } } +func (d *DiffTester[T]) 
newTestSetup(t require.TestingT, testCase T, vm VersionedVMTestCase, cfg *TestConfig, randSeed int64, mod *testModifier) *testSetup { + testDeps := cfg.testDependencies() + + stateOpts := []mtutil.StateOption{mtutil.WithRandomization(randSeed)} + stateOpts = append(stateOpts, d.stateOpts...) + goVm := vm.VMFactory(testDeps.po, testDeps.stdOut, testDeps.stdErr, testDeps.logger, stateOpts...) + + state := mtutil.GetMtState(t, goVm) + d.initState(t, testCase, state, vm, testutil.NewRandHelper(randSeed*2), goVm) + if mod != nil { + mod.stateMod(state) + } + + expect := d.expectedState(t, state) + if mod != nil { + mod.expectMod(expect) + } + expectedResult := d.setExpectations(t, testCase, expect, vm) + + return &testSetup{ + deps: testDeps, + goVm: goVm, + state: state, + expect: expect, + expectedResult: expectedResult, + } +} + func (d *DiffTester[T]) expectedState(t require.TestingT, state *multithreaded.State) *mtutil.ExpectedState { if mtutil.ActiveThreadCount(state) == 0 { // State is invalid, just return an empty expectation @@ -128,32 +217,32 @@ func (d *DiffTester[T]) isConfigValid(t testRunner) bool { } type testModifier struct { - name string - stateMod func(state *multithreaded.State) - expectMod func(expect *mtutil.ExpectedState) + name string + stateMod func(state *multithreaded.State) + expectMod func(expect *mtutil.ExpectedState) + cachedSetup *testSetup } -func newTestModifier(name string) *testModifier { +func newTestModifier(name string, cachedSetup *testSetup) *testModifier { return &testModifier{ - name: name, - stateMod: func(state *multithreaded.State) {}, - expectMod: func(expect *mtutil.ExpectedState) {}, + name: name, + stateMod: func(state *multithreaded.State) {}, + expectMod: func(expect *mtutil.ExpectedState) {}, + cachedSetup: cachedSetup, } } -func (d *DiffTester[T]) generateTestModifiers(t require.TestingT, testCase T, vm VersionedVMTestCase, setExpectations SetExpectationsFn[T], cfg *TestConfig, randSeed int64) []*testModifier { +func 
(d *DiffTester[T]) generateTestModifiers(t require.TestingT, testCase T, vm VersionedVMTestCase, cfg *TestConfig, randSeed int64) []*testModifier { + // Set up state + setup := d.newTestSetup(t, testCase, vm, cfg, randSeed, nil) + + // Build modifiers array, start with the original case (noop modification) modifiers := []*testModifier{ - newTestModifier(""), // Always return a noop + newTestModifier("", setup), // Always return a noop } - // Process expectations - goVm := vm.VMFactory(nil, nil, nil, nil) - state := mtutil.GetMtState(t, goVm) - expect := mtutil.NewExpectedState(t, state) - setExpectations(testCase, expect, vm) - // Generate test modifiers based on expectations - modifiers = append(modifiers, d.memReservationTestModifier(cfg, randSeed, expect)...) + modifiers = append(modifiers, d.memReservationTestModifier(cfg, randSeed, setup.expect)...) return modifiers } @@ -234,6 +323,14 @@ func randomSeed(t require.TestingT, s string, extraData ...int) int64 { return int64(h.Sum64()) } +type testSetup struct { + deps *TestDependencies + goVm mipsevm.FPVM + state *multithreaded.State + expect *mtutil.ExpectedState + expectedResult ExpectedExecResult +} + type TestDependencies struct { po mipsevm.PreimageOracle stdOut io.Writer @@ -242,7 +339,9 @@ type TestDependencies struct { } type TestConfig struct { - vms []VersionedVMTestCase + vms []VersionedVMTestCase + steps int + // Dependencies po func() mipsevm.PreimageOracle stdOut func() io.Writer stdErr func() io.Writer @@ -251,6 +350,8 @@ type TestConfig struct { tracingHooks *tracing.Hooks // Allow consumer to control automated test generation skipAutomaticMemoryReservationTests bool + // Allow consumer to configure a random seed, if not configured (equal to 0) one will be generated + randomSeed int64 } func (c *TestConfig) testDependencies() *TestDependencies { @@ -282,6 +383,18 @@ func WithVm(vm VersionedVMTestCase) TestOption { } } +func WithVms(vms []VersionedVMTestCase) TestOption { + return func(tc 
*TestConfig) { + tc.vms = vms + } +} + +func WithRandomSeed(seed int64) TestOption { + return func(tc *TestConfig) { + tc.randomSeed = seed + } +} + // WithTracingHooks Sets tracing hooks - see: testutil.MarkdownTracer func WithTracingHooks(hooks *tracing.Hooks) TestOption { return func(tc *TestConfig) { @@ -289,23 +402,38 @@ func WithTracingHooks(hooks *tracing.Hooks) TestOption { } } +func WithSteps(steps int) TestOption { + return func(tc *TestConfig) { + if steps < 1 { + steps = 1 + } + tc.steps = steps + } +} + func newTestConfig(t require.TestingT, opts ...TestOption) *TestConfig { testConfig := &TestConfig{ - vms: GetMipsVersionTestCases(t), po: func() mipsevm.PreimageOracle { return nil }, stdOut: func() io.Writer { return os.Stdout }, stdErr: func() io.Writer { return os.Stderr }, logger: testutil.CreateLogger(), + steps: 1, } for _, opt := range opts { opt(testConfig) } + + // Generating vm versions is expensive, only do it if necessary + if testConfig.vms == nil { + testConfig.vms = GetMipsVersionTestCases(t) + } + return testConfig } type ExpectedExecResult interface { - assertExpectedResult(t testing.TB, vm mipsevm.FPVM, vmType VersionedVMTestCase, expect *mtutil.ExpectedState, cfg *TestConfig) + assertExpectedResult(t testing.TB, vm mipsevm.FPVM, vmType VersionedVMTestCase, expect *mtutil.ExpectedState, cfg *TestConfig) *mipsevm.StepWitness } type normalExecResult struct{} @@ -314,7 +442,7 @@ func ExpectNormalExecution() ExpectedExecResult { return normalExecResult{} } -func (e normalExecResult) assertExpectedResult(t testing.TB, goVm mipsevm.FPVM, vmVersion VersionedVMTestCase, expect *mtutil.ExpectedState, cfg *TestConfig) { +func (e normalExecResult) assertExpectedResult(t testing.TB, goVm mipsevm.FPVM, vmVersion VersionedVMTestCase, expect *mtutil.ExpectedState, cfg *TestConfig) *mipsevm.StepWitness { // Step the VM state := goVm.GetState() step := state.GetStep() @@ -324,6 +452,8 @@ func (e normalExecResult) assertExpectedResult(t testing.TB, 
goVm mipsevm.FPVM, // Validate expect.Validate(t, state) testutil.ValidateEVM(t, stepWitness, step, goVm, vmVersion.StateHashFn, vmVersion.Contracts) + + return stepWitness } type vmPanicResult struct { @@ -369,7 +499,7 @@ func ExpectVmPanicWithCustomErr(goPanicMsg interface{}, customErrSignature strin return result } -func (e vmPanicResult) assertExpectedResult(t testing.TB, goVm mipsevm.FPVM, vmVersion VersionedVMTestCase, expect *mtutil.ExpectedState, cfg *TestConfig) { +func (e vmPanicResult) assertExpectedResult(t testing.TB, goVm mipsevm.FPVM, vmVersion VersionedVMTestCase, expect *mtutil.ExpectedState, cfg *TestConfig) *mipsevm.StepWitness { state := goVm.GetState() proofData := e.proofData if proofData == nil { @@ -384,6 +514,8 @@ func (e vmPanicResult) assertExpectedResult(t testing.TB, goVm mipsevm.FPVM, vmV } else { t.Fatalf("Invalid panic value provided. Go panic value must be a string or error. Got: %v", e.panicValue) } + + return nil } type preimageOracleRevertResult struct { @@ -402,9 +534,10 @@ func ExpectPreimageOraclePanic(preimageKey [32]byte, preimageValue []byte, preim } } -func (e preimageOracleRevertResult) assertExpectedResult(t testing.TB, goVm mipsevm.FPVM, vmVersion VersionedVMTestCase, expect *mtutil.ExpectedState, cfg *TestConfig) { +func (e preimageOracleRevertResult) assertExpectedResult(t testing.TB, goVm mipsevm.FPVM, vmVersion VersionedVMTestCase, expect *mtutil.ExpectedState, cfg *TestConfig) *mipsevm.StepWitness { require.PanicsWithValue(t, e.panicMsg, func() { _, _ = goVm.Step(true) }) testutil.AssertPreimageOracleReverts(t, e.preimageKey, e.preimageValue, e.preimageOffset, vmVersion.Contracts) + return nil } type testcaseT interface { diff --git a/cannon/mipsevm/tests/difftester_test.go b/cannon/mipsevm/tests/difftester_test.go index 10a1045630995..2647689bf79c9 100644 --- a/cannon/mipsevm/tests/difftester_test.go +++ b/cannon/mipsevm/tests/difftester_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/require" + 
"github.com/ethereum-optimism/optimism/cannon/mipsevm" "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" mtutil "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded/testutil" @@ -24,13 +25,13 @@ func TestDiffTester_Run_SimpleTest(t *testing.T) { testName := fmt.Sprintf("useCorrectReturnExpectation=%v", useCorrectReturnExpectation) t.Run(testName, func(t *testing.T) { initStateCalled := make(map[string]int) - initState := func(testCase simpleTestCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { + initState := func(t require.TestingT, testCase simpleTestCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { initStateCalled[testCase.name] += 1 testutil.StoreInstruction(state.GetMemory(), state.GetPC(), testCase.insn) } expectationsCalled := make(map[string]int) - setExpectations := func(testCase simpleTestCase, expect *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, testCase simpleTestCase, expect *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { expectationsCalled[testCase.name] += 1 expect.ExpectStep() @@ -53,10 +54,9 @@ func TestDiffTester_Run_SimpleTest(t *testing.T) { // Validate that we invoked initState and setExpectations as expected for _, c := range testCases { - testsPerCase := len(versions) - require.Equal(t, testsPerCase, initStateCalled[c.name]) - // Difftester runs extra calls on the expectations fn in order to analyze the tests - require.Equal(t, testsPerCase+len(versions), expectationsCalled[c.name]) + expectedCalls := len(versions) + require.Equal(t, expectedCalls, initStateCalled[c.name]) + require.Equal(t, expectedCalls, expectationsCalled[c.name]) } // Validate that tests ran and passed as expected @@ -70,6 +70,68 @@ func TestDiffTester_Run_SimpleTest(t *testing.T) { } } +func 
TestDiffTester_Run_WithSteps(t *testing.T) { + outterCases := []struct { + name string + steps int + expectedSteps int + }{ + {name: "0 steps", steps: 0, expectedSteps: 1}, + {name: "negative steps", steps: -1, expectedSteps: 1}, + {name: "1 step", steps: 1, expectedSteps: 1}, + {name: "2 step", steps: 2, expectedSteps: 2}, + {name: "3 step", steps: 3, expectedSteps: 3}, + } + + // Run simple noop instruction (0x0) + cases := []simpleTestCase{ + {name: "a", insn: 0x0}, + } + + for _, oc := range outterCases { + t.Run(oc.name, func(t *testing.T) { + initStateCalled := make(map[string]int) + initState := func(t require.TestingT, testCase simpleTestCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + initStateCalled[testCase.name] += 1 + testutil.StoreInstruction(state.GetMemory(), state.GetPC(), testCase.insn) + } + + expectationsCalled := make(map[string]int) + setExpectations := func(t require.TestingT, testCase simpleTestCase, expect *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expectationsCalled[testCase.name] += 1 + expect.ExpectStep() + return ExpectNormalExecution() + } + + versions := GetMipsVersionTestCases(t) + expectedTestCases := generateExpectedTestCases(cases, versions) + + // Run tests + tRunner := newMockTestRunner(t) + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). 
+ run(tRunner, cases, WithSteps(oc.steps)) + + // Validate that we invoked initState and setExpectations as expected + for _, c := range cases { + initCalls := len(versions) + expectCalls := oc.expectedSteps * len(versions) + require.Equal(t, initCalls, initStateCalled[c.name]) + require.Equal(t, expectCalls, expectationsCalled[c.name]) + } + + // Validate that tests ran and passed as expected + require.Equal(t, len(tRunner.childTestMocks), len(expectedTestCases)) + for _, testCase := range expectedTestCases { + failed, err := tRunner.testFailedOrPanicked(testCase) + require.NoError(t, err) + require.Equal(t, false, failed) + } + }) + } +} + func TestDiffTester_Run_WithMemModifications(t *testing.T) { // Test store word (sw), which modifies memory baseReg := uint32(9) @@ -91,16 +153,16 @@ func TestDiffTester_Run_WithMemModifications(t *testing.T) { t.Run(testName, func(t *testing.T) { initStateCalled := make(map[string]int) - initState := func(tt simpleTestCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { + initState := func(t require.TestingT, tt simpleTestCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { initStateCalled[tt.name] += 1 - testutil.StoreInstruction(state.GetMemory(), pc, tt.insn) + storeInsnWithCache(state, goVm, pc, tt.insn) state.GetMemory().SetWord(effAddr, 0xAA_BB_CC_DD_A1_B1_C1_D1) state.GetRegistersRef()[rtReg] = 0x11_22_33_44_55_66_77_88 state.GetRegistersRef()[baseReg] = base } expectationsCalled := make(map[string]int) - setExpectations := func(tt simpleTestCase, expect *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, tt simpleTestCase, expect *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { expectationsCalled[tt.name] += 1 expect.ExpectStep() expect.ExpectMemoryWrite(effAddr, 0x55_66_77_88_A1_B1_C1_D1) @@ -131,10 +193,9 @@ func TestDiffTester_Run_WithMemModifications(t 
*testing.T) { // Validate that we invoked initState and setExpectations as expected for _, c := range testCases { - testsPerCase := len(versions) * (len(mods) + 1) - require.Equal(t, testsPerCase, initStateCalled[c.name]) - // Difftester runs extra calls on the expectations fn in order to analyze the tests - require.Equal(t, testsPerCase+len(versions), expectationsCalled[c.name]) + expectedCalls := len(versions) * (len(mods) + 1) + require.Equal(t, expectedCalls, initStateCalled[c.name]) + require.Equal(t, expectedCalls, expectationsCalled[c.name]) } // Validate that tests ran and passed @@ -159,14 +220,14 @@ func TestDiffTester_Run_WithPanic(t *testing.T) { testName := fmt.Sprintf("useCorrectReturnExpectation=%v", useCorrectReturnExpectation) t.Run(testName, func(t *testing.T) { initStateCalled := make(map[string]int) - initState := func(testCase simpleTestCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { + initState := func(t require.TestingT, testCase simpleTestCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { initStateCalled[testCase.name] += 1 testutil.StoreInstruction(state.GetMemory(), state.GetPC(), testCase.insn) state.GetRegistersRef()[2] = syscallNum } expectationsCalled := make(map[string]int) - setExpectations := func(testCase simpleTestCase, expect *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, testCase simpleTestCase, expect *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { expectationsCalled[testCase.name] += 1 expect.ExpectStep() @@ -189,10 +250,9 @@ func TestDiffTester_Run_WithPanic(t *testing.T) { // Validate that we invoked initState and setExpectations as expected for _, c := range testCases { - testsPerCase := len(versions) - require.Equal(t, testsPerCase, initStateCalled[c.name]) - // Difftester runs extra calls on the expectations fn in order to analyze the tests - 
require.Equal(t, testsPerCase+len(versions), expectationsCalled[c.name]) + expectedCalls := len(versions) + require.Equal(t, expectedCalls, initStateCalled[c.name]) + require.Equal(t, expectedCalls, expectationsCalled[c.name]) } // Validate that tests ran and passed as expected @@ -220,13 +280,13 @@ func TestDiffTester_Run_WithVm(t *testing.T) { } initStateCalled := make(map[string]int) - initState := func(testCase simpleTestCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { + initState := func(t require.TestingT, testCase simpleTestCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { initStateCalled[testCase.name] += 1 testutil.StoreInstruction(state.GetMemory(), state.GetPC(), testCase.insn) } expectationsCalled := make(map[string]int) - setExpectations := func(testCase simpleTestCase, expect *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, testCase simpleTestCase, expect *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { expectationsCalled[testCase.name] += 1 expect.ExpectStep() @@ -247,8 +307,7 @@ func TestDiffTester_Run_WithVm(t *testing.T) { // Validate that we invoked initState and setExpectations as expected for _, c := range testCases { require.Equal(t, 1, initStateCalled[c.name]) - // Difftester runs extra calls on the expectations fn in order to analyze the tests - require.Equal(t, 2, expectationsCalled[c.name]) + require.Equal(t, 1, expectationsCalled[c.name]) } // Validate that we ran the expected tests diff --git a/cannon/mipsevm/tests/evm_common64_test.go b/cannon/mipsevm/tests/evm_common64_test.go index e4422e277bec8..9bdaf17305bd2 100644 --- a/cannon/mipsevm/tests/evm_common64_test.go +++ b/cannon/mipsevm/tests/evm_common64_test.go @@ -1,17 +1,14 @@ package tests import ( - "fmt" - "os" - "slices" "testing" "github.com/stretchr/testify/require" + 
"github.com/ethereum-optimism/optimism/cannon/mipsevm" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" mtutil "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded/testutil" "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" - "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" ) func TestEVM_SingleStep_Operators64(t *testing.T) { @@ -100,14 +97,21 @@ func TestEVM_SingleStep_Bitwise64(t *testing.T) { } func TestEVM_SingleStep_Shift64(t *testing.T) { - cases := []struct { + + type testCase struct { name string rd Word rt Word sa uint32 funct uint32 expectRes Word - }{ + } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ {name: "dsll", funct: 0x38, rd: Word(0xAA_BB_CC_DD_A1_B1_C1_D1), rt: Word(0x1), sa: 0, expectRes: Word(0x1)}, // dsll t8, s2, 0 {name: "dsll", funct: 0x38, rd: Word(0xAA_BB_CC_DD_A1_B1_C1_D1), rt: Word(0x1), sa: 1, expectRes: Word(0x2)}, // dsll t8, s2, 1 {name: "dsll", funct: 0x38, rd: Word(0xAA_BB_CC_DD_A1_B1_C1_D1), rt: Word(0x1), sa: 31, expectRes: Word(0x80_00_00_00)}, // dsll t8, s2, 31 @@ -146,39 +150,27 @@ func TestEVM_SingleStep_Shift64(t *testing.T) { {name: "dsra32", funct: 0x3f, rd: Word(0xAA_BB_CC_DD_A1_B1_C1_D1), rt: Word(0x7F_FF_FF_FF_FF_FF_FF_FF), sa: 31, expectRes: Word(0x0)}, // dsra32 t8, s2, 1 } - for i, tt := range cases { - for _, v := range GetMipsVersionTestCases(t) { - v := v - testName := fmt.Sprintf("%v %v", v.Name, tt.name) - t.Run(testName, func(t *testing.T) { - pc := Word(0x0) - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i)), mtutil.WithPCAndNextPC(pc)) - state := goVm.GetState() - var insn uint32 - var rtReg uint32 - var rdReg uint32 - rtReg = 18 - rdReg = 8 - insn = rtReg<<16 | rdReg<<11 | tt.sa<<6 | tt.funct - state.GetRegistersRef()[rdReg] = tt.rd - state.GetRegistersRef()[rtReg] = tt.rt - testutil.StoreInstruction(state.GetMemory(), pc, insn) - step := 
state.GetStep() - - // Setup expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().Registers[rdReg] = tt.expectRes - - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) - } + pc := Word(0x0) + rdReg := uint32(8) + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + rtReg := uint32(18) + insn := rtReg<<16 | rdReg<<11 | tt.sa<<6 | tt.funct + state.GetRegistersRef()[rdReg] = tt.rd + state.GetRegistersRef()[rtReg] = tt.rt + storeInsnWithCache(state, goVm, pc, insn) } + + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ActiveThread().Registers[rdReg] = tt.expectRes + + return ExpectNormalExecution() + } + + NewDiffTester(testNamer). + InitState(initState, mtutil.WithPCAndNextPC(pc)). + SetExpectations(setExpectations). 
+ Run(t, cases) } func TestEVM_SingleStep_LoadStore64(t *testing.T) { @@ -548,27 +540,15 @@ func TestEVM_SingleStep_DCloDClz64(t *testing.T) { {name: "dclz", rs: Word(0x80_00_00_00_00_00_00_00), expectedResult: Word(0), funct: 0b10_0100}, } - vmVersions := GetMipsVersionTestCases(t) - require.True(t, slices.ContainsFunc(vmVersions, func(v VersionedVMTestCase) bool { - features := versions.FeaturesForVersion(v.Version) - return features.SupportDclzDclo - }), "dclz/dclo feature not tested") - - initState := func(tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), insnFn(tt)) + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + storeInsnWithCache(state, goVm, state.GetPC(), insnFn(tt)) state.GetRegistersRef()[rsReg] = tt.rs } - setExpectations := func(tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { - features := versions.FeaturesForVersion(vm.Version) - if features.SupportDclzDclo { - expected.ExpectStep() - expected.ActiveThread().Registers[rdReg] = tt.expectedResult - return ExpectNormalExecution() - } else { - expectedMsg := fmt.Sprintf("invalid instruction: %x", insnFn(tt)) - return ExpectVmPanic(expectedMsg, "invalid instruction") - } + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ActiveThread().Registers[rdReg] = tt.expectedResult + return ExpectNormalExecution() } NewDiffTester(testNamer). 
diff --git a/cannon/mipsevm/tests/evm_common_test.go b/cannon/mipsevm/tests/evm_common_test.go index 03154ddf259ae..46065708b1231 100644 --- a/cannon/mipsevm/tests/evm_common_test.go +++ b/cannon/mipsevm/tests/evm_common_test.go @@ -2,7 +2,6 @@ package tests import ( "bytes" - "fmt" "io" "math/big" "os" @@ -25,48 +24,57 @@ import ( "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" ) +type insnCache interface { + UpdateInstructionCache(pc arch.Word) +} + +func storeInsnWithCache(state *multithreaded.State, goVm mipsevm.FPVM, pc arch.Word, insn uint32) { + testutil.StoreInstruction(state.GetMemory(), pc, insn) + if ic, ok := goVm.(insnCache); ok { + ic.UpdateInstructionCache(pc) + } +} + func TestEVM_SingleStep_Jump(t *testing.T) { - versions := GetMipsVersionTestCases(t) - cases := []struct { + type testCase struct { name string pc arch.Word nextPC arch.Word insn uint32 expectNextPC arch.Word expectLink bool - }{ + } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ {name: "j MSB set target", pc: 0, nextPC: 4, insn: 0x0A_00_00_02, expectNextPC: 0x08_00_00_08}, // j 0x02_00_00_02 {name: "j non-zero PC region", pc: 0x10000000, nextPC: 0x10000004, insn: 0x08_00_00_02, expectNextPC: 0x10_00_00_08}, // j 0x2 {name: "jal MSB set target", pc: 0, nextPC: 4, insn: 0x0E_00_00_02, expectNextPC: 0x08_00_00_08, expectLink: true}, // jal 0x02_00_00_02 {name: "jal non-zero PC region", pc: 0x10000000, nextPC: 0x10000004, insn: 0x0C_00_00_02, expectNextPC: 0x10_00_00_08, expectLink: true}, // jal 0x2 } - for _, v := range versions { - for i, tt := range cases { - testName := fmt.Sprintf("%v (%v)", tt.name, v.Name) - t.Run(testName, func(t *testing.T) { - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i)), mtutil.WithPC(tt.pc), mtutil.WithNextPC(tt.nextPC)) - state := goVm.GetState() - testutil.StoreInstruction(state.GetMemory(), tt.pc, tt.insn) - step := state.GetStep() - - 
// Setup expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().NextPC = tt.expectNextPC - if tt.expectLink { - expected.ActiveThread().Registers[31] = state.GetPC() + 8 - } - - stepWitness, err := goVm.Step(true) - require.NoError(t, err) + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + state.GetCurrentThread().Cpu.PC = tt.pc + state.GetCurrentThread().Cpu.NextPC = tt.nextPC + storeInsnWithCache(state, goVm, tt.pc, tt.insn) + } - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ActiveThread().NextPC = tt.expectNextPC + if tt.expectLink { + expected.ActiveThread().Registers[31] = tt.pc + 8 } + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). 
+ Run(t, cases) } func TestEVM_SingleStep_Operators(t *testing.T) { @@ -139,54 +147,52 @@ func TestEVM_SingleStep_Bitwise(t *testing.T) { } func TestEVM_SingleStep_Lui(t *testing.T) { - versions := GetMipsVersionTestCases(t) - - cases := []struct { + type testCase struct { name string rtReg uint32 imm uint32 expectRt Word - }{ + } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ {name: "lui unsigned", rtReg: 5, imm: 0x1234, expectRt: 0x1234_0000}, {name: "lui signed", rtReg: 7, imm: 0x8765, expectRt: signExtend64(0x8765_0000)}, } - for _, v := range versions { - for i, tt := range cases { - testName := fmt.Sprintf("%v (%v)", tt.name, v.Name) - t.Run(testName, func(t *testing.T) { - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i))) - state := goVm.GetState() - insn := 0b1111<<26 | uint32(tt.rtReg)<<16 | (tt.imm & 0xFFFF) - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), insn) - step := state.GetStep() - - // Setup expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().Registers[tt.rtReg] = tt.expectRt - stepWitness, err := goVm.Step(true) - require.NoError(t, err) + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + insn := 0b1111<<26 | uint32(tt.rtReg)<<16 | (tt.imm & 0xFFFF) + storeInsnWithCache(state, goVm, state.GetPC(), insn) + } - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) - } + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ActiveThread().Registers[tt.rtReg] = tt.expectRt + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState). 
+ SetExpectations(setExpectations). + Run(t, cases) } func TestEVM_SingleStep_CloClz(t *testing.T) { - versions := GetMipsVersionTestCases(t) - - rsReg := uint32(5) - rdReg := uint32(6) - cases := []struct { + type testCase struct { name string rs Word expectedResult Word funct uint32 - }{ + } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ {name: "clo", rs: 0xFFFF_FFFE, expectedResult: 31, funct: 0b10_0001}, {name: "clo", rs: 0xE000_0000, expectedResult: 3, funct: 0b10_0001}, {name: "clo", rs: 0x8000_0000, expectedResult: 1, funct: 0b10_0001}, @@ -198,41 +204,39 @@ func TestEVM_SingleStep_CloClz(t *testing.T) { {name: "clz, sign-extended", rs: signExtend64(0x8000_0000), expectedResult: 0, funct: 0b10_0000}, } - for _, v := range versions { - for i, tt := range cases { - testName := fmt.Sprintf("%v (%v)", tt.name, v.Name) - t.Run(testName, func(t *testing.T) { - // Set up state - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i))) - state := goVm.GetState() - insn := 0b01_1100<<26 | rsReg<<21 | rdReg<<11 | tt.funct - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), insn) - state.GetRegistersRef()[rsReg] = tt.rs - step := state.GetStep() - - // Setup expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().Registers[rdReg] = tt.expectedResult - stepWitness, err := goVm.Step(true) - require.NoError(t, err) + rsReg := uint32(5) + rdReg := uint32(6) + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + insn := 0b01_1100<<26 | rsReg<<21 | rdReg<<11 | tt.funct + storeInsnWithCache(state, goVm, state.GetPC(), insn) + state.GetRegistersRef()[rsReg] = tt.rs + } - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) - } + setExpectations 
:= func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ActiveThread().Registers[rdReg] = tt.expectedResult + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). + Run(t, cases) } func TestEVM_SingleStep_MovzMovn(t *testing.T) { - versions := GetMipsVersionTestCases(t) - cases := []struct { + type testCase struct { name string funct uint32 testValue Word shouldSucceed bool - }{ + } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ {name: "movz, success", funct: uint32(0xa), testValue: 0, shouldSucceed: true}, {name: "movz, failure, testVal=1", funct: uint32(0xa), testValue: 1, shouldSucceed: false}, {name: "movz, failure, testVal=2", funct: uint32(0xa), testValue: 2, shouldSucceed: false}, @@ -240,75 +244,71 @@ func TestEVM_SingleStep_MovzMovn(t *testing.T) { {name: "movn, success, testVal=2", funct: uint32(0xb), testValue: 2, shouldSucceed: true}, {name: "movn, failure", funct: uint32(0xb), testValue: 0, shouldSucceed: false}, } - for _, v := range versions { - for i, tt := range cases { - testName := fmt.Sprintf("%v (%v)", tt.name, v.Name) - t.Run(testName, func(t *testing.T) { - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i)), mtutil.WithPC(0), mtutil.WithNextPC(4)) - state := goVm.GetState() - rsReg := uint32(9) - rtReg := uint32(10) - rdReg := uint32(8) - insn := rsReg<<21 | rtReg<<16 | rdReg<<11 | tt.funct - - state.GetRegistersRef()[rtReg] = tt.testValue - state.GetRegistersRef()[rsReg] = Word(0xb) - state.GetRegistersRef()[rdReg] = Word(0xa) - testutil.StoreInstruction(state.GetMemory(), 0, insn) - step := state.GetStep() - - // Setup expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - if tt.shouldSucceed { - expected.ActiveThread().Registers[rdReg] = 
state.GetRegistersRef()[rsReg] - } - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) + pc := arch.Word(0) + rsReg := uint32(9) + rtReg := uint32(10) + rdReg := uint32(8) + val := Word(0xb) + otherVal := Word(0xa) + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + insn := rsReg<<21 | rtReg<<16 | rdReg<<11 | tt.funct + state.GetRegistersRef()[rtReg] = tt.testValue + state.GetRegistersRef()[rsReg] = val + state.GetRegistersRef()[rdReg] = otherVal + storeInsnWithCache(state, goVm, pc, insn) + } + + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + if tt.shouldSucceed { + expected.ActiveThread().Registers[rdReg] = val } + return ExpectNormalExecution() } + NewDiffTester(testNamer). + InitState(initState, mtutil.WithPCAndNextPC(pc)). + SetExpectations(setExpectations). 
+ Run(t, cases) } func TestEVM_SingleStep_MfhiMflo(t *testing.T) { - versions := GetMipsVersionTestCases(t) - cases := []struct { - name string - funct uint32 - hi Word - lo Word - }{ - {name: "mflo", funct: uint32(0x12), lo: Word(0xdeadbeef), hi: Word(0x0)}, - {name: "mfhi", funct: uint32(0x10), lo: Word(0x0), hi: Word(0xdeadbeef)}, + type testCase struct { + name string + funct uint32 + hi Word + lo Word + result Word } - expect := Word(0xdeadbeef) - for _, v := range versions { - for i, tt := range cases { - testName := fmt.Sprintf("%v (%v)", tt.name, v.Name) - t.Run(testName, func(t *testing.T) { - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i)), mtutil.WithLO(tt.lo), mtutil.WithHI(tt.hi)) - state := goVm.GetState() - rdReg := uint32(8) - insn := rdReg<<11 | tt.funct - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), insn) - step := state.GetStep() - // Setup expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().Registers[rdReg] = expect - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) - } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ + {name: "mflo", funct: uint32(0x12), lo: Word(0xdeadbeef), hi: Word(0x0), result: Word(0xdeadbeef)}, + {name: "mfhi", funct: uint32(0x10), lo: Word(0x0), hi: Word(0xdeadbeef), result: Word(0xdeadbeef)}, + } + + rdReg := uint32(8) + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + insn := rdReg<<11 | tt.funct + storeInsnWithCache(state, goVm, state.GetPC(), insn) + state.GetCurrentThread().Cpu.HI = tt.hi + state.GetCurrentThread().Cpu.LO = tt.lo + } + + setExpectations := func(t require.TestingT, tt testCase, 
expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ActiveThread().Registers[rdReg] = tt.result + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). + Run(t, cases) } func TestEVM_SingleStep_MulDiv(t *testing.T) { @@ -348,108 +348,109 @@ func TestEVM_SingleStep_MulDiv(t *testing.T) { } func TestEVM_SingleStep_MthiMtlo(t *testing.T) { - versions := GetMipsVersionTestCases(t) - cases := []struct { + type testCase struct { name string funct uint32 - }{ + } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ {name: "mtlo", funct: uint32(0x13)}, {name: "mthi", funct: uint32(0x11)}, } + val := Word(0xdeadbeef) - for _, v := range versions { - for i, tt := range cases { - testName := fmt.Sprintf("%v (%v)", tt.name, v.Name) - t.Run(testName, func(t *testing.T) { - - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i))) - state := goVm.GetState() - rsReg := uint32(8) - insn := rsReg<<21 | tt.funct - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), insn) - state.GetRegistersRef()[rsReg] = val - step := state.GetStep() - // Setup expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - if tt.funct == 0x11 { - expected.ActiveThread().HI = state.GetRegistersRef()[rsReg] - } else { - expected.ActiveThread().LO = state.GetRegistersRef()[rsReg] - } - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + rsReg := uint32(8) + insn := rsReg<<21 | tt.funct + storeInsnWithCache(state, goVm, state.GetPC(), insn) + 
state.GetRegistersRef()[rsReg] = val + } + + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + if tt.funct == 0x11 { + expected.ActiveThread().HI = val + } else { + expected.ActiveThread().LO = val } + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). + Run(t, cases) } func TestEVM_SingleStep_BeqBne(t *testing.T) { - initialPC := Word(800) - negative := func(value Word) uint16 { - flipped := testutil.FlipSign(value) - return uint16(flipped) - } - versions := GetMipsVersionTestCases(t) - cases := []struct { + type testCase struct { name string imm uint16 opcode uint32 rs Word rt Word expectedNextPC Word - }{ - // on success, expectedNextPC should be: (imm * 4) + pc + 4 + } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ {name: "bne, success", opcode: uint32(0x5), imm: 10, rs: Word(0x123), rt: Word(0x456), expectedNextPC: 844}, // bne $t0, $t1, 16 {name: "bne, success, signed-extended offset", opcode: uint32(0x5), imm: negative(3), rs: Word(0x123), rt: Word(0x456), expectedNextPC: 792}, // bne $t0, $t1, 16 {name: "bne, fail", opcode: uint32(0x5), imm: 10, rs: Word(0x123), rt: Word(0x123), expectedNextPC: 808}, // bne $t0, $t1, 16 {name: "beq, success", opcode: uint32(0x4), imm: 10, rs: Word(0x123), rt: Word(0x123), expectedNextPC: 844}, // beq $t0, $t1, 16 {name: "beq, success, sign-extended offset", opcode: uint32(0x4), imm: negative(25), rs: Word(0x123), rt: Word(0x123), expectedNextPC: 704}, // beq $t0, $t1, 16 - {name: "beq, fail", opcode: uint32(0x4), imm: 10, rs: Word(0x123), rt: Word(0x456), expectedNextPC: 808}, // beq $t0, $t1, 16 + {name: "beq, fail", opcode: uint32(0x4), imm: 10, rs: Word(0x123), rt: Word(0x456), expectedNextPC: 808}, } - for _, v := range versions { - for i, tt := range cases { - testName := fmt.Sprintf("%v (%v)", 
tt.name, v.Name) - t.Run(testName, func(t *testing.T) { - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i)), mtutil.WithPCAndNextPC(initialPC)) - state := goVm.GetState() - rsReg := uint32(9) - rtReg := uint32(8) - insn := tt.opcode<<26 | rsReg<<21 | rtReg<<16 | uint32(tt.imm) - state.GetRegistersRef()[rtReg] = tt.rt - state.GetRegistersRef()[rsReg] = tt.rs - testutil.StoreInstruction(state.GetMemory(), initialPC, insn) - step := state.GetStep() - - // Setup expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().NextPC = tt.expectedNextPC - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) - } + pc := Word(800) + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + rsReg := uint32(9) + rtReg := uint32(8) + insn := tt.opcode<<26 | rsReg<<21 | rtReg<<16 | uint32(tt.imm) + state.GetRegistersRef()[rtReg] = tt.rt + state.GetRegistersRef()[rsReg] = tt.rs + storeInsnWithCache(state, goVm, pc, insn) + } + + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ActiveThread().NextPC = tt.expectedNextPC + return ExpectNormalExecution() } + NewDiffTester(testNamer). + InitState(initState, mtutil.WithPCAndNextPC(pc)). + SetExpectations(setExpectations). 
+ Run(t, cases) +} + +func negative(value Word) uint16 { + flipped := testutil.FlipSign(value) + return uint16(flipped) } func TestEVM_SingleStep_SlSr(t *testing.T) { - versions := GetMipsVersionTestCases(t) - cases := []struct { + type testCase struct { name string rs Word rt Word rsReg uint32 funct uint16 expectVal Word - }{ + } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ {name: "sll", funct: uint16(4) << 6, rt: Word(0x20), rsReg: uint32(0x0), expectVal: Word(0x200)}, // sll t0, t1, 3 {name: "sll with overflow", funct: uint16(1) << 6, rt: Word(0x8000_0000), rsReg: uint32(0x0), expectVal: 0x0}, {name: "sll with sign extension", funct: uint16(4) << 6, rt: Word(0x0800_0000), rsReg: uint32(0x0), expectVal: signExtend64(0x8000_0000)}, @@ -471,41 +472,30 @@ func TestEVM_SingleStep_SlSr(t *testing.T) { {name: "srav with sign extension", funct: uint16(7), rt: Word(0xdeafbeef), rs: Word(12), rsReg: uint32(0xa), expectVal: signExtend64(Word(0xfffdeafb))}, // srav t0, t1, t2 } - for _, v := range versions { - for i, tt := range cases { - testName := fmt.Sprintf("%v (%v)", tt.name, v.Name) - t.Run(testName, func(t *testing.T) { - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i)), mtutil.WithPC(0), mtutil.WithNextPC(4)) - state := goVm.GetState() - var insn uint32 - rtReg := uint32(0x9) - rdReg := uint32(0x8) - insn = tt.rsReg<<21 | rtReg<<16 | rdReg<<11 | uint32(tt.funct) - state.GetRegistersRef()[rtReg] = tt.rt - state.GetRegistersRef()[tt.rsReg] = tt.rs - testutil.StoreInstruction(state.GetMemory(), 0, insn) - step := state.GetStep() - - // Setup expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - - expected.ActiveThread().Registers[rdReg] = tt.expectVal - - stepWitness, err := goVm.Step(true) - require.NoError(t, err) + pc := Word(0) + rdReg := uint32(0x8) + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, 
vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + rtReg := uint32(0x9) + insn := tt.rsReg<<21 | rtReg<<16 | rdReg<<11 | uint32(tt.funct) + state.GetRegistersRef()[rtReg] = tt.rt + state.GetRegistersRef()[tt.rsReg] = tt.rs + storeInsnWithCache(state, goVm, pc, insn) + } - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) - } + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ActiveThread().Registers[rdReg] = tt.expectVal + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState, mtutil.WithPCAndNextPC(pc)). + SetExpectations(setExpectations). + Run(t, cases) } func TestEVM_SingleStep_JrJalr(t *testing.T) { - versions := GetMipsVersionTestCases(t) - cases := []struct { + type testCase struct { name string funct uint16 rsReg uint32 @@ -515,82 +505,90 @@ func TestEVM_SingleStep_JrJalr(t *testing.T) { nextPC Word expectLink bool errorMsg string - }{ + } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ {name: "jr", funct: uint16(0x8), rsReg: 8, jumpTo: 0x34, pc: 0, nextPC: 4}, // jr t0 {name: "jr, delay slot", funct: uint16(0x8), rsReg: 8, jumpTo: 0x34, pc: 0, nextPC: 8, errorMsg: "jump in delay slot"}, // jr t0 {name: "jalr", funct: uint16(0x9), rsReg: 8, jumpTo: 0x34, rdReg: uint32(0x9), expectLink: true, pc: 0, nextPC: 4}, // jalr t1, t0 {name: "jalr, delay slot", funct: uint16(0x9), rsReg: 8, jumpTo: 0x34, rdReg: uint32(0x9), expectLink: true, pc: 0, nextPC: 100, errorMsg: "jump in delay slot"}, // jalr t1, t0 } - for _, v := range versions { - for i, tt := range cases { - testName := fmt.Sprintf("%v (%v)", tt.name, v.Name) - t.Run(testName, func(t *testing.T) { - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i)), 
mtutil.WithPC(tt.pc), mtutil.WithNextPC(tt.nextPC)) - state := goVm.GetState() - insn := tt.rsReg<<21 | tt.rdReg<<11 | uint32(tt.funct) - state.GetRegistersRef()[tt.rsReg] = tt.jumpTo - testutil.StoreInstruction(state.GetMemory(), 0, insn) - step := state.GetStep() - - if tt.errorMsg != "" { - proofData := v.ProofGenerator(t, goVm.GetState()) - errorMatcher := testutil.StringErrorMatcher(tt.errorMsg) - require.Panics(t, func() { _, _ = goVm.Step(false) }) - testutil.AssertEVMReverts(t, state, v.Contracts, nil, proofData, errorMatcher) - } else { - // Setup expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().NextPC = tt.jumpTo - if tt.expectLink { - expected.ActiveThread().Registers[tt.rdReg] = state.GetPC() + 8 - } - - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - } - }) + + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + insn := tt.rsReg<<21 | tt.rdReg<<11 | uint32(tt.funct) + state.GetRegistersRef()[tt.rsReg] = tt.jumpTo + state.GetCurrentThread().Cpu.PC = tt.pc + state.GetCurrentThread().Cpu.NextPC = tt.nextPC + storeInsnWithCache(state, goVm, tt.pc, insn) + } + + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + if tt.errorMsg != "" { + return ExpectVmPanic(tt.errorMsg, tt.errorMsg) + } else { + expected.ExpectStep() + expected.ActiveThread().NextPC = tt.jumpTo + if tt.expectLink { + expected.ActiveThread().Registers[tt.rdReg] = tt.pc + 8 + } + return ExpectNormalExecution() } } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). 
+ Run(t, cases) } func TestEVM_SingleStep_Sync(t *testing.T) { - versions := GetMipsVersionTestCases(t) - syncInsn := uint32(0x0000_000F) - for _, v := range versions { - testName := fmt.Sprintf("Sync (%v)", v.Name) - t.Run(testName, func(t *testing.T) { - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(248))) - state := goVm.GetState() - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), syncInsn) - step := state.GetStep() + type testCase struct { + name string + } - // Setup expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() + testNamer := func(tc testCase) string { + return tc.name + } - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) + cases := []testCase{ + {name: "simple"}, + } + + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + syncInsn := uint32(0x0000_000F) + storeInsnWithCache(state, goVm, state.GetPC(), syncInsn) + } + + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). 
+ Run(t, cases) } func TestEVM_MMap(t *testing.T) { - versions := GetMipsVersionTestCases(t) - cases := []struct { + type testCase struct { name string heap arch.Word address arch.Word size arch.Word shouldFail bool expectedHeap arch.Word - }{ + } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ {name: "Increment heap by max value", heap: program.HEAP_START, address: 0, size: ^arch.Word(0), shouldFail: true}, {name: "Increment heap to 0", heap: program.HEAP_START, address: 0, size: ^arch.Word(0) - program.HEAP_START + 1, shouldFail: true}, {name: "Increment heap to previous page", heap: program.HEAP_START, address: 0, size: ^arch.Word(0) - program.HEAP_START - memory.PageSize + 1, shouldFail: true}, @@ -602,44 +600,36 @@ func TestEVM_MMap(t *testing.T) { {name: "Request specific address", heap: program.HEAP_START, address: 0x50_00_00_00, size: 0, shouldFail: false, expectedHeap: program.HEAP_START}, } - for _, v := range versions { - for i, c := range cases { - testName := fmt.Sprintf("%v (%v)", c.name, v.Name) - t.Run(testName, func(t *testing.T) { - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i)), mtutil.WithHeap(c.heap)) - state := goVm.GetState() - - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = arch.SysMmap - state.GetRegistersRef()[4] = c.address - state.GetRegistersRef()[5] = c.size - step := state.GetStep() - - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - if c.shouldFail { - expected.ActiveThread().Registers[2] = exec.MipsEINVAL - expected.ActiveThread().Registers[7] = exec.SysErrorSignal - } else { - expected.Heap = c.expectedHeap - if c.address == 0 { - expected.ActiveThread().Registers[2] = state.GetHeap() - expected.ActiveThread().Registers[7] = 0 - } else { - expected.ActiveThread().Registers[2] = c.address - expected.ActiveThread().Registers[7] = 0 - } - } - - 
stepWitness, err := goVm.Step(true) - require.NoError(t, err) + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = arch.SysMmap + state.GetRegistersRef()[4] = c.address + state.GetRegistersRef()[5] = c.size + state.Heap = c.heap + } - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + if c.shouldFail { + expected.ActiveThread().Registers[2] = exec.MipsEINVAL + expected.ActiveThread().Registers[7] = exec.SysErrorSignal + } else { + expected.Heap = c.expectedHeap + if c.address == 0 { + expected.ActiveThread().Registers[2] = c.heap + expected.ActiveThread().Registers[7] = 0 + } else { + expected.ActiveThread().Registers[2] = c.address + expected.ActiveThread().Registers[7] = 0 + } } + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). 
+ Run(t, cases) } func TestEVM_SysGetRandom_isImplemented(t *testing.T) { @@ -703,15 +693,15 @@ func TestEVM_SysGetRandom(t *testing.T) { step := uint64(0x1a2b3c4d5e6f7531) - 1 randomData := arch.Word(0x4141302768c9e9d0) - initState := func(testCase testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), syscallInsn) + initState := func(t require.TestingT, testCase testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) state.GetMemory().SetWord(effAddr, startingMemory) state.GetRegistersRef()[register.RegV0] = arch.SysGetRandom state.GetRegistersRef()[register.RegA0] = effAddr + testCase.bufAddrOffset state.GetRegistersRef()[register.RegA1] = testCase.bufLen } - setExpectations := func(testCase testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, testCase testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { isNoop := !versions.FeaturesForVersion(vm.Version).SupportWorkingSysGetRandom expectedMemory := testCase.expectedRandDataMask&randomData | ^testCase.expectedRandDataMask&startingMemory @@ -730,7 +720,10 @@ func TestEVM_SysGetRandom(t *testing.T) { NewDiffTester(testNamer). InitState(initState, mtutil.WithStep(step)). SetExpectations(setExpectations). - Run(t, cases) + Run(t, cases, SkipAutomaticMemoryReservationTests()) + //Was getting failure from the “automatic memory reservation” modifier that the DiffTester adds. + //I think the mod executes extra setup on a different thread before the syscall, which I think bumps the step counter. + //Since sys_getrandom seeds splitmix64 with the incremented step, I think those extra steps shift the seed. 
} func TestEVM_SysWriteHint(t *testing.T) { @@ -891,8 +884,8 @@ func TestEVM_SysWriteHint(t *testing.T) { }, } - initState := func(tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), syscallInsn) + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) state.LastHint = tt.lastHint state.GetRegistersRef()[2] = arch.SysWrite state.GetRegistersRef()[4] = exec.FdHintWrite @@ -903,7 +896,7 @@ func TestEVM_SysWriteHint(t *testing.T) { require.NoError(t, err) } - setExpectations := func(tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { expected.ExpectStep() expected.LastHint = tt.expectedLastHint expected.ActiveThread().Registers[2] = arch.Word(tt.bytesToWrite) // Return count of bytes written @@ -911,7 +904,7 @@ func TestEVM_SysWriteHint(t *testing.T) { return ExpectNormalExecution() } - postCheck := func(t require.TestingT, tt testCase, vm VersionedVMTestCase, deps *TestDependencies) { + postCheck := func(t require.TestingT, tt testCase, vm VersionedVMTestCase, deps *TestDependencies, wit *mipsevm.StepWitness) { trackingOracle, ok := deps.po.(*testutil.HintTrackingOracle) require.True(t, ok) require.Equal(t, tt.expectedHints, trackingOracle.Hints()) @@ -944,21 +937,22 @@ func TestEVM_Fault(t *testing.T) { {name: "illegal instruction", nextPC: 0, insn: 0b111110 << 26, evmErrStr: "invalid instruction", goPanicValue: "invalid instruction: f8000000"}, {name: "branch in delay-slot", nextPC: 8, insn: 0x11_02_00_03, evmErrStr: "branch in delay slot", goPanicValue: "branch in delay slot"}, {name: "jump in delay-slot", nextPC: 8, insn: 0x0c_00_00_0c, 
evmErrStr: "jump in delay slot", goPanicValue: "jump in delay slot"}, - {name: "misaligned instruction", pc: 1, nextPC: 4, insn: 0b110111_00001_00001 << 16, evmErrSig: "InvalidPC()", goPanicValue: fmt.Errorf("invalid pc: 1")}, - {name: "misaligned instruction", pc: 2, nextPC: 4, insn: 0b110111_00001_00001 << 16, evmErrSig: "InvalidPC()", goPanicValue: fmt.Errorf("invalid pc: 2")}, - {name: "misaligned instruction", pc: 3, nextPC: 4, insn: 0b110111_00001_00001 << 16, evmErrSig: "InvalidPC()", goPanicValue: fmt.Errorf("invalid pc: 3")}, - {name: "misaligned instruction", pc: 5, nextPC: 4, insn: 0b110111_00001_00001 << 16, evmErrSig: "InvalidPC()", goPanicValue: fmt.Errorf("invalid pc: 5")}, + + {name: "misaligned instruction", pc: 1, nextPC: 4, insn: 0b110111_00001_00001 << 16, evmErrSig: "InvalidPC()", goPanicValue: "unaligned instruction fetch: PC = 0x1"}, + {name: "misaligned instruction", pc: 2, nextPC: 4, insn: 0b110111_00001_00001 << 16, evmErrSig: "InvalidPC()", goPanicValue: "unaligned instruction fetch: PC = 0x2"}, + {name: "misaligned instruction", pc: 3, nextPC: 4, insn: 0b110111_00001_00001 << 16, evmErrSig: "InvalidPC()", goPanicValue: "unaligned instruction fetch: PC = 0x3"}, + {name: "misaligned instruction", pc: 5, nextPC: 4, insn: 0b110111_00001_00001 << 16, evmErrSig: "InvalidPC()", goPanicValue: "unaligned instruction fetch: PC = 0x5"}, } - initState := func(tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { - testutil.StoreInstruction(state.GetMemory(), 0, tt.insn) + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + storeInsnWithCache(state, goVm, 0, tt.insn) state.GetCurrentThread().Cpu.PC = tt.pc state.GetCurrentThread().Cpu.NextPC = tt.nextPC // set the return address ($ra) to jump into when test completes state.GetRegistersRef()[31] = testutil.EndAddr } - setExpectations := func(tt testCase, expected 
*mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { // Memory is accessed when processing illegal instructions, so we need to make sure to append a memory proof // See: https://github.com/ethereum-optimism/optimism/blob/a08b5b343a0005c6308566cd8afa810dd67e0e8f/cannon/mipsevm/exec/mips_instructions.go#L102-L105 rsReg := (tt.insn >> 21) & 0x1F @@ -1080,7 +1074,7 @@ func TestEVM_SyscallEventFdProgram(t *testing.T) { state := goVm.GetState() start := time.Now() - for i := 0; i < 500_000; i++ { + for i := 0; i < 550_000; i++ { step := goVm.GetState().GetStep() if goVm.GetState().GetExited() { break @@ -1148,7 +1142,7 @@ func TestEVM_HelloProgram(t *testing.T) { state := goVm.GetState() start := time.Now() - for i := 0; i < 450_000; i++ { + for i := 0; i < 500_000; i++ { step := goVm.GetState().GetStep() if goVm.GetState().GetExited() { break diff --git a/cannon/mipsevm/tests/evm_multithreaded64_test.go b/cannon/mipsevm/tests/evm_multithreaded64_test.go index 662d559711b93..19036552cef9a 100644 --- a/cannon/mipsevm/tests/evm_multithreaded64_test.go +++ b/cannon/mipsevm/tests/evm_multithreaded64_test.go @@ -4,7 +4,6 @@ package tests import ( "encoding/binary" "fmt" - "os" "slices" "testing" @@ -21,9 +20,16 @@ import ( ) func TestEVM_MT64_LL(t *testing.T) { - memVal := Word(0x11223344_55667788) - memValNeg := Word(0xF1223344_F5667788) - cases := []struct { + type llVariation struct { + name string + withExistingReservation bool + } + llVariations := []llVariation{ + {"with existing reservation", true}, + {"without existing reservation", false}, + } + + type baseTest struct { name string base Word offset int @@ -31,7 +37,11 @@ func TestEVM_MT64_LL(t *testing.T) { memVal Word retReg int retVal Word - }{ + } + + memVal := Word(0x11223344_55667788) + memValNeg := Word(0xF1223344_F5667788) + baseTests := []baseTest{ {name: 
"8-byte-aligned addr", base: 0x01, offset: 0x0107, addr: 0x0108, memVal: memVal, retVal: 0x11223344, retReg: 5}, {name: "8-byte-aligned addr, neg value", base: 0x01, offset: 0x0107, addr: 0x0108, memVal: memValNeg, retVal: 0xFFFFFFFF_F1223344, retReg: 5}, {name: "8-byte-aligned addr, extra bits", base: 0x01, offset: 0x0109, addr: 0x010A, memVal: memVal, retVal: 0x11223344, retReg: 5}, @@ -44,55 +54,50 @@ func TestEVM_MT64_LL(t *testing.T) { {name: "4-byte-aligned addr, addr signed extended w overflow", base: 0x1000_0001, offset: 0xFF03, addr: 0x0000_0000_0FFF_FF04, memVal: memVal, retVal: 0x55667788, retReg: 5}, {name: "Return register set to 0", base: 0x01, offset: 0x0107, addr: 0x0108, memVal: memVal, retVal: 0x11223344, retReg: 0}, } - versions := GetMipsVersionTestCases(t) - for _, v := range versions { - for i, c := range cases { - for _, withExistingReservation := range []bool{true, false} { - tName := fmt.Sprintf("%v (vm = %v, withExistingReservation = %v)", c.name, v.Name, withExistingReservation) - t.Run(tName, func(t *testing.T) { - effAddr := arch.AddressMask & c.addr - - retReg := c.retReg - baseReg := 6 - insn := uint32((0b11_0000 << 26) | (baseReg & 0x1F << 21) | (retReg & 0x1F << 16) | (0xFFFF & c.offset)) - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i)), mtutil.WithPCAndNextPC(0x40)) - state := mtutil.GetMtState(t, goVm) - step := state.GetStep() - - // Set up state - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), insn) - state.GetMemory().SetWord(effAddr, c.memVal) - state.GetRegistersRef()[baseReg] = c.base - if withExistingReservation { - state.LLReservationStatus = multithreaded.LLStatusActive32bit - state.LLAddress = c.addr + 1 - state.LLOwnerThread = 123 - } else { - state.LLReservationStatus = multithreaded.LLStatusNone - state.LLAddress = 0 - state.LLOwnerThread = 0 - } - - // Set up expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - 
expected.LLReservationStatus = multithreaded.LLStatusActive32bit - expected.LLAddress = c.addr - expected.LLOwnerThread = state.GetCurrentThread().ThreadId - if retReg != 0 { - expected.ActiveThread().Registers[retReg] = c.retVal - } - - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), v.Contracts) - }) - } + + type testCase = testutil.TestCaseVariation[baseTest, llVariation] + testNamer := func(tc testCase) string { + return fmt.Sprintf("%v-%v", tc.Base.name, tc.Variation.name) + } + cases := testutil.TestVariations(baseTests, llVariations) + + initState := func(t require.TestingT, testCase testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + c := testCase.Base + retReg := c.retReg + baseReg := 6 + insn := uint32((0b11_0000 << 26) | (baseReg & 0x1F << 21) | (retReg & 0x1F << 16) | (0xFFFF & c.offset)) + + storeInsnWithCache(state, goVm, state.GetPC(), insn) + state.GetMemory().SetWord(testutil.EffAddr(c.addr), c.memVal) + state.GetRegistersRef()[baseReg] = c.base + if testCase.Variation.withExistingReservation { + state.LLReservationStatus = multithreaded.LLStatusActive32bit + state.LLAddress = c.addr + 1 + state.LLOwnerThread = 123 + } else { + state.LLReservationStatus = multithreaded.LLStatusNone + state.LLAddress = 0 + state.LLOwnerThread = 0 } } + + setExpectations := func(t require.TestingT, testCase testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.LLReservationStatus = multithreaded.LLStatusActive32bit + expected.LLAddress = testCase.Base.addr + expected.LLOwnerThread = expected.ActiveThreadId() + + retReg := testCase.Base.retReg + if retReg != 0 { + expected.ActiveThread().Registers[retReg] = testCase.Base.retVal + } + return ExpectNormalExecution() + } + + 
NewDiffTester(testNamer). + InitState(initState, mtutil.WithPCAndNextPC(0x40)). + SetExpectations(setExpectations). + Run(t, cases) } func TestEVM_MT64_SC(t *testing.T) { @@ -141,7 +146,7 @@ func TestEVM_MT64_SC(t *testing.T) { } cases := testutil.TestVariations(baseTests, llVariations) - initState := func(tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { c := tt.Base llVar := tt.Variation @@ -167,7 +172,7 @@ func TestEVM_MT64_SC(t *testing.T) { // Setup state state.GetCurrentThread().ThreadId = c.threadId - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), insn) + storeInsnWithCache(state, goVm, state.GetPC(), insn) state.GetRegistersRef()[baseReg] = c.base state.GetRegistersRef()[rtReg] = c.value state.LLReservationStatus = llVar.llReservationStatus @@ -175,7 +180,7 @@ func TestEVM_MT64_SC(t *testing.T) { state.LLOwnerThread = llOwnerThread } - setExpectations := func(tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { c := tt.Base llVar := tt.Variation @@ -201,16 +206,26 @@ func TestEVM_MT64_SC(t *testing.T) { } func TestEVM_MT64_LLD(t *testing.T) { + type llVariation struct { + name string + withExistingReservation bool + } + llVariations := []llVariation{ + {"with existing reservation", true}, + {"without existing reservation", false}, + } + memVal := Word(0x11223344_55667788) memValNeg := Word(0xF1223344_F5667788) - cases := []struct { + type baseTest struct { name string base Word offset int addr Word memVal Word retReg int - }{ + } + baseTests := []baseTest{ {name: "Aligned addr", base: 0x01, offset: 0x0107, addr: 0x0108, memVal: memVal, retReg: 5}, {name: "Aligned addr, neg value", base: 
0x01, offset: 0x0107, addr: 0x0108, memVal: memValNeg, retReg: 5}, {name: "Unaligned addr, offset=1", base: 0x01, offset: 0x0100, addr: 0x0101, memVal: memVal, retReg: 5}, @@ -224,55 +239,49 @@ func TestEVM_MT64_LLD(t *testing.T) { {name: "Aligned addr, signed extended w overflow", base: 0x1000_0001, offset: 0xFF07, addr: 0x0000_0000_0FFF_FF08, memVal: memVal, retReg: 5}, {name: "Return register set to 0", base: 0x01, offset: 0x0107, addr: 0x0108, memVal: memVal, retReg: 0}, } - versions := GetMipsVersionTestCases(t) - for _, v := range versions { - for i, c := range cases { - for _, withExistingReservation := range []bool{true, false} { - tName := fmt.Sprintf("%v (vm = %v, withExistingReservation = %v)", c.name, v.Name, withExistingReservation) - t.Run(tName, func(t *testing.T) { - effAddr := arch.AddressMask & c.addr - - retReg := c.retReg - baseReg := 6 - insn := uint32((0b11_0100 << 26) | (baseReg & 0x1F << 21) | (retReg & 0x1F << 16) | (0xFFFF & c.offset)) - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i)), mtutil.WithPCAndNextPC(0x40)) - state := mtutil.GetMtState(t, goVm) - step := state.GetStep() - - // Set up state - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), insn) - state.GetMemory().SetWord(effAddr, c.memVal) - state.GetRegistersRef()[baseReg] = c.base - if withExistingReservation { - state.LLReservationStatus = multithreaded.LLStatusActive64bit - state.LLAddress = c.addr + 1 - state.LLOwnerThread = 123 - } else { - state.LLReservationStatus = multithreaded.LLStatusNone - state.LLAddress = 0 - state.LLOwnerThread = 0 - } - - // Set up expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.LLReservationStatus = multithreaded.LLStatusActive64bit - expected.LLAddress = c.addr - expected.LLOwnerThread = state.GetCurrentThread().ThreadId - if retReg != 0 { - expected.ActiveThread().Registers[retReg] = c.memVal - } - - stepWitness, err := 
goVm.Step(true) - require.NoError(t, err) - - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), v.Contracts) - }) - } + + type testCase = testutil.TestCaseVariation[baseTest, llVariation] + testNamer := func(tc testCase) string { + return fmt.Sprintf("%v-%v", tc.Base.name, tc.Variation.name) + } + cases := testutil.TestVariations(baseTests, llVariations) + + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + c := tt.Base + baseReg := 6 + insn := uint32((0b11_0100 << 26) | (baseReg & 0x1F << 21) | (c.retReg & 0x1F << 16) | (0xFFFF & c.offset)) + + storeInsnWithCache(state, goVm, state.GetPC(), insn) + state.GetMemory().SetWord(testutil.EffAddr(c.addr), c.memVal) + state.GetRegistersRef()[baseReg] = c.base + if tt.Variation.withExistingReservation { + state.LLReservationStatus = multithreaded.LLStatusActive64bit + state.LLAddress = c.addr + 1 + state.LLOwnerThread = 123 + } else { + state.LLReservationStatus = multithreaded.LLStatusNone + state.LLAddress = 0 + state.LLOwnerThread = 0 } + } + + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + c := tt.Base + expected.ExpectStep() + expected.LLReservationStatus = multithreaded.LLStatusActive64bit + expected.LLAddress = c.addr + expected.LLOwnerThread = expected.ActiveThreadId() + if c.retReg != 0 { + expected.ActiveThread().Registers[c.retReg] = c.memVal + } + return ExpectNormalExecution() + } + + NewDiffTester(testNamer). + InitState(initState, mtutil.WithPCAndNextPC(0x40)). + SetExpectations(setExpectations). 
+ Run(t, cases) } func TestEVM_MT64_SCD(t *testing.T) { @@ -322,7 +331,7 @@ func TestEVM_MT64_SCD(t *testing.T) { cases := testutil.TestVariations(baseTests, llVariations) value := Word(0x11223344_55667788) - initState := func(tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { c := tt.Base llVar := tt.Variation @@ -347,7 +356,7 @@ func TestEVM_MT64_SCD(t *testing.T) { insn := uint32((0b11_1100 << 26) | (baseReg & 0x1F << 21) | (c.rtReg & 0x1F << 16) | (0xFFFF & c.offset)) state.GetCurrentThread().ThreadId = c.threadId - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), insn) + storeInsnWithCache(state, goVm, state.GetPC(), insn) state.GetRegistersRef()[baseReg] = c.base state.GetRegistersRef()[c.rtReg] = value state.LLReservationStatus = llVar.llReservationStatus @@ -355,7 +364,7 @@ func TestEVM_MT64_SCD(t *testing.T) { state.LLOwnerThread = llOwnerThread } - setExpectations := func(tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { c := tt.Base llVar := tt.Variation @@ -440,18 +449,18 @@ func TestEVM_MT_SysRead_Preimage64(t *testing.T) { preimageValue := make([]byte, 0, 8) preimageValue = binary.BigEndian.AppendUint32(preimageValue, 0x12_34_56_78) preimageValue = binary.BigEndian.AppendUint32(preimageValue, 0x98_76_54_32) - initState := func(testCase testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { + initState := func(t require.TestingT, testCase testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { state.PreimageKey = testutil.Keccak256Preimage(preimageValue) state.PreimageOffset = testCase.preimageOffset 
state.GetRegistersRef()[2] = arch.SysRead state.GetRegistersRef()[4] = exec.FdPreimageRead state.GetRegistersRef()[5] = testCase.addr state.GetRegistersRef()[6] = testCase.count - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), syscallInsn) + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) state.GetMemory().SetWord(testutil.EffAddr(testCase.addr), testCase.prestateMem) } - setExpectations := func(testCase testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, testCase testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { expected.ExpectStep() expected.ActiveThread().Registers[2] = testCase.writeLen expected.ActiveThread().Registers[7] = 0 // no error @@ -473,90 +482,45 @@ func TestEVM_MT_SysRead_Preimage64(t *testing.T) { Run(t, cases, WithPreimageOracle(po)) } -func TestEVM_MT_SysRead_FromEventFd(t *testing.T) { +func TestEVM_MT_SysReadWrite_WithEventFd(t *testing.T) { t.Parallel() - vmVersions := GetMipsVersionTestCases(t) - for i, ver := range vmVersions { - t.Run(ver.Name, func(t *testing.T) { - t.Parallel() - addr := Word(0x00_00_FF_00) - effAddr := arch.AddressMask & addr - goVm := ver.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i))) - state := mtutil.GetMtState(t, goVm) - step := state.GetStep() - - // Define LL-related params - llAddress := effAddr - llOwnerThread := state.GetCurrentThread().ThreadId - - // Set up state - state.GetRegistersRef()[2] = arch.SysRead - state.GetRegistersRef()[4] = exec.FdEventFd - state.GetRegistersRef()[5] = addr - state.GetRegistersRef()[6] = 1 - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), syscallInsn) - state.LLReservationStatus = multithreaded.LLStatusNone - state.LLAddress = llAddress - state.LLOwnerThread = llOwnerThread - state.GetMemory().SetWord(effAddr, Word(0x12_EE_EE_EE_FF_FF_FF_FF)) - // Setup expectations - expected 
:= mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().Registers[2] = exec.MipsEAGAIN - expected.ActiveThread().Registers[7] = exec.SysErrorSignal - - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), ver.Contracts) - }) + type testCase struct { + name string + syscallNum Word } -} -func TestEVM_MT_SysWrite_ToEventFd(t *testing.T) { - t.Parallel() - vmVersions := GetMipsVersionTestCases(t) - for i, ver := range vmVersions { - t.Run(ver.Name, func(t *testing.T) { - t.Parallel() - addr := Word(0x00_00_FF_00) - effAddr := arch.AddressMask & addr - goVm := ver.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i))) - state := mtutil.GetMtState(t, goVm) - step := state.GetStep() - - // Define LL-related params - llAddress := effAddr - llOwnerThread := state.GetCurrentThread().ThreadId - - // Set up state - state.GetRegistersRef()[2] = arch.SysWrite - state.GetRegistersRef()[4] = exec.FdEventFd - state.GetRegistersRef()[5] = addr - state.GetRegistersRef()[6] = 1 - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), syscallInsn) - state.LLReservationStatus = multithreaded.LLStatusNone - state.LLAddress = llAddress - state.LLOwnerThread = llOwnerThread - state.GetMemory().SetWord(effAddr, Word(0x12_EE_EE_EE_FF_FF_FF_FF)) + testNamer := func(tc testCase) string { + return tc.name + } - // Setup expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().Registers[2] = exec.MipsEAGAIN - expected.ActiveThread().Registers[7] = exec.SysErrorSignal + cases := []testCase{ + {name: "SysRead", syscallNum: arch.SysRead}, + {name: "SysWrite", syscallNum: arch.SysWrite}, + } - stepWitness, err := goVm.Step(true) - require.NoError(t, err) + initState := func(t require.TestingT, tt testCase, state 
*multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + addr := Word(0x00_00_FF_00) + state.GetRegistersRef()[2] = tt.syscallNum + state.GetRegistersRef()[4] = exec.FdEventFd + state.GetRegistersRef()[5] = addr + state.GetRegistersRef()[6] = 1 + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + // Set a memory value to ensure that memory at the target address is not modified + state.GetMemory().SetWord(addr, Word(0x12_EE_EE_EE_FF_FF_FF_FF)) + } - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), ver.Contracts) - }) + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ActiveThread().Registers[2] = exec.MipsEAGAIN + expected.ActiveThread().Registers[7] = exec.SysErrorSignal + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). 
+ Run(t, cases) } func TestEVM_MT_StoreOpsClearMemReservation64(t *testing.T) { @@ -601,16 +565,16 @@ func TestEVM_MT_StoreOpsClearMemReservation64(t *testing.T) { //rt := Word(0x12_34_56_78_12_34_56_78) baseReg := uint32(5) rtReg := uint32(6) - initState := func(testCase testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { + initState := func(t require.TestingT, testCase testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { insn := uint32((testCase.opcode << 26) | (baseReg & 0x1F << 21) | (rtReg & 0x1F << 16) | (0xFFFF & testCase.offset)) state.GetRegistersRef()[rtReg] = rt state.GetRegistersRef()[baseReg] = testCase.base - testutil.StoreInstruction(state.GetMemory(), pc, insn) + storeInsnWithCache(state, goVm, pc, insn) state.GetMemory().SetWord(testCase.effAddr, testCase.preMem) } - setExpectations := func(testCase testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, testCase testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { expected.ExpectStep() expected.ExpectMemoryWrite(testCase.effAddr, testCase.postMem) return ExpectNormalExecution() @@ -663,9 +627,6 @@ var NoopSyscalls64 = map[string]uint32{ func getNoopSyscalls64(vmVersion versions.StateVersion) map[string]uint32 { noOpCalls := maps.Clone(NoopSyscalls64) features := versions.FeaturesForVersion(vmVersion) - if !features.SupportNoopMprotect { - delete(noOpCalls, "SysMprotect") - } if features.SupportWorkingSysGetRandom { delete(noOpCalls, "SysGetRandom") } @@ -726,12 +687,12 @@ func TestEVM_UndefinedSyscall(t *testing.T) { {"SysLlseek", arch.SysLlseek}, } - initState := func(tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) + initState := func(t require.TestingT, tt testCase, state 
*multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) state.GetRegistersRef()[2] = Word(tt.syscallNum) } - setExpectations := func(tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { goPanic := fmt.Sprintf("unrecognized syscall: %d", tt.syscallNum) evmErr := "unimplemented syscall" return ExpectVmPanic(goPanic, evmErr) diff --git a/cannon/mipsevm/tests/evm_multithreaded_test.go b/cannon/mipsevm/tests/evm_multithreaded_test.go index 3d4ab48786500..1f455fc63d474 100644 --- a/cannon/mipsevm/tests/evm_multithreaded_test.go +++ b/cannon/mipsevm/tests/evm_multithreaded_test.go @@ -3,7 +3,6 @@ package tests import ( "fmt" - "os" "testing" "github.com/stretchr/testify/require" @@ -15,20 +14,27 @@ import ( mtutil "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded/testutil" "github.com/ethereum-optimism/optimism/cannon/mipsevm/register" "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" - "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" ) type Word = arch.Word func TestEVM_MT_LL(t *testing.T) { + type testVariation struct { + name string + withExistingReservation bool + } + testVariations := []testVariation{ + {"with existing reservation", true}, + {"without existing reservation", false}, + } + // Set up some test values that will be reused posValue := uint64(0xAAAA_BBBB_1122_3344) posValueRet := uint64(0x1122_3344) negValue := uint64(0x1111_1111_8877_6655) negRetValue := uint64(0xFFFF_FFFF_8877_6655) // Sign extended version of negValue - // Note: parameters are written as 64-bit values. 
For 32-bit architectures, these values are downcast to 32-bit - cases := []struct { + type baseTest struct { name string base uint64 offset int @@ -36,7 +42,8 @@ func TestEVM_MT_LL(t *testing.T) { memValue uint64 retVal uint64 rtReg int - }{ + } + baseTests := []baseTest{ {name: "Aligned addr", base: 0x01, offset: 0x0133, expectedAddr: 0x0134, memValue: posValue, retVal: posValueRet, rtReg: 5}, {name: "Aligned addr, negative value", base: 0x01, offset: 0x0133, expectedAddr: 0x0134, memValue: negValue, retVal: negRetValue, rtReg: 5}, {name: "Aligned addr, addr signed extended", base: 0x01, offset: 0xFF33, expectedAddr: 0xFFFF_FFFF_FFFF_FF34, memValue: posValue, retVal: posValueRet, rtReg: 5}, @@ -44,53 +51,50 @@ func TestEVM_MT_LL(t *testing.T) { {name: "Unaligned addr, addr sign extended w overflow", base: 0xFF12_0001, offset: 0x8405, expectedAddr: 0xFF11_8406, memValue: posValue, retVal: posValueRet, rtReg: 5}, {name: "Return register set to 0", base: 0xFF12_0001, offset: 0x7404, expectedAddr: 0xFF12_7405, memValue: posValue, retVal: 0, rtReg: 0}, } - vmVersions := GetMipsVersionTestCases(t) - for _, ver := range vmVersions { - for i, c := range cases { - for _, withExistingReservation := range []bool{true, false} { - tName := fmt.Sprintf("%v (vm = %v, withExistingReservation = %v)", c.name, ver.Name, withExistingReservation) - t.Run(tName, func(t *testing.T) { - rtReg := c.rtReg - baseReg := 6 - insn := uint32((0b11_0000 << 26) | (baseReg & 0x1F << 21) | (rtReg & 0x1F << 16) | (0xFFFF & c.offset)) - goVm := ver.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i)), mtutil.WithPCAndNextPC(0x40)) - state := mtutil.GetMtState(t, goVm) - step := state.GetStep() - - // Set up state - testutil.SetMemoryUint64(t, state.GetMemory(), Word(c.expectedAddr), c.memValue) - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), insn) - state.GetRegistersRef()[baseReg] = Word(c.base) - if withExistingReservation { - 
state.LLReservationStatus = multithreaded.LLStatusActive32bit - state.LLAddress = Word(c.expectedAddr + 1) - state.LLOwnerThread = 123 - } else { - state.LLReservationStatus = multithreaded.LLStatusNone - state.LLAddress = 0 - state.LLOwnerThread = 0 - } - - // Set up expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.LLReservationStatus = multithreaded.LLStatusActive32bit - expected.LLAddress = Word(c.expectedAddr) - expected.LLOwnerThread = state.GetCurrentThread().ThreadId - if rtReg != 0 { - expected.ActiveThread().Registers[rtReg] = Word(c.retVal) - } - - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), ver.Contracts) - }) - } + + type testCase = testutil.TestCaseVariation[baseTest, testVariation] + testNamer := func(tc testCase) string { + return fmt.Sprintf("%v-%v", tc.Base.name, tc.Variation.name) + } + cases := testutil.TestVariations(baseTests, testVariations) + + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + c := tt.Base + + baseReg := 6 + insn := uint32((0b11_0000 << 26) | (baseReg & 0x1F << 21) | (c.rtReg & 0x1F << 16) | (0xFFFF & c.offset)) + + // Set up state + testutil.SetMemoryUint64(t, state.GetMemory(), Word(c.expectedAddr), c.memValue) + storeInsnWithCache(state, goVm, state.GetPC(), insn) + state.GetRegistersRef()[baseReg] = Word(c.base) + if tt.Variation.withExistingReservation { + state.LLReservationStatus = multithreaded.LLStatusActive32bit + state.LLAddress = Word(c.expectedAddr + 1) + state.LLOwnerThread = 123 + } else { + state.LLReservationStatus = multithreaded.LLStatusNone + state.LLAddress = 0 + state.LLOwnerThread = 0 } } + + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) 
ExpectedExecResult { + c := tt.Base + expected.ExpectStep() + expected.LLReservationStatus = multithreaded.LLStatusActive32bit + expected.LLAddress = Word(c.expectedAddr) + expected.LLOwnerThread = expected.ActiveThreadId() + if c.rtReg != 0 { + expected.ActiveThread().Registers[c.rtReg] = Word(c.retVal) + } + return ExpectNormalExecution() + } + + NewDiffTester(testNamer). + InitState(initState, mtutil.WithPCAndNextPC(0x40)). + SetExpectations(setExpectations). + Run(t, cases) } func TestEVM_MT_SC(t *testing.T) { @@ -135,7 +139,7 @@ func TestEVM_MT_SC(t *testing.T) { // Set up some test values that will be reused memValue := uint64(0x1122_3344_5566_7788) - initState := func(tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { c := tt.Base llVar := tt.Variation @@ -160,7 +164,7 @@ func TestEVM_MT_SC(t *testing.T) { insn := uint32((0b11_1000 << 26) | (baseReg & 0x1F << 21) | (c.rtReg & 0x1F << 16) | (0xFFFF & c.offset)) testutil.SetMemoryUint64(t, state.GetMemory(), Word(c.expectedAddr), memValue) state.GetCurrentThread().ThreadId = c.threadId - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), insn) + storeInsnWithCache(state, goVm, state.GetPC(), insn) state.GetRegistersRef()[baseReg] = c.base state.GetRegistersRef()[c.rtReg] = Word(c.storeValue) state.LLReservationStatus = llVar.llReservationStatus @@ -168,7 +172,7 @@ func TestEVM_MT_SC(t *testing.T) { state.LLOwnerThread = llOwnerThread } - setExpectations := func(tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { c := tt.Base llVar := tt.Variation @@ -194,12 +198,17 @@ func TestEVM_MT_SC(t *testing.T) { } func TestEVM_SysClone_FlagHandling(t 
*testing.T) { - - cases := []struct { + type testCase struct { name string flags Word valid bool - }{ + } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ {"the supported flags bitmask", exec.ValidCloneFlags, true}, {"no flags", 0, false}, {"all flags", ^Word(0), false}, @@ -211,41 +220,33 @@ func TestEVM_SysClone_FlagHandling(t *testing.T) { {"multiple unsupported flags", exec.CloneUntraced | exec.CloneParentSettid, false}, } - for _, c := range cases { - c := c - for _, version := range GetMipsVersionTestCases(t) { - version := version - t.Run(fmt.Sprintf("%v-%v", version.Name, c.name), func(t *testing.T) { - state := multithreaded.CreateEmptyState() - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = arch.SysClone // Set syscall number - state.GetRegistersRef()[4] = c.flags // Set first argument - curStep := state.Step - - var err error - var stepWitness *mipsevm.StepWitness - goVm := multithreaded.NewInstrumentedState(state, nil, os.Stdout, os.Stderr, nil, nil, versions.FeaturesForVersion(version.Version)) - if !c.valid { - // The VM should exit - stepWitness, err = goVm.Step(true) - require.NoError(t, err) - require.Equal(t, curStep+1, state.GetStep()) - require.Equal(t, true, goVm.GetState().GetExited()) - require.Equal(t, uint8(mipsevm.VMStatusPanic), goVm.GetState().GetExitCode()) - require.Equal(t, 1, state.ThreadCount()) - } else { - stepWitness, err = goVm.Step(true) - require.NoError(t, err) - require.Equal(t, curStep+1, state.GetStep()) - require.Equal(t, false, goVm.GetState().GetExited()) - require.Equal(t, uint8(0), goVm.GetState().GetExitCode()) - require.Equal(t, 2, state.ThreadCount()) - } - - testutil.ValidateEVM(t, stepWitness, curStep, goVm, multithreaded.GetStateHashFn(), version.Contracts) - }) + stackPtr := Word(204) + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm 
mipsevm.FPVM) { + mtutil.InitializeSingleThread(r.Intn(10000), state, true) + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = arch.SysClone // Set syscall number + state.GetRegistersRef()[4] = c.flags // Set first argument + state.GetRegistersRef()[5] = stackPtr // a1 - the stack pointer + } + + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + if !c.valid { + // The VM should exit + expected.Step += 1 + expected.ExpectNoContextSwitch() + expected.Exited = true + expected.ExitCode = uint8(mipsevm.VMStatusPanic) + } else { + // Otherwise, we should clone the thread as normal + setCloneExpectations(expected, stackPtr) } + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). + Run(t, cases) } func TestEVM_SysClone_Successful(t *testing.T) { @@ -264,9 +265,9 @@ func TestEVM_SysClone_Successful(t *testing.T) { } stackPtr := Word(100) - initState := func(c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { mtutil.InitializeSingleThread(r.Intn(10000), state, c.traverseRight) - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) state.GetRegistersRef()[2] = arch.SysClone // the syscall number state.GetRegistersRef()[4] = exec.ValidCloneFlags // a0 - first argument, clone flags state.GetRegistersRef()[5] = stackPtr // a1 - the stack pointer @@ -275,32 +276,8 @@ func TestEVM_SysClone_Successful(t *testing.T) { require.Equal(t, Word(1), state.NextThreadId) } - setExpectations := func(c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { - expected.Step += 1 - expectedNewThread := 
expected.ExpectNewThread() - expected.ActiveThreadId = expectedNewThread.ThreadId - expected.StepsSinceLastContextSwitch = 0 - if c.traverseRight { - expected.RightStackSize += 1 - } else { - expected.LeftStackSize += 1 - } - - // Original thread expectations - prestateNextPC := expected.ActiveThread().NextPC - expected.PrestateActiveThread().PC = prestateNextPC - expected.PrestateActiveThread().NextPC = prestateNextPC + 4 - expected.PrestateActiveThread().Registers[2] = 1 - expected.PrestateActiveThread().Registers[7] = 0 - // New thread expectations - expectedNewThread.PC = prestateNextPC - expectedNewThread.NextPC = prestateNextPC + 4 - expectedNewThread.ThreadId = 1 - expectedNewThread.Registers[register.RegSyscallRet1] = 0 - expectedNewThread.Registers[register.RegSyscallErrno] = 0 - expectedNewThread.Registers[register.RegSP] = stackPtr - - return ExpectNormalExecution() + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + return setCloneExpectations(expected, stackPtr) } NewDiffTester(testNamer). 
@@ -309,161 +286,162 @@ func TestEVM_SysClone_Successful(t *testing.T) { Run(t, cases) } +// setCloneExpectations sets state expectations assuming we start with 1 thread +func setCloneExpectations(expected *mtutil.ExpectedState, stackPointer Word) ExpectedExecResult { + expected.Step += 1 + expectedNewThread := expected.ExpectNewThread() + expected.ExpectActiveThreadId(expectedNewThread.ThreadId) + expected.ExpectContextSwitch() + + // Original thread expectations + prestateNextPC := expected.PrestateActiveThread().NextPC + expected.PrestateActiveThread().PC = prestateNextPC + expected.PrestateActiveThread().NextPC = prestateNextPC + 4 + expected.PrestateActiveThread().Registers[2] = 1 + expected.PrestateActiveThread().Registers[7] = 0 + // New thread expectations + expectedNewThread.PC = prestateNextPC + expectedNewThread.NextPC = prestateNextPC + 4 + expectedNewThread.ThreadId = 1 + expectedNewThread.Registers[register.RegSyscallRet1] = 0 + expectedNewThread.Registers[register.RegSyscallErrno] = 0 + expectedNewThread.Registers[register.RegSP] = stackPointer + + return ExpectNormalExecution() +} + func TestEVM_SysGetTID(t *testing.T) { - cases := []struct { + type testCase struct { name string threadId Word - }{ + } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ {"zero", 0}, {"non-zero", 11}, } - vmVersions := GetMipsVersionTestCases(t) - for _, ver := range vmVersions { - for i, c := range cases { - testName := fmt.Sprintf("%v (%v)", c.name, ver.Name) - t.Run(testName, func(t *testing.T) { - goVm := ver.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i*789))) - state := mtutil.GetMtState(t, goVm) - mtutil.InitializeSingleThread(i*789, state, false) - - state.GetCurrentThread().ThreadId = c.threadId - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = arch.SysGetTID // Set syscall number - step := state.Step - - // Set up 
post-state expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().Registers[2] = c.threadId - expected.ActiveThread().Registers[7] = 0 - - // State transition - var err error - var stepWitness *mipsevm.StepWitness - stepWitness, err = goVm.Step(true) - require.NoError(t, err) - - // Validate post-state - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), ver.Contracts) - }) - } + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + mtutil.InitializeSingleThread(r.Intn(10000), state, false) + state.GetCurrentThread().ThreadId = c.threadId + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = arch.SysGetTID // Set syscall number + } + + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ActiveThread().Registers[2] = c.threadId + expected.ActiveThread().Registers[7] = 0 + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). 
+ Run(t, cases) } func TestEVM_SysExit(t *testing.T) { - cases := []struct { + type testVariation struct { + name string + traverseRight bool + } + testVariations := []testVariation{ + {name: "traverse right", traverseRight: true}, + {name: "traverse left", traverseRight: false}, + } + + type baseTest struct { name string threadCount int shouldExitGlobally bool - }{ + } + baseTests := []baseTest{ // If we exit the last thread, the whole process should exit {name: "one thread", threadCount: 1, shouldExitGlobally: true}, {name: "two threads ", threadCount: 2}, {name: "three threads ", threadCount: 3}, } - vmVersions := GetMipsVersionTestCases(t) - for _, ver := range vmVersions { - for i, c := range cases { - testName := fmt.Sprintf("%v (%v)", c.name, ver.Name) - t.Run(testName, func(t *testing.T) { - exitCode := uint8(3) - - goVm := ver.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i*133))) - state := mtutil.GetMtState(t, goVm) - mtutil.SetupThreads(int64(i*1111), state, i%2 == 0, c.threadCount, 0) - - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = arch.SysExit // Set syscall number - state.GetRegistersRef()[4] = Word(exitCode) // The first argument (exit code) - step := state.Step - - // Set up expectations - expected := mtutil.NewExpectedState(t, state) - expected.Step += 1 - expected.StepsSinceLastContextSwitch += 1 - expected.ActiveThread().Exited = true - expected.ActiveThread().ExitCode = exitCode - if c.shouldExitGlobally { - expected.Exited = true - expected.ExitCode = exitCode - } - - // State transition - var err error - var stepWitness *mipsevm.StepWitness - stepWitness, err = goVm.Step(true) - require.NoError(t, err) - - // Validate post-state - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), ver.Contracts) - }) + type testCase = testutil.TestCaseVariation[baseTest, testVariation] + testNamer := 
func(tc testCase) string { + return fmt.Sprintf("%v-%v", tc.Base.name, tc.Variation.name) + } + cases := testutil.TestVariations(baseTests, testVariations) + + exitCode := uint8(3) + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + c := tt.Base + mtutil.SetupThreads(r.Int64(10000), state, tt.Variation.traverseRight, c.threadCount, 0) + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = arch.SysExit // Set syscall number + state.GetRegistersRef()[4] = Word(exitCode) // The first argument (exit code) + } + + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.Step += 1 + expected.ExpectNoContextSwitch() + expected.ActiveThread().Exited = true + expected.ActiveThread().ExitCode = exitCode + if c.Base.shouldExitGlobally { + expected.Exited = true + expected.ExitCode = exitCode } + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). 
+ Run(t, cases) } func TestEVM_PopExitedThread(t *testing.T) { - cases := []struct { + type testCase struct { name string traverseRight bool activeStackThreadCount int expectTraverseRightPostState bool - }{ + } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ {name: "traverse right", traverseRight: true, activeStackThreadCount: 2, expectTraverseRightPostState: true}, {name: "traverse right, switch directions", traverseRight: true, activeStackThreadCount: 1, expectTraverseRightPostState: false}, {name: "traverse left", traverseRight: false, activeStackThreadCount: 2, expectTraverseRightPostState: false}, {name: "traverse left, switch directions", traverseRight: false, activeStackThreadCount: 1, expectTraverseRightPostState: true}, } - vmVersions := GetMipsVersionTestCases(t) - for _, ver := range vmVersions { - for i, c := range cases { - testName := fmt.Sprintf("%v (%v)", c.name, ver.Name) - t.Run(testName, func(t *testing.T) { - goVm := ver.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i*133))) - state := mtutil.GetMtState(t, goVm) - mtutil.SetupThreads(int64(i*222), state, c.traverseRight, c.activeStackThreadCount, 1) - step := state.Step - - // Setup thread to be dropped - threadToPop := state.GetCurrentThread() - threadToPop.Exited = true - threadToPop.ExitCode = 1 - - // Set up expectations - expected := mtutil.NewExpectedState(t, state) - expected.Step += 1 - expected.ActiveThreadId = mtutil.FindNextThreadExcluding(state, threadToPop.ThreadId).ThreadId - expected.StepsSinceLastContextSwitch = 0 - expected.ThreadCount -= 1 - expected.TraverseRight = c.expectTraverseRightPostState - expected.Thread(threadToPop.ThreadId).Dropped = true - if c.traverseRight { - expected.RightStackSize -= 1 - } else { - expected.LeftStackSize -= 1 - } - - // State transition - var err error - var stepWitness *mipsevm.StepWitness - stepWitness, err = goVm.Step(true) - require.NoError(t, err) - - 
// Validate post-state - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), ver.Contracts) - }) - } + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + mtutil.SetupThreads(r.Int64(1000), state, c.traverseRight, c.activeStackThreadCount, 1) + threadToPop := state.GetCurrentThread() + threadToPop.Exited = true + threadToPop.ExitCode = 1 + } + + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.Step += 1 + expected.ExpectPoppedThread() + expected.ExpectContextSwitch() + expected.ExpectTraverseRight(c.expectTraverseRightPostState) + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). + Run(t, cases) } func TestEVM_SysFutex_WaitPrivate(t *testing.T) { - // Note: parameters are written as 64-bit values. 
For 32-bit architectures, these values are downcast to 32-bit - cases := []struct { + type testCase struct { name string addressParam uint64 effAddr uint64 @@ -471,7 +449,13 @@ func TestEVM_SysFutex_WaitPrivate(t *testing.T) { actualValue uint32 timeout uint64 shouldFail bool - }{ + } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ {name: "successful wait, no timeout", addressParam: 0xFF_FF_FF_FF_FF_FF_12_38, effAddr: 0xFF_FF_FF_FF_FF_FF_12_38, targetValue: 0xFF_FF_FF_01, actualValue: 0xFF_FF_FF_01}, {name: "successful wait, no timeout, unaligned addr #1", addressParam: 0xFF_FF_FF_FF_FF_FF_12_33, effAddr: 0xFF_FF_FF_FF_FF_FF_12_30, targetValue: 0x01, actualValue: 0x01}, {name: "successful wait, no timeout, unaligned addr #2", addressParam: 0xFF_FF_FF_FF_FF_FF_12_37, effAddr: 0xFF_FF_FF_FF_FF_FF_12_34, targetValue: 0x01, actualValue: 0x01}, @@ -484,63 +468,56 @@ func TestEVM_SysFutex_WaitPrivate(t *testing.T) { {name: "memory mismatch w timeout", addressParam: 0xFF_FF_FF_FF_FF_FF_12_00, effAddr: 0xFF_FF_FF_FF_FF_FF_12_00, targetValue: 0xFF_FF_FF_F8, actualValue: 0xF8, timeout: 2000000, shouldFail: true}, {name: "memory mismatch w timeout, unaligned", addressParam: 0xFF_FF_FF_FF_FF_FF_12_0F, effAddr: 0xFF_FF_FF_FF_FF_FF_12_0C, targetValue: 0xFF_FF_FF_01, actualValue: 0xFF_FF_FF_02, timeout: 2000000, shouldFail: true}, } - vmVersions := GetMipsVersionTestCases(t) - for _, ver := range vmVersions { - for i, c := range cases { - testName := fmt.Sprintf("%v (%v)", c.name, ver.Name) - t.Run(testName, func(t *testing.T) { - rand := testutil.NewRandHelper(int64(i * 33)) - goVm := ver.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i*1234)), mtutil.WithPCAndNextPC(0x04)) - state := mtutil.GetMtState(t, goVm) - step := state.GetStep() - - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) - testutil.RandomizeWordAndSetUint32(state.GetMemory(), Word(c.effAddr), 
c.actualValue, int64(i+22)) - state.GetRegistersRef()[2] = arch.SysFutex // Set syscall number - state.GetRegistersRef()[4] = Word(c.addressParam) - state.GetRegistersRef()[5] = exec.FutexWaitPrivate - // Randomize upper bytes of futex target - state.GetRegistersRef()[6] = (rand.Word() & ^Word(0xFF_FF_FF_FF)) | Word(c.targetValue) - state.GetRegistersRef()[7] = Word(c.timeout) - - // Setup expectations - expected := mtutil.NewExpectedState(t, state) - expected.Step += 1 - expected.ActiveThread().PC = state.GetCpu().NextPC - expected.ActiveThread().NextPC = state.GetCpu().NextPC + 4 - if c.shouldFail { - expected.StepsSinceLastContextSwitch += 1 - expected.ActiveThread().Registers[2] = exec.MipsEAGAIN - expected.ActiveThread().Registers[7] = exec.SysErrorSignal - } else { - // Return empty result and preempt thread - expected.ActiveThread().Registers[2] = 0 - expected.ActiveThread().Registers[7] = 0 - expected.ExpectPreemption(state) - } - - // State transition - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - - // Validate post-state - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), ver.Contracts) - }) + + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + testutil.RandomizeWordAndSetUint32(state.GetMemory(), Word(c.effAddr), c.actualValue, r.Int64(1000)) + state.GetRegistersRef()[2] = arch.SysFutex // Set syscall number + state.GetRegistersRef()[4] = Word(c.addressParam) + state.GetRegistersRef()[5] = exec.FutexWaitPrivate + // Randomize upper bytes of futex target + state.GetRegistersRef()[6] = (rand.Word() & ^Word(0xFF_FF_FF_FF)) | Word(c.targetValue) + state.GetRegistersRef()[7] = Word(c.timeout) + } + + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) 
ExpectedExecResult { + expected.Step += 1 + expected.ActiveThread().PC = expected.ActiveThread().NextPC + expected.ActiveThread().NextPC = expected.ActiveThread().NextPC + 4 + if c.shouldFail { + expected.ExpectNoContextSwitch() + expected.ActiveThread().Registers[2] = exec.MipsEAGAIN + expected.ActiveThread().Registers[7] = exec.SysErrorSignal + } else { + // Return empty result and preempt thread + expected.ActiveThread().Registers[2] = 0 + expected.ActiveThread().Registers[7] = 0 + expected.ExpectPreemption() } + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). + Run(t, cases) } func TestEVM_SysFutex_WakePrivate(t *testing.T) { - // Note: parameters are written as 64-bit values. For 32-bit architectures, these values are downcast to 32-bit - cases := []struct { + type testCase struct { name string addressParam uint64 effAddr uint64 activeThreadCount int inactiveThreadCount int traverseRight bool - }{ + } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ {name: "Traverse right", addressParam: 0xFF_FF_FF_FF_FF_FF_67_00, effAddr: 0xFF_FF_FF_FF_FF_FF_67_00, activeThreadCount: 2, inactiveThreadCount: 1, traverseRight: true}, {name: "Traverse right, unaligned addr #1", addressParam: 0xFF_FF_FF_FF_FF_FF_67_83, effAddr: 0xFF_FF_FF_FF_FF_FF_67_80, activeThreadCount: 2, inactiveThreadCount: 1, traverseRight: true}, {name: "Traverse right, unaligned addr #2", addressParam: 0xFF_FF_FF_FF_FF_FF_67_87, effAddr: 0xFF_FF_FF_FF_FF_FF_67_84, activeThreadCount: 2, inactiveThreadCount: 1, traverseRight: true}, @@ -557,38 +534,27 @@ func TestEVM_SysFutex_WakePrivate(t *testing.T) { {name: "Traverse left, single thread", addressParam: 0xFF_FF_FF_FF_FF_FF_67_88, effAddr: 0xFF_FF_FF_FF_FF_FF_67_88, activeThreadCount: 1, inactiveThreadCount: 0, traverseRight: false}, {name: "Traverse left, single thread, unaligned", addressParam: 0xFF_FF_FF_FF_FF_FF_67_89, effAddr: 
0xFF_FF_FF_FF_FF_FF_67_88, activeThreadCount: 1, inactiveThreadCount: 0, traverseRight: false}, } - vmVersions := GetMipsVersionTestCases(t) - for _, ver := range vmVersions { - for i, c := range cases { - testName := fmt.Sprintf("%v (%v)", c.name, ver.Name) - t.Run(testName, func(t *testing.T) { - goVm := ver.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i*1122))) - state := mtutil.GetMtState(t, goVm) - mtutil.SetupThreads(int64(i*2244), state, c.traverseRight, c.activeThreadCount, c.inactiveThreadCount) - step := state.Step - - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = arch.SysFutex // Set syscall number - state.GetRegistersRef()[4] = Word(c.addressParam) - state.GetRegistersRef()[5] = exec.FutexWakePrivate - - // Set up post-state expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().Registers[2] = 0 - expected.ActiveThread().Registers[7] = 0 - expected.ExpectPreemption(state) - - // State transition - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - - // Validate post-state - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), ver.Contracts) - }) - } + + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + mtutil.SetupThreads(r.Int64(1000), state, c.traverseRight, c.activeThreadCount, c.inactiveThreadCount) + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = arch.SysFutex // Set syscall number + state.GetRegistersRef()[4] = Word(c.addressParam) + state.GetRegistersRef()[5] = exec.FutexWakePrivate + } + + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + 
expected.ActiveThread().Registers[2] = 0 + expected.ActiveThread().Registers[7] = 0 + expected.ExpectPreemption() + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). + Run(t, cases) } func TestEVM_SysFutex_UnsupportedOp(t *testing.T) { @@ -609,68 +575,60 @@ func TestEVM_SysFutex_UnsupportedOp(t *testing.T) { const FUTEX_CMP_REQUEUE_PI = 12 const FUTEX_LOCK_PI2 = 13 - unsupportedFutexOps := map[string]Word{ - "FUTEX_WAIT": FUTEX_WAIT, - "FUTEX_WAKE": FUTEX_WAKE, - "FUTEX_FD": FUTEX_FD, - "FUTEX_REQUEUE": FUTEX_REQUEUE, - "FUTEX_CMP_REQUEUE": FUTEX_CMP_REQUEUE, - "FUTEX_WAKE_OP": FUTEX_WAKE_OP, - "FUTEX_LOCK_PI": FUTEX_LOCK_PI, - "FUTEX_UNLOCK_PI": FUTEX_UNLOCK_PI, - "FUTEX_TRYLOCK_PI": FUTEX_TRYLOCK_PI, - "FUTEX_WAIT_BITSET": FUTEX_WAIT_BITSET, - "FUTEX_WAKE_BITSET": FUTEX_WAKE_BITSET, - "FUTEX_WAIT_REQUEUE_PI": FUTEX_WAIT_REQUEUE_PI, - "FUTEX_CMP_REQUEUE_PI": FUTEX_CMP_REQUEUE_PI, - "FUTEX_LOCK_PI2": FUTEX_LOCK_PI2, - "FUTEX_REQUEUE_PRIVATE": (FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG), - "FUTEX_CMP_REQUEUE_PRIVATE": (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG), - "FUTEX_WAKE_OP_PRIVATE": (FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG), - "FUTEX_LOCK_PI_PRIVATE": (FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG), - "FUTEX_LOCK_PI2_PRIVATE": (FUTEX_LOCK_PI2 | FUTEX_PRIVATE_FLAG), - "FUTEX_UNLOCK_PI_PRIVATE": (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG), - "FUTEX_TRYLOCK_PI_PRIVATE": (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG), - "FUTEX_WAIT_BITSET_PRIVATE": (FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG), - "FUTEX_WAKE_BITSET_PRIVATE": (FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG), - "FUTEX_WAIT_REQUEUE_PI_PRIVATE": (FUTEX_WAIT_REQUEUE_PI | FUTEX_PRIVATE_FLAG), - "FUTEX_CMP_REQUEUE_PI_PRIVATE": (FUTEX_CMP_REQUEUE_PI | FUTEX_PRIVATE_FLAG), - } - - vmVersions := GetMipsVersionTestCases(t) - for _, ver := range vmVersions { - for name, op := range unsupportedFutexOps { - testName := fmt.Sprintf("%v (%v)", name, ver.Name) - t.Run(testName, func(t 
*testing.T) { - goVm := ver.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(op))) - state := mtutil.GetMtState(t, goVm) - step := state.GetStep() - - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = arch.SysFutex // Set syscall number - state.GetRegistersRef()[5] = op - - // Setup expectations - expected := mtutil.NewExpectedState(t, state) - expected.Step += 1 - expected.StepsSinceLastContextSwitch += 1 - expected.ActiveThread().PC = state.GetCpu().NextPC - expected.ActiveThread().NextPC = state.GetCpu().NextPC + 4 - expected.ActiveThread().Registers[2] = exec.MipsEINVAL - expected.ActiveThread().Registers[7] = exec.SysErrorSignal - - // State transition - var err error - var stepWitness *mipsevm.StepWitness - stepWitness, err = goVm.Step(true) - require.NoError(t, err) - - // Validate post-state - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), ver.Contracts) - }) - } + type testCase struct { + name string + op Word } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ + {"FUTEX_WAIT", FUTEX_WAIT}, + {"FUTEX_WAKE", FUTEX_WAKE}, + {"FUTEX_FD", FUTEX_FD}, + {"FUTEX_REQUEUE", FUTEX_REQUEUE}, + {"FUTEX_CMP_REQUEUE", FUTEX_CMP_REQUEUE}, + {"FUTEX_WAKE_OP", FUTEX_WAKE_OP}, + {"FUTEX_LOCK_PI", FUTEX_LOCK_PI}, + {"FUTEX_UNLOCK_PI", FUTEX_UNLOCK_PI}, + {"FUTEX_TRYLOCK_PI", FUTEX_TRYLOCK_PI}, + {"FUTEX_WAIT_BITSET", FUTEX_WAIT_BITSET}, + {"FUTEX_WAKE_BITSET", FUTEX_WAKE_BITSET}, + {"FUTEX_WAIT_REQUEUE_PI", FUTEX_WAIT_REQUEUE_PI}, + {"FUTEX_CMP_REQUEUE_PI", FUTEX_CMP_REQUEUE_PI}, + {"FUTEX_LOCK_PI2", FUTEX_LOCK_PI2}, + {"FUTEX_REQUEUE_PRIVATE", (FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG)}, + {"FUTEX_CMP_REQUEUE_PRIVATE", (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG)}, + {"FUTEX_WAKE_OP_PRIVATE", (FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG)}, + {"FUTEX_LOCK_PI_PRIVATE", (FUTEX_LOCK_PI | 
FUTEX_PRIVATE_FLAG)}, + {"FUTEX_LOCK_PI2_PRIVATE", (FUTEX_LOCK_PI2 | FUTEX_PRIVATE_FLAG)}, + {"FUTEX_UNLOCK_PI_PRIVATE", (FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG)}, + {"FUTEX_TRYLOCK_PI_PRIVATE", (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)}, + {"FUTEX_WAIT_BITSET_PRIVATE", (FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG)}, + {"FUTEX_WAKE_BITSET_PRIVATE", (FUTEX_WAKE_BITSET | FUTEX_PRIVATE_FLAG)}, + {"FUTEX_WAIT_REQUEUE_PI_PRIVATE", (FUTEX_WAIT_REQUEUE_PI | FUTEX_PRIVATE_FLAG)}, + {"FUTEX_CMP_REQUEUE_PI_PRIVATE", (FUTEX_CMP_REQUEUE_PI | FUTEX_PRIVATE_FLAG)}, + } + + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = arch.SysFutex // Set syscall number + state.GetRegistersRef()[5] = c.op + } + + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ActiveThread().Registers[2] = exec.MipsEINVAL + expected.ActiveThread().Registers[7] = exec.SysErrorSignal + return ExpectNormalExecution() + } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). 
+ Run(t, cases) } func TestEVM_SysYield(t *testing.T) { @@ -682,113 +640,90 @@ func TestEVM_SysNanosleep(t *testing.T) { } func runPreemptSyscall(t *testing.T, syscallName string, syscallNum uint32) { - cases := []struct { + type testVariation struct { + name string + traverseRight bool + } + testVariations := []testVariation{ + {"Traverse right", true}, + {"Traverse left", false}, + } + + type baseTest struct { name string - traverseRight bool activeThreads int inactiveThreads int - }{ + } + baseTests := []baseTest{ {name: "Last active thread", activeThreads: 1, inactiveThreads: 2}, {name: "Only thread", activeThreads: 1, inactiveThreads: 0}, {name: "Do not change directions", activeThreads: 2, inactiveThreads: 2}, {name: "Do not change directions", activeThreads: 3, inactiveThreads: 0}, } - versions := GetMipsVersionTestCases(t) - for _, ver := range versions { - for i, c := range cases { - for _, traverseRight := range []bool{true, false} { - testName := fmt.Sprintf("%v: %v (vm = %v, traverseRight = %v)", syscallName, c.name, ver.Name, traverseRight) - t.Run(testName, func(t *testing.T) { - goVm := ver.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i*789))) - state := mtutil.GetMtState(t, goVm) - mtutil.SetupThreads(int64(i*3259), state, traverseRight, c.activeThreads, c.inactiveThreads) - - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = Word(syscallNum) // Set syscall number - step := state.Step - - // Set up post-state expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ExpectPreemption(state) - expected.PrestateActiveThread().Registers[2] = 0 - expected.PrestateActiveThread().Registers[7] = 0 - - // State transition - var err error - var stepWitness *mipsevm.StepWitness - stepWitness, err = goVm.Step(true) - require.NoError(t, err) - - // Validate post-state - expected.Validate(t, state) - testutil.ValidateEVM(t, 
stepWitness, step, goVm, multithreaded.GetStateHashFn(), ver.Contracts) - }) - } - } + type testCase = testutil.TestCaseVariation[baseTest, testVariation] + testNamer := func(tc testCase) string { + return fmt.Sprintf("%v-%v", tc.Base.name, tc.Variation.name) + } + cases := testutil.TestVariations(baseTests, testVariations) + + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + c := tt.Base + mtutil.SetupThreads(r.Int64(1000), state, tt.Variation.traverseRight, c.activeThreads, c.inactiveThreads) + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = Word(syscallNum) // Set syscall number + } + + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ExpectPreemption() + expected.PrestateActiveThread().Registers[2] = 0 + expected.PrestateActiveThread().Registers[7] = 0 + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). 
+ Run(t, cases) } func TestEVM_SysOpen(t *testing.T) { - vmVersions := GetMipsVersionTestCases(t) - for _, ver := range vmVersions { - t.Run(ver.Name, func(t *testing.T) { - goVm := ver.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(5512))) - state := mtutil.GetMtState(t, goVm) - - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = arch.SysOpen // Set syscall number - step := state.Step - - // Set up post-state expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().Registers[2] = exec.MipsEBADF - expected.ActiveThread().Registers[7] = exec.SysErrorSignal - - // State transition - var err error - var stepWitness *mipsevm.StepWitness - stepWitness, err = goVm.Step(true) - require.NoError(t, err) + initState := func(t require.TestingT, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = arch.SysOpen // Set syscall number + } - // Validate post-state - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), ver.Contracts) - }) + setExpectations := func(t require.TestingT, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ActiveThread().Registers[2] = exec.MipsEBADF + expected.ActiveThread().Registers[7] = exec.SysErrorSignal + return ExpectNormalExecution() } + NewSimpleDiffTester(). + InitState(initState). + SetExpectations(setExpectations). 
+ Run(t) } func TestEVM_SysGetPID(t *testing.T) { - vmVersions := GetMipsVersionTestCases(t) - for _, ver := range vmVersions { - t.Run(ver.Name, func(t *testing.T) { - goVm := ver.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(1929))) - state := mtutil.GetMtState(t, goVm) - - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = arch.SysGetpid // Set syscall number - step := state.Step - - // Set up post-state expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().Registers[2] = 0 - expected.ActiveThread().Registers[7] = 0 - - // State transition - var err error - var stepWitness *mipsevm.StepWitness - stepWitness, err = goVm.Step(true) - require.NoError(t, err) + initState := func(t require.TestingT, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = arch.SysGetpid // Set syscall number + } - // Validate post-state - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), ver.Contracts) - }) + setExpectations := func(t require.TestingT, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ActiveThread().Registers[2] = 0 + expected.ActiveThread().Registers[7] = 0 + return ExpectNormalExecution() } + + NewSimpleDiffTester(). + InitState(initState). + SetExpectations(setExpectations). 
+ Run(t) } func TestEVM_SysClockGettimeMonotonic(t *testing.T) { @@ -837,7 +772,7 @@ func testEVM_SysClockGettime(t *testing.T, clkid Word) { } cases := testutil.TestVariations(baseTests, llVariations) - initState := func(tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { c := tt.Base llVar := tt.Variation @@ -861,7 +796,7 @@ func testEVM_SysClockGettime(t *testing.T, clkid Word) { llOwnerThread = state.GetCurrentThread().ThreadId + 1 } - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) state.GetRegistersRef()[2] = arch.SysClockGetTime // Set syscall number state.GetRegistersRef()[4] = clkid // a0 state.GetRegistersRef()[5] = c.timespecAddr // a1 @@ -870,7 +805,7 @@ func testEVM_SysClockGettime(t *testing.T, clkid Word) { state.LLOwnerThread = llOwnerThread } - setExpectations := func(tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { c := tt.Base llVar := tt.Variation @@ -901,71 +836,25 @@ func testEVM_SysClockGettime(t *testing.T, clkid Word) { } func TestEVM_SysClockGettimeNonMonotonic(t *testing.T) { - vmVersions := GetMipsVersionTestCases(t) - for _, ver := range vmVersions { - t.Run(ver.Name, func(t *testing.T) { - goVm := ver.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(2101))) - state := mtutil.GetMtState(t, goVm) - - timespecAddr := Word(0x1000) - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = arch.SysClockGetTime // Set syscall number - state.GetRegistersRef()[4] = 0xDEAD // a0 - invalid clockid - 
state.GetRegistersRef()[5] = timespecAddr // a1 - step := state.Step - - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().Registers[2] = exec.MipsEINVAL - expected.ActiveThread().Registers[7] = exec.SysErrorSignal - - var err error - var stepWitness *mipsevm.StepWitness - stepWitness, err = goVm.Step(true) - require.NoError(t, err) + initState := func(t require.TestingT, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + timespecAddr := Word(0x1000) + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = arch.SysClockGetTime // Set syscall number + state.GetRegistersRef()[4] = 0xDEAD // a0 - invalid clockid + state.GetRegistersRef()[5] = timespecAddr // a1 + } - // Validate post-state - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), ver.Contracts) - }) + setExpectations := func(t require.TestingT, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ActiveThread().Registers[2] = exec.MipsEINVAL + expected.ActiveThread().Registers[7] = exec.SysErrorSignal + return ExpectNormalExecution() } -} -var NoopSyscalls = map[string]uint32{ - "SysGetAffinity": 4240, - "SysMadvise": 4218, - "SysRtSigprocmask": 4195, - "SysSigaltstack": 4206, - "SysRtSigaction": 4194, - "SysPrlimit64": 4338, - "SysClose": 4006, - "SysPread64": 4200, - "SysStat": 4106, - "SysFstat": 4108, - "SysFstat64": 4215, - "SysOpenAt": 4288, - "SysReadlink": 4085, - "SysReadlinkAt": 4298, - "SysIoctl": 4054, - "SysEpollCreate1": 4326, - "SysPipe2": 4328, - "SysEpollCtl": 4249, - "SysEpollPwait": 4313, - "SysGetRandom": 4353, - "SysUname": 4122, - "SysStat64": 4213, - "SysGetuid": 4024, - "SysGetgid": 4047, - "SysLlseek": 4140, - "SysMinCore": 4217, - "SysTgkill": 4266, - "SysGetRLimit": 4076, - "SysLseek": 4019, - "SysMunmap": 4091, - "SysSetITimer": 4104, 
- "SysTimerCreate": 4257, - "SysTimerSetTime": 4258, - "SysTimerDelete": 4261, + NewSimpleDiffTester(). + InitState(initState). + SetExpectations(setExpectations). + Run(t) } func TestEVM_EmptyThreadStacks(t *testing.T) { @@ -992,12 +881,12 @@ func TestEVM_EmptyThreadStacks(t *testing.T) { cases := testutil.TestVariations(baseTests, proofVariations) - initState := func(c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { b := c.Base mtutil.SetupThreads(r.Int64(1000), state, b.traverseRight, 0, b.otherStackSize) } - setExpectations := func(c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { goPanic := "Active thread stack is empty" evmErr := "active thread stack is empty" return ExpectVmPanic(goPanic, evmErr, WithProofData(c.Variation.Proof)) @@ -1010,103 +899,124 @@ func TestEVM_EmptyThreadStacks(t *testing.T) { } func TestEVM_NormalTraversal_Full(t *testing.T) { - cases := []struct { + type testVariation struct { + name string + traverseRight bool + } + testVariations := []testVariation{ + {"Traverse right", true}, + {"Traverse left", false}, + } + + type baseTest struct { name string threadCount int - }{ + } + baseTests := []baseTest{ {"1 thread", 1}, {"2 threads", 2}, {"3 threads", 3}, } - vmVersions := GetMipsVersionTestCases(t) - for _, ver := range vmVersions { - for i, c := range cases { - for _, traverseRight := range []bool{true, false} { - testName := fmt.Sprintf("%v (vm = %v, traverseRight = %v)", c.name, ver.Name, traverseRight) - t.Run(testName, func(t *testing.T) { - // Setup - goVm := ver.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i*789))) - state := 
mtutil.GetMtState(t, goVm) - mtutil.SetupThreads(int64(i*2947), state, traverseRight, c.threadCount, 0) - step := state.Step - - // Loop through all the threads to get back to the starting state - iterations := c.threadCount * 2 - for i := 0; i < iterations; i++ { - // Set up thread to yield - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = Word(arch.SysSchedYield) - - // Set up post-state expectations - expected := mtutil.NewExpectedState(t, state) - expected.ActiveThread().Registers[2] = 0 - expected.ActiveThread().Registers[7] = 0 - expected.ExpectStep() - expected.ExpectPreemption(state) - - // State transition - var err error - var stepWitness *mipsevm.StepWitness - stepWitness, err = goVm.Step(true) - require.NoError(t, err) - - // Validate post-state - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), ver.Contracts) - } - }) + type testCase = testutil.TestCaseVariation[baseTest, testVariation] + testNamer := func(tc testCase) string { + return fmt.Sprintf("%v-%v", tc.Base.name, tc.Variation.name) + } + + syscallNumReg := 2 + // The ori (or immediate) instruction sets register 2 to SysSchedYield + oriInsn := uint32((0b001101 << 26) | (syscallNumReg & 0x1F << 16) | (0xFFFF & arch.SysSchedYield)) + + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + c := tt.Base + traverseRight := tt.Variation.traverseRight + mtutil.SetupThreads(r.Int64(1000), state, traverseRight, c.threadCount, 0) + state.Step = 0 + + // Set up each thread with a sequence of instructions + threads, _ := mtutil.GetThreadStacks(state) + for i := 0; i < c.threadCount; i++ { + thread := threads[i] + pc := thread.Cpu.PC + // Each thread will be accessed twice + for j := 0; j < 2; j++ { + // First run the ori instruction to set the syscall register + // Then run the syscall (yield) + 
testutil.StoreInstruction(state.Memory, pc, oriInsn) + testutil.StoreInstruction(state.Memory, pc+4, syscallInsn) + pc += 8 } } } + + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + if expected.Step%2 == 0 { + // Even instructions will be the "or immediate" insn that sets our yield syscall num + expected.ExpectStep() + expected.ActiveThread().Registers[syscallNumReg] = arch.SysSchedYield + } else { + // Odd instructions will cause a yield + expected.ExpectStep() + expected.ActiveThread().Registers[2] = 0 + expected.ActiveThread().Registers[7] = 0 + expected.ExpectPreemption() + } + return ExpectNormalExecution() + } + + diffTester := NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations) + + for _, bt := range baseTests { + // Loop through all the threads to get back to the starting state + // We want to loop 2x for each thread, where each loop takes 2 instructions + steps := bt.threadCount * 4 + + cases := testutil.TestVariations([]baseTest{bt}, testVariations) + diffTester.Run(t, cases, WithSteps(steps)) + } } func TestEVM_SchedQuantumThreshold(t *testing.T) { - cases := []struct { + type testCase struct { name string stepsSinceLastContextSwitch uint64 shouldPreempt bool - }{ + } + + testNamer := func(tc testCase) string { + return tc.name + } + + cases := []testCase{ {name: "just under threshold", stepsSinceLastContextSwitch: exec.SchedQuantum - 1}, {name: "at threshold", stepsSinceLastContextSwitch: exec.SchedQuantum, shouldPreempt: true}, {name: "beyond threshold", stepsSinceLastContextSwitch: exec.SchedQuantum + 1, shouldPreempt: true}, } - vmVersions := GetMipsVersionTestCases(t) - for _, ver := range vmVersions { - for i, c := range cases { - testName := fmt.Sprintf("%v (%v)", c.name, ver.Name) - t.Run(testName, func(t *testing.T) { - goVm := ver.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), 
mtutil.WithRandomization(int64(i*789))) - state := mtutil.GetMtState(t, goVm) - // Setup basic getThreadId syscall instruction - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) - state.GetRegistersRef()[2] = arch.SysGetTID // Set syscall number - state.StepsSinceLastContextSwitch = c.stepsSinceLastContextSwitch - step := state.Step - - // Set up post-state expectations - expected := mtutil.NewExpectedState(t, state) - if c.shouldPreempt { - expected.Step += 1 - expected.ExpectPreemption(state) - } else { - // Otherwise just expect a normal step - expected.ExpectStep() - expected.ActiveThread().Registers[2] = state.GetCurrentThread().ThreadId - expected.ActiveThread().Registers[7] = 0 - } - - // State transition - var err error - var stepWitness *mipsevm.StepWitness - stepWitness, err = goVm.Step(true) - require.NoError(t, err) - - // Validate post-state - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), ver.Contracts) - }) + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + // Setup basic getThreadId syscall instruction + testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) + state.GetRegistersRef()[2] = arch.SysGetTID // Set syscall number + state.StepsSinceLastContextSwitch = c.stepsSinceLastContextSwitch + } + + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + if c.shouldPreempt { + expected.Step += 1 + expected.ExpectPreemption() + } else { + // Otherwise just expect a normal step + expected.ExpectStep() + expected.ActiveThread().Registers[2] = expected.ActiveThreadId() + expected.ActiveThread().Registers[7] = 0 } + return ExpectNormalExecution() } + + NewDiffTester(testNamer). + InitState(initState). + SetExpectations(setExpectations). 
+ Run(t, cases) } diff --git a/cannon/mipsevm/tests/fuzz_evm_common64_test.go b/cannon/mipsevm/tests/fuzz_evm_common64_test.go index 542b63a09b0e6..6518a46aa3239 100644 --- a/cannon/mipsevm/tests/fuzz_evm_common64_test.go +++ b/cannon/mipsevm/tests/fuzz_evm_common64_test.go @@ -1,133 +1,99 @@ package tests import ( - "os" "testing" "github.com/stretchr/testify/require" + "github.com/ethereum-optimism/optimism/cannon/mipsevm" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" mtutil "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded/testutil" "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" ) -func FuzzStateConsistencyMulOp(f *testing.F) { - f.Add(int64(0x80_00_00_00), int64(0x80_00_00_00), int64(1)) - f.Add( - testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_11_22_33_44)), - testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_11_22_33_44)), - int64(1), - ) - f.Add( - testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_80_00_00_00)), - testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_80_00_00_00)), - int64(1), - ) - f.Add( - testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_FF_FF_FF_FF)), - testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_FF_FF_FF_FF)), - int64(1), - ) - +func FuzzMulOp(f *testing.F) { const opcode uint32 = 28 const mulFunct uint32 = 0x2 - versions := GetMipsVersionTestCases(f) - f.Fuzz(func(t *testing.T, rs int64, rt int64, seed int64) { - for _, v := range versions { - t.Run(v.Name, func(t *testing.T) { - mulOpConsistencyCheck(t, versions, opcode, true, mulFunct, Word(rs), Word(rt), seed) - }) - } - }) + multiplier := func(rs, rt Word) uint64 { + return uint64(int64(int32(rs)) * int64(int32(rt))) + } + mulOpCheck(f, multiplier, opcode, true, mulFunct) } -func FuzzStateConsistencyMultOp(f *testing.F) { - f.Add(int64(0x80_00_00_00), int64(0x80_00_00_00), int64(1)) - f.Add( - testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_11_22_33_44)), - 
testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_11_22_33_44)), - int64(1), - ) - f.Add( - testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_80_00_00_00)), - testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_80_00_00_00)), - int64(1), - ) - f.Add( - testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_FF_FF_FF_FF)), - testutil.ToSignedInteger(uint64(0xFF_FF_FF_FF_FF_FF_FF_FF)), - int64(1), - ) - +func FuzzMultOp(f *testing.F) { const multFunct uint32 = 0x18 - versions := GetMipsVersionTestCases(f) - f.Fuzz(func(t *testing.T, rs int64, rt int64, seed int64) { - mulOpConsistencyCheck(t, versions, 0, false, multFunct, Word(rs), Word(rt), seed) - }) + multiplier := func(rs, rt Word) uint64 { + return uint64(int64(int32(rs)) * int64(int32(rt))) + } + mulOpCheck(f, multiplier, 0, false, multFunct) } -func FuzzStateConsistencyMultuOp(f *testing.F) { - f.Add(uint64(0x80_00_00_00), uint64(0x80_00_00_00), int64(1)) +func FuzzMultuOp(f *testing.F) { + const multuFunct uint32 = 0x19 + multiplier := func(rs, rt Word) uint64 { + return uint64(uint32(rs)) * uint64(uint32(rt)) + } + mulOpCheck(f, multiplier, 0, false, multuFunct) +} + +type multiplierFn func(rs, rt Word) uint64 + +func mulOpCheck(f *testing.F, multiplier multiplierFn, opcode uint32, expectRdReg bool, funct uint32) { + f.Add(int64(0x80_00_00_00), int64(0x80_00_00_00), int64(1)) f.Add( - uint64(0xFF_FF_FF_FF_11_22_33_44), - uint64(0xFF_FF_FF_FF_11_22_33_44), + testutil.ToSignedInteger(0xFF_FF_FF_FF_11_22_33_44), + testutil.ToSignedInteger(0xFF_FF_FF_FF_11_22_33_44), int64(1), ) f.Add( - uint64(0xFF_FF_FF_FF_80_00_00_00), - uint64(0xFF_FF_FF_FF_80_00_00_00), + testutil.ToSignedInteger(0xFF_FF_FF_FF_80_00_00_00), + testutil.ToSignedInteger(0xFF_FF_FF_FF_80_00_00_00), int64(1), ) f.Add( - uint64(0xFF_FF_FF_FF_FF_FF_FF_FF), - uint64(0xFF_FF_FF_FF_FF_FF_FF_FF), + testutil.ToSignedInteger(0xFF_FF_FF_FF_FF_FF_FF_FF), + testutil.ToSignedInteger(0xFF_FF_FF_FF_FF_FF_FF_FF), int64(1), ) - const multuFunct uint32 = 0x19 - versions := 
GetMipsVersionTestCases(f) - f.Fuzz(func(t *testing.T, rs uint64, rt uint64, seed int64) { - mulOpConsistencyCheck(t, versions, 0, false, multuFunct, rs, rt, seed) - }) -} - -func mulOpConsistencyCheck( - t *testing.T, versions []VersionedVMTestCase, - opcode uint32, expectRdReg bool, funct uint32, - rs Word, rt Word, seed int64) { - for _, v := range versions { - t.Run(v.Name, func(t *testing.T) { - rsReg := uint32(17) - rtReg := uint32(18) - rdReg := uint32(0) - if expectRdReg { - rdReg = 19 - } - - insn := opcode<<26 | rsReg<<21 | rtReg<<16 | rdReg<<11 | funct - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(seed), mtutil.WithPCAndNextPC(0)) - state := goVm.GetState() - state.GetRegistersRef()[rsReg] = rs - state.GetRegistersRef()[rtReg] = rt - testutil.StoreInstruction(state.GetMemory(), 0, insn) - step := state.GetStep() + vms := GetMipsVersionTestCases(f) + type testCase struct { + rs Word + rt Word + } - // mere sanity checks - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() + rsReg := uint32(17) + rtReg := uint32(18) + rdReg := uint32(0) + if expectRdReg { + rdReg = 19 + } + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + insn := opcode<<26 | rsReg<<21 | rtReg<<16 | rdReg<<11 | funct + storeInsnWithCache(state, goVm, 0, insn) + state.GetRegistersRef()[rsReg] = c.rs + state.GetRegistersRef()[rtReg] = c.rt + } - stepWitness, err := goVm.Step(true) - require.NoError(t, err) + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + result := multiplier(c.rs, c.rt) + if expectRdReg { + expected.ActiveThread().Registers[rdReg] = exec.SignExtend(result, 32) + } else { + expected.ActiveThread().LO = exec.SignExtend(result, 32) + expected.ActiveThread().HI = exec.SignExtend(result>>32, 32) + } + 
return ExpectNormalExecution() + } - // use the post-state rdReg or LO and HI just so we can run sanity checks - if expectRdReg { - expected.ActiveThread().Registers[rdReg] = state.GetRegistersRef()[rdReg] - } else { - expected.ActiveThread().LO = state.GetCpu().LO - expected.ActiveThread().HI = state.GetCpu().HI - } - expected.Validate(t, state) + diffTester := NewDiffTester(NoopTestNamer[testCase]). + InitState(initState, mtutil.WithPCAndNextPC(0)). + SetExpectations(setExpectations) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) - } + f.Fuzz(func(t *testing.T, rs, rt, seed int64) { + tests := []testCase{{rs: Word(rs), rt: Word(rt)}} + diffTester.Run(t, tests, fuzzTestOptions(vms, seed)...) + }) } diff --git a/cannon/mipsevm/tests/fuzz_evm_common_test.go b/cannon/mipsevm/tests/fuzz_evm_common_test.go index dd7822c41a2f7..b475f9508b8e1 100644 --- a/cannon/mipsevm/tests/fuzz_evm_common_test.go +++ b/cannon/mipsevm/tests/fuzz_evm_common_test.go @@ -3,15 +3,16 @@ package tests import ( "bytes" "math" - "os" "testing" "github.com/ethereum/go-ethereum/crypto" "github.com/stretchr/testify/require" + "github.com/ethereum-optimism/optimism/cannon/mipsevm" "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" "github.com/ethereum-optimism/optimism/cannon/mipsevm/memory" + "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" mtutil "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded/testutil" "github.com/ethereum-optimism/optimism/cannon/mipsevm/program" "github.com/ethereum-optimism/optimism/cannon/mipsevm/testutil" @@ -21,29 +22,26 @@ import ( const syscallInsn = uint32(0x00_00_00_0c) func FuzzStateSyscallBrk(f *testing.F) { - versions := GetMipsVersionTestCases(f) + vms := GetMipsVersionTestCases(f) + + initState := func(t require.TestingT, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + 
state.GetRegistersRef()[2] = arch.SysBrk + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + } + + setExpectations := func(t require.TestingT, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ActiveThread().Registers[2] = program.PROGRAM_BREAK // Return fixed BRK value + expected.ActiveThread().Registers[7] = 0 // No error + return ExpectNormalExecution() + } + + diffTester := NewSimpleDiffTester(). + InitState(initState). + SetExpectations(setExpectations) + f.Fuzz(func(t *testing.T, seed int64) { - for _, v := range versions { - t.Run(v.Name, func(t *testing.T) { - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(seed)) - state := goVm.GetState() - state.GetRegistersRef()[2] = arch.SysBrk - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), syscallInsn) - step := state.GetStep() - - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().Registers[2] = program.PROGRAM_BREAK // Return fixed BRK value - expected.ActiveThread().Registers[7] = 0 // No error - - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - require.False(t, stepWitness.HasPreimage()) - - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) - } + diffTester.Run(t, fuzzTestOptions(vms, seed)...) 
}) } @@ -54,367 +52,440 @@ func FuzzStateSyscallMmap(f *testing.F) { // Check edge case - just within bounds f.Add(Word(0), Word(0x1000), Word(program.HEAP_END-4096), int64(3)) - versions := GetMipsVersionTestCases(f) - f.Fuzz(func(t *testing.T, addr Word, siz Word, heap Word, seed int64) { - for _, v := range versions { - t.Run(v.Name, func(t *testing.T) { - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), - mtutil.WithRandomization(seed), mtutil.WithHeap(heap)) - state := goVm.GetState() - step := state.GetStep() - - state.GetRegistersRef()[2] = arch.SysMmap - state.GetRegistersRef()[4] = addr - state.GetRegistersRef()[5] = siz - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), syscallInsn) - - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - if addr == 0 { - sizAlign := siz - if sizAlign&memory.PageAddrMask != 0 { // adjust size to align with page size - sizAlign = siz + memory.PageSize - (siz & memory.PageAddrMask) - } - newHeap := heap + sizAlign - if newHeap > program.HEAP_END || newHeap < heap || sizAlign < siz { - expected.ActiveThread().Registers[2] = exec.MipsEINVAL - expected.ActiveThread().Registers[7] = exec.SysErrorSignal - } else { - expected.Heap = heap + sizAlign - expected.ActiveThread().Registers[2] = heap - expected.ActiveThread().Registers[7] = 0 // no error - } - } else { - expected.ActiveThread().Registers[2] = addr - expected.ActiveThread().Registers[7] = 0 // no error - } - - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - require.False(t, stepWitness.HasPreimage()) - - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) + vms := GetMipsVersionTestCases(f) + type testCase struct { + addr Word + siz Word + heap Word + } + + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + state.Heap = c.heap + 
state.GetRegistersRef()[2] = arch.SysMmap + state.GetRegistersRef()[4] = c.addr + state.GetRegistersRef()[5] = c.siz + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + } + + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + if c.addr == 0 { + sizAlign := c.siz + if sizAlign&memory.PageAddrMask != 0 { // adjust size to align with page size + sizAlign = c.siz + memory.PageSize - (c.siz & memory.PageAddrMask) + } + newHeap := c.heap + sizAlign + if newHeap > program.HEAP_END || newHeap < c.heap || sizAlign < c.siz { + expected.ActiveThread().Registers[2] = exec.MipsEINVAL + expected.ActiveThread().Registers[7] = exec.SysErrorSignal + } else { + expected.Heap = c.heap + sizAlign + expected.ActiveThread().Registers[2] = c.heap + expected.ActiveThread().Registers[7] = 0 // no error + } + } else { + expected.ActiveThread().Registers[2] = c.addr + expected.ActiveThread().Registers[7] = 0 // no error } + return ExpectNormalExecution() + } + + diffTester := NewDiffTester(NoopTestNamer[testCase]). + InitState(initState). + SetExpectations(setExpectations) + + f.Fuzz(func(t *testing.T, addr Word, siz Word, heap Word, seed int64) { + tests := []testCase{{addr, siz, heap}} + diffTester.Run(t, tests, fuzzTestOptions(vms, seed)...) 
}) } func FuzzStateSyscallExitGroup(f *testing.F) { - versions := GetMipsVersionTestCases(f) + vms := GetMipsVersionTestCases(f) + type testCase struct { + exitCode uint8 + } + + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + state.GetRegistersRef()[2] = arch.SysExitGroup + state.GetRegistersRef()[4] = Word(c.exitCode) + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + } + + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.Step += 1 + expected.ExpectNoContextSwitch() + expected.Exited = true + expected.ExitCode = c.exitCode + return ExpectNormalExecution() + } + + diffTester := NewDiffTester(NoopTestNamer[testCase]). + InitState(initState). + SetExpectations(setExpectations) + f.Fuzz(func(t *testing.T, exitCode uint8, seed int64) { - for _, v := range versions { - t.Run(v.Name, func(t *testing.T) { - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), - mtutil.WithRandomization(seed)) - state := goVm.GetState() - state.GetRegistersRef()[2] = arch.SysExitGroup - state.GetRegistersRef()[4] = Word(exitCode) - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), syscallInsn) - step := state.GetStep() - - expected := mtutil.NewExpectedState(t, state) - expected.Step += 1 - expected.StepsSinceLastContextSwitch += 1 - expected.Exited = true - expected.ExitCode = exitCode - - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - require.False(t, stepWitness.HasPreimage()) - - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) - } + tests := []testCase{{exitCode}} + diffTester.Run(t, tests, fuzzTestOptions(vms, seed)...) 
}) } func FuzzStateSyscallFcntl(f *testing.F) { - versions := GetMipsVersionTestCases(f) - f.Fuzz(func(t *testing.T, fd Word, cmd Word, seed int64) { - for _, v := range versions { - t.Run(v.Name, func(t *testing.T) { - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), - mtutil.WithRandomization(seed)) - state := goVm.GetState() - state.GetRegistersRef()[2] = arch.SysFcntl - state.GetRegistersRef()[4] = fd - state.GetRegistersRef()[5] = cmd - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), syscallInsn) - step := state.GetStep() - - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - if cmd == 1 { - switch fd { - case exec.FdStdin, exec.FdStdout, exec.FdStderr, - exec.FdPreimageRead, exec.FdHintRead, exec.FdPreimageWrite, exec.FdHintWrite: - expected.ActiveThread().Registers[2] = 0 - expected.ActiveThread().Registers[7] = 0 - default: - expected.ActiveThread().Registers[2] = exec.MipsEBADF - expected.ActiveThread().Registers[7] = exec.SysErrorSignal - } - } else if cmd == 3 { - switch fd { - case exec.FdStdin, exec.FdPreimageRead, exec.FdHintRead: - expected.ActiveThread().Registers[2] = 0 - expected.ActiveThread().Registers[7] = 0 - case exec.FdStdout, exec.FdStderr, exec.FdPreimageWrite, exec.FdHintWrite: - expected.ActiveThread().Registers[2] = 1 - expected.ActiveThread().Registers[7] = 0 - default: - expected.ActiveThread().Registers[2] = exec.MipsEBADF - expected.ActiveThread().Registers[7] = exec.SysErrorSignal - } - } else { - expected.ActiveThread().Registers[2] = exec.MipsEINVAL - expected.ActiveThread().Registers[7] = exec.SysErrorSignal - } - - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - require.False(t, stepWitness.HasPreimage()) - - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) + vms := GetMipsVersionTestCases(f) + type testCase struct { + fd Word + cmd Word + } + + initState := func(t require.TestingT, c testCase, 
state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + state.GetRegistersRef()[2] = arch.SysFcntl + state.GetRegistersRef()[4] = c.fd + state.GetRegistersRef()[5] = c.cmd + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + } + + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + if c.cmd == 1 { + switch c.fd { + case exec.FdStdin, exec.FdStdout, exec.FdStderr, + exec.FdPreimageRead, exec.FdHintRead, exec.FdPreimageWrite, exec.FdHintWrite: + expected.ActiveThread().Registers[2] = 0 + expected.ActiveThread().Registers[7] = 0 + default: + expected.ActiveThread().Registers[2] = exec.MipsEBADF + expected.ActiveThread().Registers[7] = exec.SysErrorSignal + } + } else if c.cmd == 3 { + switch c.fd { + case exec.FdStdin, exec.FdPreimageRead, exec.FdHintRead: + expected.ActiveThread().Registers[2] = 0 + expected.ActiveThread().Registers[7] = 0 + case exec.FdStdout, exec.FdStderr, exec.FdPreimageWrite, exec.FdHintWrite: + expected.ActiveThread().Registers[2] = 1 + expected.ActiveThread().Registers[7] = 0 + default: + expected.ActiveThread().Registers[2] = exec.MipsEBADF + expected.ActiveThread().Registers[7] = exec.SysErrorSignal + } + } else { + expected.ActiveThread().Registers[2] = exec.MipsEINVAL + expected.ActiveThread().Registers[7] = exec.SysErrorSignal } + return ExpectNormalExecution() + } + + diffTester := NewDiffTester(NoopTestNamer[testCase]). + InitState(initState). + SetExpectations(setExpectations) + + f.Fuzz(func(t *testing.T, fd Word, cmd Word, seed int64) { + tests := []testCase{{fd, cmd}} + diffTester.Run(t, tests, fuzzTestOptions(vms, seed)...) 
}) } func FuzzStateHintRead(f *testing.F) { - versions := GetMipsVersionTestCases(f) - f.Fuzz(func(t *testing.T, addr Word, count Word, seed int64) { - for _, v := range versions { - t.Run(v.Name, func(t *testing.T) { - preimageData := []byte("hello world") - preimageKey := preimage.Keccak256Key(crypto.Keccak256Hash(preimageData)).PreimageKey() - oracle := testutil.StaticOracle(t, preimageData) // only used for hinting - - goVm := v.VMFactory(oracle, os.Stdout, os.Stderr, testutil.CreateLogger(), - mtutil.WithRandomization(seed), mtutil.WithPreimageKey(preimageKey)) - state := goVm.GetState() - state.GetRegistersRef()[2] = arch.SysRead - state.GetRegistersRef()[4] = exec.FdHintRead - state.GetRegistersRef()[5] = addr - state.GetRegistersRef()[6] = count - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), syscallInsn) - step := state.GetStep() - - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().Registers[2] = count - expected.ActiveThread().Registers[7] = 0 // no error + vms := GetMipsVersionTestCases(f) + type testCase struct { + addr Word + count Word + } + + preimageData := []byte("hello world") + preimageKey := preimage.Keccak256Key(crypto.Keccak256Hash(preimageData)).PreimageKey() + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + state.PreimageKey = preimageKey + state.GetRegistersRef()[2] = arch.SysRead + state.GetRegistersRef()[4] = exec.FdHintRead + state.GetRegistersRef()[5] = c.addr + state.GetRegistersRef()[6] = c.count + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + } + + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + expected.ActiveThread().Registers[2] = c.count + expected.ActiveThread().Registers[7] = 0 // no error + return ExpectNormalExecution() + } + + postCheck := 
func(t require.TestingT, c testCase, vm VersionedVMTestCase, deps *TestDependencies, stepWitness *mipsevm.StepWitness) { + require.False(t, stepWitness.HasPreimage()) + } + + diffTester := NewDiffTester(NoopTestNamer[testCase]). + InitState(initState). + SetExpectations(setExpectations). + PostCheck(postCheck) - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - require.False(t, stepWitness.HasPreimage()) - - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) + f.Fuzz(func(t *testing.T, addr Word, count Word, seed int64) { + tests := []testCase{{addr, count}} + po := func() mipsevm.PreimageOracle { + return testutil.StaticOracle(t, preimageData) } + + diffTester.Run(t, tests, fuzzTestOptions(vms, seed, WithPreimageOracle(po))...) }) } func FuzzStatePreimageRead(f *testing.F) { - versions := GetMipsVersionTestCases(f) + vms := GetMipsVersionTestCases(f) + type testCase struct { + addr arch.Word + pc arch.Word + count arch.Word + preimageOffset arch.Word + } + + preexistingMemoryVal := ^arch.Word(0) + preimageValue := []byte("hello world") + preimageData := mtutil.AddPreimageLengthPrefix(preimageValue) + preimageKey := preimage.Keccak256Key(crypto.Keccak256Hash(preimageValue)).PreimageKey() + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + state.PreimageKey = preimageKey + state.PreimageOffset = c.preimageOffset + state.GetCurrentThread().Cpu.PC = c.pc + state.GetCurrentThread().Cpu.NextPC = c.pc + 4 + state.GetRegistersRef()[2] = arch.SysRead + state.GetRegistersRef()[4] = exec.FdPreimageRead + state.GetRegistersRef()[5] = c.addr + state.GetRegistersRef()[6] = c.count + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + state.GetMemory().SetWord(testutil.EffAddr(c.addr), preexistingMemoryVal) + } + + setExpectations := func(t require.TestingT, c testCase, expected 
*mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + alignment := c.addr & arch.ExtMask + writeLen := arch.WordSizeBytes - alignment + if c.count < writeLen { + writeLen = c.count + } + // Cap write length to remaining bytes of the preimage + preimageDataLen := Word(len(preimageData)) + if c.preimageOffset+writeLen > preimageDataLen { + writeLen = preimageDataLen - c.preimageOffset + } + + expected.ExpectStep() + expected.ActiveThread().Registers[2] = writeLen + expected.ActiveThread().Registers[7] = 0 // no error + expected.PreimageOffset += writeLen + if writeLen > 0 { + // Expect a memory write + var expectedMemory []byte + expectedMemory = arch.ByteOrderWord.AppendWord(expectedMemory, preexistingMemoryVal) + copy(expectedMemory[alignment:], preimageData[c.preimageOffset:c.preimageOffset+writeLen]) + expected.ExpectMemoryWrite(testutil.EffAddr(c.addr), arch.ByteOrderWord.Word(expectedMemory[:])) + } + return ExpectNormalExecution() + } + + postCheck := func(t require.TestingT, c testCase, vm VersionedVMTestCase, deps *TestDependencies, stepWitness *mipsevm.StepWitness) { + require.True(t, stepWitness.HasPreimage()) + } + + diffTester := NewDiffTester(NoopTestNamer[testCase]). + InitState(initState). + SetExpectations(setExpectations). 
+ PostCheck(postCheck) + f.Fuzz(func(t *testing.T, addr arch.Word, pc arch.Word, count arch.Word, preimageOffset arch.Word, seed int64) { - for _, v := range versions { - t.Run(v.Name, func(t *testing.T) { - effAddr := addr & arch.AddressMask - pc = pc & arch.AddressMask - preexistingMemoryVal := ^arch.Word(0) - preimageValue := []byte("hello world") - preimageData := mtutil.AddPreimageLengthPrefix(preimageValue) - if preimageOffset >= Word(len(preimageData)) || pc == effAddr { - t.SkipNow() - } - preimageKey := preimage.Keccak256Key(crypto.Keccak256Hash(preimageValue)).PreimageKey() - oracle := testutil.StaticOracle(t, preimageValue) - - goVm := v.VMFactory(oracle, os.Stdout, os.Stderr, testutil.CreateLogger(), - mtutil.WithRandomization(seed), mtutil.WithPreimageKey(preimageKey), mtutil.WithPreimageOffset(preimageOffset), mtutil.WithPCAndNextPC(pc)) - state := goVm.GetState() - state.GetRegistersRef()[2] = arch.SysRead - state.GetRegistersRef()[4] = exec.FdPreimageRead - state.GetRegistersRef()[5] = addr - state.GetRegistersRef()[6] = count - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), syscallInsn) - state.GetMemory().SetWord(effAddr, preexistingMemoryVal) - step := state.GetStep() - - alignment := addr & arch.ExtMask - writeLen := arch.WordSizeBytes - alignment - if count < writeLen { - writeLen = count - } - // Cap write length to remaining bytes of the preimage - preimageDataLen := Word(len(preimageData)) - if preimageOffset+writeLen > preimageDataLen { - writeLen = preimageDataLen - preimageOffset - } - - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().Registers[2] = writeLen - expected.ActiveThread().Registers[7] = 0 // no error - expected.PreimageOffset += writeLen - if writeLen > 0 { - // Expect a memory write - var expectedMemory []byte - expectedMemory = arch.ByteOrderWord.AppendWord(expectedMemory, preexistingMemoryVal) - copy(expectedMemory[alignment:], 
preimageData[preimageOffset:preimageOffset+writeLen]) - expected.ExpectMemoryWrite(effAddr, arch.ByteOrderWord.Word(expectedMemory[:])) - } - - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - require.True(t, stepWitness.HasPreimage()) - - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) + pc = testutil.EffAddr(pc) + if preimageOffset >= Word(len(preimageData)) || pc == testutil.EffAddr(addr) { + t.SkipNow() } + po := func() mipsevm.PreimageOracle { + return testutil.StaticOracle(t, preimageValue) + } + + tests := []testCase{{addr, pc, count, preimageOffset}} + diffTester.Run(t, tests, fuzzTestOptions(vms, seed, WithPreimageOracle(po))...) }) } func FuzzStateHintWrite(f *testing.F) { - versions := GetMipsVersionTestCases(f) - f.Fuzz(func(t *testing.T, addr Word, count Word, hint1, hint2, hint3 []byte, randSeed int64) { - for _, v := range versions { - t.Run(v.Name, func(t *testing.T) { - // Make sure pc does not overlap with hint data in memory - pc := Word(0) - if addr <= 8 { - addr += 8 - } - - // Set up hint data - r := testutil.NewRandHelper(randSeed) - hints := [][]byte{hint1, hint2, hint3} - hintData := make([]byte, 0) - for _, hint := range hints { - prefixedHint := mtutil.AddHintLengthPrefix(hint) - hintData = append(hintData, prefixedHint...) - } - lastHintLen := math.Round(r.Fraction() * float64(len(hintData))) - lastHint := hintData[:int(lastHintLen)] - expectedBytesToProcess := int(count) + int(lastHintLen) - if expectedBytesToProcess > len(hintData) { - // Add an extra hint to span the rest of the hint data - randomHint := r.RandomBytes(t, expectedBytesToProcess) - prefixedHint := mtutil.AddHintLengthPrefix(randomHint) - hintData = append(hintData, prefixedHint...) 
- hints = append(hints, randomHint) - } - - // Set up state - oracle := &testutil.HintTrackingOracle{} - goVm := v.VMFactory(oracle, os.Stdout, os.Stderr, testutil.CreateLogger(), - mtutil.WithRandomization(randSeed), mtutil.WithLastHint(lastHint), mtutil.WithPCAndNextPC(pc)) - state := goVm.GetState() - state.GetRegistersRef()[2] = arch.SysWrite - state.GetRegistersRef()[4] = exec.FdHintWrite - state.GetRegistersRef()[5] = addr - state.GetRegistersRef()[6] = count - step := state.GetStep() - err := state.GetMemory().SetMemoryRange(addr, bytes.NewReader(hintData[int(lastHintLen):])) - require.NoError(t, err) - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), syscallInsn) - - // Set up expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().Registers[2] = count - expected.ActiveThread().Registers[7] = 0 // no error - // Figure out hint expectations - var expectedHints [][]byte - expectedLastHint := make([]byte, 0) - byteIndex := 0 - for _, hint := range hints { - hintDataLength := len(hint) + 4 // Hint data + prefix - hintLastByteIndex := hintDataLength + byteIndex - 1 - if hintLastByteIndex < expectedBytesToProcess { - expectedHints = append(expectedHints, hint) - } else { - expectedLastHint = hintData[byteIndex:expectedBytesToProcess] - break - } - byteIndex += hintDataLength - } - expected.LastHint = expectedLastHint - - // Run state transition - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - require.False(t, stepWitness.HasPreimage()) - - // Validate - require.Equal(t, expectedHints, oracle.Hints()) - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) + vms := GetMipsVersionTestCases(f) + type testCase struct { + // Fuzz inputs + addr Word + count Word + hint1 []byte + hint2 []byte + hint3 []byte + // Cached calculations + hintData []byte + lastHint []byte + expectedHints [][]byte + expectedLastHint []byte + } + + 
cacheHintCalculations := func(t require.TestingT, c *testCase) { + if c.hintData != nil { + // Already cached + return + } + + // Set up hint data + r := testutil.NewRandHelper(seed) + hints := [][]byte{c.hint1, c.hint2, c.hint3} + c.hintData = make([]byte, 0) + for _, hint := range hints { + prefixedHint := mtutil.AddHintLengthPrefix(hint) + c.hintData = append(c.hintData, prefixedHint...) + } + lastHintLen := math.Round(r.Fraction() * float64(len(c.hintData))) + c.lastHint = c.hintData[:int(lastHintLen)] + expectedBytesToProcess := int(c.count) + int(lastHintLen) + if expectedBytesToProcess > len(c.hintData) { + // Add an extra hint to span the rest of the hint data + randomHint := r.RandomBytes(t, expectedBytesToProcess) + prefixedHint := mtutil.AddHintLengthPrefix(randomHint) + c.hintData = append(c.hintData, prefixedHint...) + hints = append(hints, randomHint) + } + + // Figure out hint expectations + c.expectedLastHint = make([]byte, 0) + byteIndex := 0 + for _, hint := range hints { + hintDataLength := len(hint) + 4 // Hint data + prefix + hintLastByteIndex := hintDataLength + byteIndex - 1 + if hintLastByteIndex < expectedBytesToProcess { + c.expectedHints = append(c.expectedHints, hint) + } else { + c.expectedLastHint = c.hintData[byteIndex:expectedBytesToProcess] + break + } + byteIndex += hintDataLength + } + } + + initState := func(t require.TestingT, c *testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + cacheHintCalculations(t, c) + state.LastHint = c.lastHint + state.GetRegistersRef()[2] = arch.SysWrite + state.GetRegistersRef()[4] = exec.FdHintWrite + state.GetRegistersRef()[5] = c.addr + state.GetRegistersRef()[6] = c.count + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + err := state.GetMemory().SetMemoryRange(c.addr, bytes.NewReader(c.hintData[int(len(c.lastHint)):])) + require.NoError(t, err) + } + + setExpectations := func(t require.TestingT, c *testCase, expected 
*mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + cacheHintCalculations(t, c) + expected.ExpectStep() + expected.ActiveThread().Registers[2] = c.count + expected.ActiveThread().Registers[7] = 0 // no error + expected.LastHint = c.expectedLastHint + return ExpectNormalExecution() + } + + postCheck := func(t require.TestingT, c *testCase, vm VersionedVMTestCase, deps *TestDependencies, stepWitness *mipsevm.StepWitness) { + oracle, ok := deps.po.(*testutil.HintTrackingOracle) + require.True(t, ok) + require.Equal(t, c.expectedHints, oracle.Hints()) + } + + diffTester := NewDiffTester(NoopTestNamer[*testCase]). + InitState(initState, mtutil.WithPCAndNextPC(0)). + SetExpectations(setExpectations). + PostCheck(postCheck) + + f.Fuzz(func(t *testing.T, addr Word, count Word, hint1, hint2, hint3 []byte, seed int64) { + // Make sure pc does not overlap with hint data in memory + if addr <= 8 { + addr += 8 + } + + po := func() mipsevm.PreimageOracle { + return &testutil.HintTrackingOracle{} } + + tests := []*testCase{ + { + addr: addr, + count: count, + hint1: hint1, + hint2: hint2, + hint3: hint3, + }, + } + diffTester.Run(t, tests, fuzzTestOptions(vms, seed, WithPreimageOracle(po))...) 
}) } func FuzzStatePreimageWrite(f *testing.F) { - versions := GetMipsVersionTestCases(f) + vms := GetMipsVersionTestCases(f) + type testCase struct { + addr arch.Word + count arch.Word + } + + preexistingMemoryVal := [8]byte{0x12, 0x34, 0x56, 0x78, 0x87, 0x65, 0x43, 0x21} + preimageData := []byte("hello world") + preimageKey := preimage.Keccak256Key(crypto.Keccak256Hash(preimageData)).PreimageKey() + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + state.GetRegistersRef()[2] = arch.SysWrite + state.GetRegistersRef()[4] = exec.FdPreimageWrite + state.GetRegistersRef()[5] = c.addr + state.GetRegistersRef()[6] = c.count + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) + state.GetMemory().SetWord(testutil.EffAddr(c.addr), arch.ByteOrderWord.Word(preexistingMemoryVal[:])) + } + + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expectBytesWritten := c.count + alignment := c.addr & arch.ExtMask + sz := arch.WordSizeBytes - alignment + if sz < expectBytesWritten { + expectBytesWritten = sz + } + + expected.ExpectStep() + expected.PreimageOffset = 0 + expected.ActiveThread().Registers[2] = expectBytesWritten + expected.ActiveThread().Registers[7] = 0 // No error + expected.PreimageKey = preimageKey + if expectBytesWritten > 0 { + // Copy original preimage key, but shift it left by expectBytesWritten + copy(expected.PreimageKey[:], preimageKey[expectBytesWritten:]) + // Copy memory data to rightmost expectedBytesWritten + copy(expected.PreimageKey[32-expectBytesWritten:], preexistingMemoryVal[alignment:]) + } + return ExpectNormalExecution() + } + + postCheck := func(t require.TestingT, c testCase, vm VersionedVMTestCase, deps *TestDependencies, stepWitness *mipsevm.StepWitness) { + require.False(t, stepWitness.HasPreimage()) + } + + diffTester := 
NewDiffTester(NoopTestNamer[testCase]). + InitState(initState, mtutil.WithPCAndNextPC(0), mtutil.WithPreimageKey(preimageKey), mtutil.WithPreimageOffset(128)). + SetExpectations(setExpectations). + PostCheck(postCheck) + f.Fuzz(func(t *testing.T, addr arch.Word, count arch.Word, seed int64) { - for _, v := range versions { - t.Run(v.Name, func(t *testing.T) { - // Make sure pc does not overlap with preimage data in memory - pc := Word(0) - if addr <= 8 { - addr += 8 - } - effAddr := addr & arch.AddressMask - preexistingMemoryVal := [8]byte{0x12, 0x34, 0x56, 0x78, 0x87, 0x65, 0x43, 0x21} - preimageData := []byte("hello world") - preimageKey := preimage.Keccak256Key(crypto.Keccak256Hash(preimageData)).PreimageKey() - oracle := testutil.StaticOracle(t, preimageData) - - goVm := v.VMFactory(oracle, os.Stdout, os.Stderr, testutil.CreateLogger(), - mtutil.WithRandomization(seed), mtutil.WithPreimageKey(preimageKey), mtutil.WithPreimageOffset(128), mtutil.WithPCAndNextPC(pc)) - state := goVm.GetState() - state.GetRegistersRef()[2] = arch.SysWrite - state.GetRegistersRef()[4] = exec.FdPreimageWrite - state.GetRegistersRef()[5] = addr - state.GetRegistersRef()[6] = count - testutil.StoreInstruction(state.GetMemory(), state.GetPC(), syscallInsn) - state.GetMemory().SetWord(effAddr, arch.ByteOrderWord.Word(preexistingMemoryVal[:])) - step := state.GetStep() - - expectBytesWritten := count - alignment := addr & arch.ExtMask - sz := arch.WordSizeBytes - alignment - if sz < expectBytesWritten { - expectBytesWritten = sz - } - - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.PreimageOffset = 0 - expected.ActiveThread().Registers[2] = expectBytesWritten - expected.ActiveThread().Registers[7] = 0 // No error - expected.PreimageKey = preimageKey - if expectBytesWritten > 0 { - // Copy original preimage key, but shift it left by expectBytesWritten - copy(expected.PreimageKey[:], preimageKey[expectBytesWritten:]) - // Copy memory data to rightmost 
expectedBytesWritten - copy(expected.PreimageKey[32-expectBytesWritten:], preexistingMemoryVal[alignment:]) - } - - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - require.False(t, stepWitness.HasPreimage()) - - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) + if addr <= 8 { + addr += 8 + } + + po := func() mipsevm.PreimageOracle { + return testutil.StaticOracle(t, preimageData) } + + tests := []testCase{{addr, count}} + diffTester.Run(t, tests, fuzzTestOptions(vms, seed, WithPreimageOracle(po))...) }) } + +func fuzzTestOptions(vms []VersionedVMTestCase, seed int64, opts ...TestOption) []TestOption { + testOpts := []TestOption{ + WithVms(vms), + WithRandomSeed(seed), + SkipAutomaticMemoryReservationTests(), + } + testOpts = append(testOpts, opts...) + return testOpts +} diff --git a/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go b/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go index 8e05194430a00..56933430c62f9 100644 --- a/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go +++ b/cannon/mipsevm/tests/fuzz_evm_multithreaded_test.go @@ -1,11 +1,11 @@ package tests import ( - "os" "testing" "github.com/stretchr/testify/require" + "github.com/ethereum-optimism/optimism/cannon/mipsevm" "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" @@ -15,56 +15,54 @@ import ( ) func FuzzStateSyscallCloneMT(f *testing.F) { - versions := GetMipsVersionTestCases(f) - require.NotZero(f, len(versions), "must have at least one multithreaded version supported") - f.Fuzz(func(t *testing.T, nextThreadId, stackPtr Word, seed int64, version uint) { - v := versions[int(version)%len(versions)] - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(seed)) - state := mtutil.GetMtState(t, goVm) + vms := 
GetMipsVersionTestCases(f) + type testCase struct { + nextThreadId Word + stackPtr Word + } + + initState := func(t require.TestingT, c testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { // Update existing threads to avoid collision with nextThreadId - if mtutil.FindThread(state, nextThreadId) != nil { + if mtutil.FindThread(state, c.nextThreadId) != nil { for i, t := range mtutil.GetAllThreads(state) { - t.ThreadId = nextThreadId - Word(i+1) + t.ThreadId = c.nextThreadId - Word(i+1) } } - // Setup - state.NextThreadId = nextThreadId + state.NextThreadId = c.nextThreadId testutil.StoreInstruction(state.GetMemory(), state.GetPC(), syscallInsn) state.GetRegistersRef()[2] = arch.SysClone state.GetRegistersRef()[4] = exec.ValidCloneFlags - state.GetRegistersRef()[5] = stackPtr - step := state.GetStep() + state.GetRegistersRef()[5] = c.stackPtr + } - // Set up expectations - expected := mtutil.NewExpectedState(t, state) + setExpectations := func(t require.TestingT, c testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { expected.Step += 1 // Set original thread expectations - expected.PrestateActiveThread().PC = state.GetCpu().NextPC - expected.PrestateActiveThread().NextPC = state.GetCpu().NextPC + 4 - expected.PrestateActiveThread().Registers[2] = nextThreadId + prestateNextPC := expected.PrestateActiveThread().NextPC + expected.PrestateActiveThread().PC = prestateNextPC + expected.PrestateActiveThread().NextPC = prestateNextPC + 4 + expected.PrestateActiveThread().Registers[2] = c.nextThreadId expected.PrestateActiveThread().Registers[7] = 0 // Set expectations for new, cloned thread - expected.ActiveThreadId = nextThreadId expectedNewThread := expected.ExpectNewThread() - expectedNewThread.PC = state.GetCpu().NextPC - expectedNewThread.NextPC = state.GetCpu().NextPC + 4 + expectedNewThread.PC = prestateNextPC + expectedNewThread.NextPC = prestateNextPC + 4 
expectedNewThread.Registers[register.RegSyscallNum] = 0 expectedNewThread.Registers[register.RegSyscallErrno] = 0 - expectedNewThread.Registers[register.RegSP] = stackPtr - expected.NextThreadId = nextThreadId + 1 - expected.StepsSinceLastContextSwitch = 0 - if state.TraverseRight { - expected.RightStackSize += 1 - } else { - expected.LeftStackSize += 1 - } + expectedNewThread.Registers[register.RegSP] = c.stackPtr + expected.ExpectActiveThreadId(c.nextThreadId) + expected.ExpectNextThreadId(c.nextThreadId + 1) + expected.ExpectContextSwitch() + return ExpectNormalExecution() + } - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - require.False(t, stepWitness.HasPreimage()) + diffTester := NewDiffTester(NoopTestNamer[testCase]). + InitState(initState). + SetExpectations(setExpectations) - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, multithreaded.GetStateHashFn(), v.Contracts) + f.Fuzz(func(t *testing.T, nextThreadId, stackPtr Word, seed int64) { + tests := []testCase{{nextThreadId, stackPtr}} + diffTester.Run(t, tests, fuzzTestOptions(vms, seed)...) 
}) } diff --git a/cannon/mipsevm/tests/testfuncs_test.go b/cannon/mipsevm/tests/testfuncs_test.go index 031ffb5bfdcdd..797f1f8919bad 100644 --- a/cannon/mipsevm/tests/testfuncs_test.go +++ b/cannon/mipsevm/tests/testfuncs_test.go @@ -2,11 +2,11 @@ package tests import ( "fmt" - "os" "testing" "github.com/stretchr/testify/require" + "github.com/ethereum-optimism/optimism/cannon/mipsevm" "github.com/ethereum-optimism/optimism/cannon/mipsevm/arch" "github.com/ethereum-optimism/optimism/cannon/mipsevm/exec" "github.com/ethereum-optimism/optimism/cannon/mipsevm/multithreaded" @@ -25,59 +25,54 @@ type operatorTestCase struct { expectRes Word } -func testOperators(t *testing.T, cases []operatorTestCase, mips32Insn bool) { - versions := GetMipsVersionTestCases(t) - for _, v := range versions { - for i, tt := range cases { - // sign extend inputs for 64-bit compatibility - if mips32Insn { - tt.rs = randomizeUpperWord(signExtend64(tt.rs)) - tt.rt = randomizeUpperWord(signExtend64(tt.rt)) - tt.expectRes = signExtend64(tt.expectRes) - } +func (c operatorTestCase) Name() string { + return c.name +} + +func testOperators(t *testing.T, testCases []operatorTestCase, mips32Insn bool) { + var cases []operatorTestCase + for _, tt := range testCases { + if mips32Insn { + tt.rs = randomizeUpperWord(signExtend64(tt.rs)) + tt.rt = randomizeUpperWord(signExtend64(tt.rt)) + tt.expectRes = signExtend64(tt.expectRes) + } + cases = append(cases, tt) + } - testName := fmt.Sprintf("%v (%v)", tt.name, v.Name) - t.Run(testName, func(t *testing.T) { - validator := testutil.NewEvmValidator(t, v.StateHashFn, v.Contracts) - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i)), mtutil.WithPC(0), mtutil.WithNextPC(4)) - state := goVm.GetState() - var insn uint32 - var baseReg uint32 = 17 - var rtReg uint32 - var rdReg uint32 - if tt.isImm { - rtReg = 8 - insn = tt.opcode<<26 | baseReg<<21 | rtReg<<16 | uint32(tt.imm) - state.GetRegistersRef()[rtReg] 
= tt.rt - state.GetRegistersRef()[baseReg] = tt.rs - } else { - rtReg = 18 - rdReg = 8 - insn = baseReg<<21 | rtReg<<16 | rdReg<<11 | tt.funct - state.GetRegistersRef()[baseReg] = tt.rs - state.GetRegistersRef()[rtReg] = tt.rt - } - testutil.StoreInstruction(state.GetMemory(), 0, insn) - step := state.GetStep() - - // Setup expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - if tt.isImm { - expected.ActiveThread().Registers[rtReg] = tt.expectRes - } else { - expected.ActiveThread().Registers[rdReg] = tt.expectRes - } - - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - - // Check expectations - expected.Validate(t, state) - validator.ValidateEVM(t, stepWitness, step, goVm) - }) + pc := arch.Word(0) + rtReg := uint32(8) + rdReg := uint32(18) + + initState := func(t require.TestingT, tt operatorTestCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + var insn uint32 + var baseReg uint32 = 17 + if tt.isImm { + insn = tt.opcode<<26 | baseReg<<21 | rtReg<<16 | uint32(tt.imm) + state.GetRegistersRef()[rtReg] = tt.rt + state.GetRegistersRef()[baseReg] = tt.rs + } else { + insn = baseReg<<21 | rtReg<<16 | rdReg<<11 | tt.funct + state.GetRegistersRef()[baseReg] = tt.rs + state.GetRegistersRef()[rtReg] = tt.rt + } + storeInsnWithCache(state, goVm, pc, insn) + } + + setExpectations := func(t require.TestingT, tt operatorTestCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + if tt.isImm { + expected.ActiveThread().Registers[rtReg] = tt.expectRes + } else { + expected.ActiveThread().Registers[rdReg] = tt.expectRes } + return ExpectNormalExecution() } + + NewDiffTester((operatorTestCase).Name). + InitState(initState, mtutil.WithPCAndNextPC(pc)). + SetExpectations(setExpectations). 
+ Run(t, cases) } type mulDivTestCase struct { @@ -116,14 +111,14 @@ func testMulDiv(t *testing.T, templateCases []mulDivTestCase, mips32Insn bool) { rtReg := uint32(0xa) pc := arch.Word(0) - initState := func(tt mulDivTestCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { + initState := func(t require.TestingT, tt mulDivTestCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { insn := tt.opcode<<26 | baseReg<<21 | rtReg<<16 | tt.rdReg<<11 | tt.funct state.GetRegistersRef()[rtReg] = tt.rt state.GetRegistersRef()[baseReg] = tt.rs - testutil.StoreInstruction(state.GetMemory(), pc, insn) + storeInsnWithCache(state, goVm, pc, insn) } - setExpectations := func(tt mulDivTestCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, tt mulDivTestCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { if tt.panicMsg != "" { return ExpectVmPanic(tt.panicMsg, tt.revertMsg) } else { @@ -169,16 +164,16 @@ func testLoadStore(t *testing.T, cases []loadStoreTestCase) { rtReg := uint32(8) pc := arch.Word(0) - initState := func(tt loadStoreTestCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { + initState := func(t require.TestingT, tt loadStoreTestCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { insn := tt.opcode<<26 | baseReg<<21 | rtReg<<16 | tt.imm - testutil.StoreInstruction(state.GetMemory(), pc, insn) + storeInsnWithCache(state, goVm, pc, insn) state.GetMemory().SetWord(tt.effAddr(), tt.memVal) state.GetRegistersRef()[rtReg] = tt.rt state.GetRegistersRef()[baseReg] = tt.base } - setExpectations := func(tt loadStoreTestCase, expect *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, tt loadStoreTestCase, expect *mtutil.ExpectedState, vm 
VersionedVMTestCase) ExpectedExecResult { expect.ExpectStep() if tt.expectMemVal != 0 { expect.ExpectMemoryWrite(tt.effAddr(), tt.expectMemVal) @@ -205,37 +200,35 @@ type branchTestCase struct { offset uint16 } +func (t branchTestCase) Name() string { + return t.name +} + func testBranch(t *testing.T, cases []branchTestCase) { - versions := GetMipsVersionTestCases(t) - for _, v := range versions { - for i, tt := range cases { - testName := fmt.Sprintf("%v (%v)", tt.name, v.Name) - t.Run(testName, func(t *testing.T) { - goVm := v.VMFactory(nil, os.Stdout, os.Stderr, testutil.CreateLogger(), mtutil.WithRandomization(int64(i)), mtutil.WithPCAndNextPC(tt.pc)) - state := goVm.GetState() - const rsReg = 8 // t0 - insn := tt.opcode<<26 | rsReg<<21 | tt.regimm<<16 | uint32(tt.offset) - testutil.StoreInstruction(state.GetMemory(), tt.pc, insn) - state.GetRegistersRef()[rsReg] = Word(tt.rs) - step := state.GetStep() - - // Setup expectations - expected := mtutil.NewExpectedState(t, state) - expected.ExpectStep() - expected.ActiveThread().NextPC = tt.expectNextPC - if tt.expectLink { - expected.ActiveThread().Registers[31] = state.GetPC() + 8 - } - - stepWitness, err := goVm.Step(true) - require.NoError(t, err) - - // Check expectations - expected.Validate(t, state) - testutil.ValidateEVM(t, stepWitness, step, goVm, v.StateHashFn, v.Contracts) - }) + initState := func(t require.TestingT, tt branchTestCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + const rsReg = 8 // t0 + insn := tt.opcode<<26 | rsReg<<21 | tt.regimm<<16 | uint32(tt.offset) + + state.GetCurrentThread().Cpu.PC = tt.pc + state.GetCurrentThread().Cpu.NextPC = tt.pc + 4 + storeInsnWithCache(state, goVm, tt.pc, insn) + state.GetRegistersRef()[rsReg] = Word(tt.rs) + } + + setExpectations := func(t require.TestingT, tt branchTestCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + expected.ExpectStep() + 
expected.ActiveThread().NextPC = tt.expectNextPC + if tt.expectLink { + expected.ActiveThread().Registers[31] = tt.pc + 8 } + + return ExpectNormalExecution() } + + NewDiffTester((branchTestCase).Name). + InitState(initState). + SetExpectations(setExpectations). + Run(t, cases) } func testNoopSyscall(t *testing.T, vm VersionedVMTestCase, syscalls map[string]uint32) { @@ -253,12 +246,12 @@ func testNoopSyscall(t *testing.T, vm VersionedVMTestCase, syscalls map[string]u cases = append(cases, testCase{name: name, sycallNum: arch.Word(syscallNum)}) } - initState := func(tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) state.GetRegistersRef()[2] = tt.sycallNum // Set syscall number } - setExpectations := func(tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { expected.ExpectStep() expected.ActiveThread().Registers[2] = 0 expected.ActiveThread().Registers[7] = 0 @@ -288,12 +281,12 @@ func testUnsupportedSyscall(t *testing.T, vm VersionedVMTestCase, unsupportedSys cases = append(cases, testCase{name: name, sycallNum: arch.Word(syscallNum)}) } - initState := func(tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper) { - testutil.StoreInstruction(state.Memory, state.GetPC(), syscallInsn) + initState := func(t require.TestingT, tt testCase, state *multithreaded.State, vm VersionedVMTestCase, r *testutil.RandHelper, goVm mipsevm.FPVM) { + storeInsnWithCache(state, goVm, state.GetPC(), syscallInsn) state.GetRegistersRef()[2] = tt.sycallNum // Set 
syscall number } - setExpectations := func(tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { + setExpectations := func(t require.TestingT, tt testCase, expected *mtutil.ExpectedState, vm VersionedVMTestCase) ExpectedExecResult { goErr := fmt.Sprintf("unrecognized syscall: %v", tt.sycallNum) return ExpectVmPanic(goErr, "unimplemented syscall") } diff --git a/cannon/mipsevm/testutil/rand.go b/cannon/mipsevm/testutil/rand.go index 481a612a14159..d90ae6d45395e 100644 --- a/cannon/mipsevm/testutil/rand.go +++ b/cannon/mipsevm/testutil/rand.go @@ -72,7 +72,8 @@ func (h *RandHelper) RandHint() []byte { func (h *RandHelper) RandRegisters() *[32]arch.Word { registers := new([32]arch.Word) - for i := 0; i < 32; i++ { + // Start from 1 as register 0 is the "zero" register (always 0) + for i := 1; i < 32; i++ { registers[i] = h.Word() } return registers diff --git a/cannon/mipsevm/versions/state.go b/cannon/mipsevm/versions/state.go index 3c02599e2f9a8..e325e93c75d86 100644 --- a/cannon/mipsevm/versions/state.go +++ b/cannon/mipsevm/versions/state.go @@ -27,6 +27,14 @@ func LoadStateFromFile(path string) (*VersionedState, error) { return serialize.LoadSerializedBinary[VersionedState](path) } +func LoadStateFromFileWithLargeICache(path string) (*VersionedStateWithLargeICache, error) { + if !serialize.IsBinaryFile(path) { + // JSON states are always singlethreaded v1 which is no longer supported + return nil, fmt.Errorf("%w: %s", ErrUnsupportedVersion, VersionSingleThreaded) + } + return serialize.LoadSerializedBinary[VersionedStateWithLargeICache](path) +} + func NewFromState(vers StateVersion, state mipsevm.FPVMState) (*VersionedState, error) { switch state := state.(type) { case *multithreaded.State: @@ -61,11 +69,6 @@ func (s *VersionedState) CreateVM(logger log.Logger, po mipsevm.PreimageOracle, func FeaturesForVersion(version StateVersion) mipsevm.FeatureToggles { features := mipsevm.FeatureToggles{} // Set any required feature 
toggles based on the state version here. - if version >= VersionMultiThreaded64_v4 { - features.SupportMinimalSysEventFd2 = true - features.SupportDclzDclo = true - features.SupportNoopMprotect = true - } if version >= VersionMultiThreaded64_v5 { features.SupportWorkingSysGetRandom = true } @@ -106,3 +109,29 @@ func (s *VersionedState) Deserialize(in io.Reader) error { func (s *VersionedState) MarshalJSON() ([]byte, error) { return nil, fmt.Errorf("%w for type %T", ErrJsonNotSupported, s.FPVMState) } + +// VersionedStateWithLargeICache is a VersionedState that allocates a large memory region for the i-cache. +type VersionedStateWithLargeICache struct { + VersionedState +} + +func (s *VersionedStateWithLargeICache) Deserialize(in io.Reader) error { + bin := serialize.NewBinaryReader(in) + if err := bin.ReadUInt(&s.Version); err != nil { + return err + } + + if IsSupportedMultiThreaded64(s.Version) { + if arch.IsMips32 { + return ErrUnsupportedMipsArch + } + state := &multithreaded.State{UseLargeICache: true} + if err := state.Deserialize(in); err != nil { + return err + } + s.FPVMState = state + return nil + } else { + return fmt.Errorf("%w: %d", ErrUnknownVersion, s.Version) + } +} diff --git a/cannon/testdata/go-1-23/alloc/go.mod b/cannon/testdata/go-1-23/alloc/go.mod index bed95427d2cad..9cd0eb04f3188 100644 --- a/cannon/testdata/go-1-23/alloc/go.mod +++ b/cannon/testdata/go-1-23/alloc/go.mod @@ -7,8 +7,8 @@ toolchain go1.23.8 require github.com/ethereum-optimism/optimism v0.0.0 require ( - golang.org/x/crypto v0.35.0 // indirect - golang.org/x/sys v0.30.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/sys v0.31.0 // indirect ) replace github.com/ethereum-optimism/optimism v0.0.0 => ./../../../.. 
diff --git a/cannon/testdata/go-1-23/alloc/go.sum b/cannon/testdata/go-1-23/alloc/go.sum index ae65245462a89..a52ee74a11e53 100644 --- a/cannon/testdata/go-1-23/alloc/go.sum +++ b/cannon/testdata/go-1-23/alloc/go.sum @@ -4,9 +4,9 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/cannon/testdata/go-1-23/claim/go.mod b/cannon/testdata/go-1-23/claim/go.mod index b18045136a251..de70a6a890ee0 100644 --- a/cannon/testdata/go-1-23/claim/go.mod +++ b/cannon/testdata/go-1-23/claim/go.mod @@ -7,8 +7,8 @@ toolchain go1.23.8 require github.com/ethereum-optimism/optimism v0.0.0 require ( - golang.org/x/crypto v0.35.0 // indirect - golang.org/x/sys v0.30.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/sys v0.31.0 // indirect ) replace github.com/ethereum-optimism/optimism v0.0.0 => ./../../../.. 
diff --git a/cannon/testdata/go-1-23/claim/go.sum b/cannon/testdata/go-1-23/claim/go.sum index ae65245462a89..a52ee74a11e53 100644 --- a/cannon/testdata/go-1-23/claim/go.sum +++ b/cannon/testdata/go-1-23/claim/go.sum @@ -4,9 +4,9 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/cannon/testdata/go-1-24/alloc/go.mod b/cannon/testdata/go-1-24/alloc/go.mod index 9dd982e7ad7d5..2ba7887a35a3d 100644 --- a/cannon/testdata/go-1-24/alloc/go.mod +++ b/cannon/testdata/go-1-24/alloc/go.mod @@ -7,8 +7,8 @@ toolchain go1.24.2 require github.com/ethereum-optimism/optimism v0.0.0 require ( - golang.org/x/crypto v0.35.0 // indirect - golang.org/x/sys v0.30.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/sys v0.31.0 // indirect ) replace github.com/ethereum-optimism/optimism v0.0.0 => ./../../../.. 
diff --git a/cannon/testdata/go-1-24/alloc/go.sum b/cannon/testdata/go-1-24/alloc/go.sum index ae65245462a89..a52ee74a11e53 100644 --- a/cannon/testdata/go-1-24/alloc/go.sum +++ b/cannon/testdata/go-1-24/alloc/go.sum @@ -4,9 +4,9 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/cannon/testdata/go-1-24/claim/go.mod b/cannon/testdata/go-1-24/claim/go.mod index 9c2fbdba88531..aec47900e8fa4 100644 --- a/cannon/testdata/go-1-24/claim/go.mod +++ b/cannon/testdata/go-1-24/claim/go.mod @@ -7,8 +7,8 @@ toolchain go1.24.2 require github.com/ethereum-optimism/optimism v0.0.0 require ( - golang.org/x/crypto v0.35.0 // indirect - golang.org/x/sys v0.30.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/sys v0.31.0 // indirect ) replace github.com/ethereum-optimism/optimism v0.0.0 => ./../../../.. 
diff --git a/cannon/testdata/go-1-24/claim/go.sum b/cannon/testdata/go-1-24/claim/go.sum index ae65245462a89..a52ee74a11e53 100644 --- a/cannon/testdata/go-1-24/claim/go.sum +++ b/cannon/testdata/go-1-24/claim/go.sum @@ -4,9 +4,9 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/codecov.yml b/codecov.yml index 9f41fd222787e..36d186e2c984c 100644 --- a/codecov.yml +++ b/codecov.yml @@ -15,11 +15,6 @@ ignore: - "packages/contracts-bedrock/scripts/**/*.sol" - "packages/contracts-bedrock/src/vendor/**/*.sol" - "packages/contracts-bedrock/src/interfaces/**/*.sol" - # TODO: add coverage for MIPS64 back once tests are merged in - - "packages/contracts-bedrock/src/cannon/MIPS64.sol" - - "packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol" - - "packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol" - - 
"packages/contracts-bedrock/src/cannon/libraries/MIPS64Syscalls.sol" coverage: status: diff --git a/devnet-sdk/shell/env/devnet.go b/devnet-sdk/shell/env/devnet.go index e766a0032ef7f..2f1d3baf6eb88 100644 --- a/devnet-sdk/shell/env/devnet.go +++ b/devnet-sdk/shell/env/devnet.go @@ -11,6 +11,7 @@ import ( "github.com/ethereum-optimism/optimism/devnet-sdk/controller/surface" "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/params" ) @@ -138,8 +139,12 @@ func fixupDevnetConfig(config *descriptors.DevnetEnvironment) error { return fmt.Errorf("invalid L1 ID: %s", config.L1.ID) } if config.L1.Config == nil { - config.L1.Config = ¶ms.ChainConfig{ - ChainID: l1ID, + if l1Config := eth.L1ChainConfigByChainID(eth.ChainIDFromBig(l1ID)); l1Config != nil { + config.L1.Config = l1Config + } else { + config.L1.Config = ¶ms.ChainConfig{ + ChainID: l1ID, + } } } for _, l2Chain := range config.L2 { diff --git a/devnet-sdk/system/system.go b/devnet-sdk/system/system.go index 54cb37cdffecc..d282e9ee01d4d 100644 --- a/devnet-sdk/system/system.go +++ b/devnet-sdk/system/system.go @@ -8,8 +8,7 @@ import ( "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" "github.com/ethereum-optimism/optimism/devnet-sdk/shell/env" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/sources" + "github.com/ethereum-optimism/optimism/op-service/dial" ) type system struct { @@ -102,11 +101,10 @@ func (i *interopSystem) Supervisor(ctx context.Context) (Supervisor, error) { return i.supervisor, nil } - cl, err := client.NewRPC(ctx, nil, i.supervisorRPC) + supervisor, err := dial.DialSupervisorClientWithTimeout(ctx, nil, i.supervisorRPC) if err != nil { return nil, fmt.Errorf("failed to dial supervisor RPC: %w", err) } - supervisor := sources.NewSupervisorClient(cl) i.supervisor = 
supervisor return supervisor, nil } diff --git a/devnet-sdk/testing/testlib/validators/forks.go b/devnet-sdk/testing/testlib/validators/forks.go index 33c685ad51a52..f60dcaa2e73e9 100644 --- a/devnet-sdk/testing/testlib/validators/forks.go +++ b/devnet-sdk/testing/testlib/validators/forks.go @@ -62,6 +62,8 @@ func IsForkActivated(c *params.ChainConfig, forkName rollup.ForkName, timestamp return c.IsOptimismHolocene(timestamp), nil case rollup.Isthmus: return c.IsOptimismIsthmus(timestamp), nil + case rollup.Jovian: + return c.IsOptimismJovian(timestamp), nil case rollup.Interop: return c.IsInterop(timestamp), nil default: diff --git a/docs/security-reviews/2025_04-Interop-Portal-Cantina.pdf b/docs/security-reviews/2025_04-Interop-Portal-Cantina.pdf index e53afca2f1b7f..32d20fb0239c9 100644 Binary files a/docs/security-reviews/2025_04-Interop-Portal-Cantina.pdf and b/docs/security-reviews/2025_04-Interop-Portal-Cantina.pdf differ diff --git a/docs/security-reviews/2025_06-Cannon-3DOC.pdf b/docs/security-reviews/2025_06-Cannon-3DOC.pdf index c3bff38beed9d..7251800fe6110 100644 Binary files a/docs/security-reviews/2025_06-Cannon-3DOC.pdf and b/docs/security-reviews/2025_06-Cannon-3DOC.pdf differ diff --git a/docs/security-reviews/2025-06-Spearbit-Cannon-fix-review.pdf b/docs/security-reviews/2025_06-Spearbit-Cannon-fix-review.pdf similarity index 100% rename from docs/security-reviews/2025-06-Spearbit-Cannon-fix-review.pdf rename to docs/security-reviews/2025_06-Spearbit-Cannon-fix-review.pdf diff --git a/docs/security-reviews/2025_09-U16a-Spearbit.pdf b/docs/security-reviews/2025_09-U16a-Spearbit.pdf new file mode 100644 index 0000000000000..1952120d83e89 Binary files /dev/null and b/docs/security-reviews/2025_09-U16a-Spearbit.pdf differ diff --git a/docs/security-reviews/README.md b/docs/security-reviews/README.md index 0cd5c4f543b7f..0b3f4dc63cc5f 100644 --- a/docs/security-reviews/README.md +++ b/docs/security-reviews/README.md @@ -40,10 +40,11 @@ Please see the 
report for the specific details. | 2025-04 | Cantina (contest) | Interop Portal Contracts (u16) | [2025_04-Interop-Portal-Cantina.pdf](./2025_04-Interop-Portal-Cantina.pdf) | e4b921c9dbf8cd3a8db20ef4f15e0e2aa495fcc3 | op-contracts/v4.0.0 | | 2025-05 | Spearbit | Interop Portal Contracts (u16) | [2025_05-Interop-Portal-Spearbit.pdf](./2025_05-Interop-Portal-Spearbit.pdf) | 7cd84fed9554193c2dcd683e1ff2d0e2605448f6 | op-contracts/v4.0.0 | | 2025-05 | Coinbase Protocol Security | Cannon updates to support Go 1.23 and Kona | [2025_05-Cannon-Go-Updates-Coinbase.pdf](./2025_05-Cannon-Go-Updates-Coinbase.pdf) | 4c68444bc9b130e892b52cacf67b31f0424fb6d0 | | -| 2025-06 | Spearbit | Cannon Go 1.23 support fix review | [2025-06-Spearbit-Cannon-fix-review.pdf](./2025-06-Spearbit-Cannon-fix-review.pdf) | ffe3d5fed05cabf46a67ea00627a0959c0caa0b5 | op-contracts/v4.0.0 | +| 2025-06 | Spearbit | Cannon Go 1.23 support fix review | [2025_06-Spearbit-Cannon-fix-review.pdf](./2025_06-Spearbit-Cannon-fix-review.pdf) | ffe3d5fed05cabf46a67ea00627a0959c0caa0b5 | op-contracts/v4.0.0 | | 2025-06 | Radiant Labs | Cannon Go 1.24 support | [2025_06-Cannon-3DOC.pdf](./2025_06-Cannon-3DOC.pdf) | 689111fca9a10e6670ba0b5c7f1a549a212c855b | | | 2025-05 | Spearbit | Upgrade 16 | [2025_05-Upgrade16-Spearbit.pdf](./2025_05-Upgrade16-Spearbit.pdf) / [Auditor hosted report][SpearbitMay25] | 54c19f6acb7a6d3505f884bae601733d3d54a3a6 | op-contracts/v4.0.0 | | 2025-07 | Spearbit | VerifyOPCM | [2025_07-VerifyOPCM-Spearbit.pdf](./2025_07-VerifyOPCM-Spearbit.pdf) / [Auditor hosted report][SpearbitJuly25] | 731280c6fc0ad184d252e0fb1d0ad12b5f59fd60 | op-contracts/v4.0.0 | +| 2025-09 | Spearbit | U16a | [2025_09-U16a-Spearbit.pdf](./2025_09-U16a-Spearbit.pdf) | 475801690f7a451469ee4da87b5fe3c54c92f372 | op-contracts/v4.1.0 | diff --git a/go.mod b/go.mod index 9c99ebaf97979..f6a3a144e18f4 100644 --- a/go.mod +++ b/go.mod @@ -8,26 +8,28 @@ require ( github.com/BurntSushi/toml v1.5.0 github.com/Masterminds/semver/v3 
v3.3.1 github.com/andybalholm/brotli v1.1.0 + github.com/base/go-bip39 v1.1.0 github.com/bmatcuk/doublestar/v4 v4.8.1 github.com/btcsuite/btcd v0.24.2 github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 github.com/cockroachdb/pebble v1.1.5 github.com/coder/websocket v1.8.13 - github.com/consensys/gnark-crypto v0.16.0 + github.com/consensys/gnark-crypto v0.18.0 github.com/crate-crypto/go-kzg-4844 v1.1.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/docker/docker v27.5.1+incompatible github.com/docker/go-connections v0.5.0 - github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 + github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.4-0.20251001155152-4eb15ccedf7e github.com/ethereum-optimism/superchain-registry/validation v0.0.0-20250603144016-9c45ca7d4508 - github.com/ethereum/go-ethereum v1.15.11 + github.com/ethereum/go-ethereum v1.16.3 github.com/ethstorage/da-server v0.0.0-20241224013916-2bd2256b6a70 github.com/fatih/color v1.18.0 github.com/fsnotify/fsnotify v1.9.0 github.com/go-task/slim-sprig/v3 v3.0.0 github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb github.com/google/go-cmp v0.7.0 + github.com/google/go-github/v55 v55.0.0 github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8 github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.3 @@ -61,15 +63,15 @@ require ( github.com/schollz/progressbar/v3 v3.18.0 github.com/spf13/afero v1.12.0 github.com/stretchr/testify v1.10.0 - github.com/tyler-smith/go-bip39 v1.1.0 github.com/urfave/cli/v2 v2.27.6 go.opentelemetry.io/otel v1.34.0 go.opentelemetry.io/otel/trace v1.34.0 - golang.org/x/crypto v0.35.0 + golang.org/x/crypto v0.36.0 golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c golang.org/x/mod v0.22.0 + golang.org/x/oauth2 v0.25.0 golang.org/x/sync v0.14.0 - golang.org/x/term v0.29.0 + golang.org/x/term v0.30.0 golang.org/x/text v0.25.0 golang.org/x/time v0.11.0 gonum.org/v1/plot v0.16.0 @@ -83,6 +85,7 @@ 
require ( git.sr.ht/~sbinet/gg v0.6.0 // indirect github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e // indirect github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect github.com/VictoriaMetrics/fastcache v1.12.2 // indirect github.com/adrg/xdg v0.4.0 // indirect github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b // indirect @@ -98,12 +101,12 @@ require ( github.com/campoy/embedmd v1.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/cloudflare/circl v1.3.3 // indirect github.com/cockroachdb/errors v1.11.3 // indirect github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect - github.com/consensys/bavard v0.1.27 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect @@ -111,6 +114,7 @@ require ( github.com/crate-crypto/go-eth-kzg v1.3.0 // indirect github.com/crate-crypto/go-ipa v0.0.0-20240724233137-53bbb0ceb27a // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect + github.com/dchest/siphash v1.2.3 // indirect github.com/deckarep/golang-set/v2 v2.6.0 // indirect github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect @@ -122,11 +126,12 @@ require ( github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/elastic/gosigar v0.14.3 // indirect + github.com/emicklei/dot v1.6.2 // indirect github.com/ethereum/c-kzg-4844/v2 v2.1.0 // indirect github.com/ethereum/go-verkle v0.2.2 // indirect 
github.com/felixge/fgprof v0.9.5 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/ferranbt/fastssz v0.1.2 // indirect + github.com/ferranbt/fastssz v0.1.4 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect @@ -140,10 +145,11 @@ require ( github.com/go-yaml/yaml v2.1.0+incompatible // indirect github.com/goccy/go-json v0.10.4 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect - github.com/gofrs/flock v0.8.1 // indirect + github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.2 // indirect github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect + github.com/google/go-querystring v1.1.0 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20241009165004-a3522334989c // indirect github.com/graph-gophers/graphql-go v1.3.0 // indirect @@ -198,7 +204,6 @@ require ( github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/pointerstructure v1.2.1 // indirect - github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/term v0.5.2 // indirect github.com/morikuni/aec v1.0.0 // indirect @@ -291,8 +296,8 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/image v0.25.0 // indirect - golang.org/x/net v0.36.0 // indirect - golang.org/x/sys v0.30.0 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/sys v0.31.0 // indirect golang.org/x/tools v0.29.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect @@ -302,15 +307,13 @@ require ( gopkg.in/yaml.v2 
v2.4.0 // indirect gotest.tools/v3 v3.5.2 // indirect lukechampine.com/blake3 v1.3.0 // indirect - rsc.io/tmplfunc v0.0.3 // indirect ) -replace github.com/ethereum/go-ethereum => github.com/Quarkchain/op-geth v0.0.0-20250728040603-bb068d7c4868 +replace github.com/ethereum/go-ethereum => github.com/Quarkchain/op-geth v0.0.0-20251008030011-391974388283 // replace github.com/ethereum/go-ethereum => ../op-geth // replace github.com/ethereum-optimism/superchain-registry/superchain => ../superchain-registry/superchain - // This release keeps breaking Go builds. Stop that. exclude ( github.com/kataras/iris/v12 v12.2.0-beta5 diff --git a/go.sum b/go.sum index 21c9eaccb6be5..dd4399b0b96de 100644 --- a/go.sum +++ b/go.sum @@ -37,8 +37,10 @@ github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lpr github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/Quarkchain/op-geth v0.0.0-20250728040603-bb068d7c4868 h1:DtNvO7rj/pf3y2L1P1BKqDj9U4jVZ96E4WMSDnyA3MI= -github.com/Quarkchain/op-geth v0.0.0-20250728040603-bb068d7c4868/go.mod h1:SkytozVEPtnUeBlquwl0Qv5JKvrN/Y5aqh+VkQo/EOI= +github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA= +github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= +github.com/Quarkchain/op-geth v0.0.0-20251008030011-391974388283 h1:RqEJ1+5Z2iajDJhv9DTpnuExuyrM1TAZAnf5AeTiJUc= +github.com/Quarkchain/op-geth v0.0.0-20251008030011-391974388283/go.mod h1:Ct2QjqZ2UKgvvgKLLYzoh/DBicJZB8DXsv45DgEjcco= github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= 
github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls= @@ -63,6 +65,8 @@ github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQh github.com/armon/go-metrics v0.3.8/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/base/go-bip39 v1.1.0 h1:ely6zK09KaQbfX8wpcmN4pRXy0SbbqMT2QF45P1BNh0= +github.com/base/go-bip39 v1.1.0/go.mod h1:grZZXX8gYycovDC4cLS/RS0DmctofwHN+MUhedYCbO0= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= @@ -106,6 +110,7 @@ github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY= github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= @@ -136,6 +141,9 @@ github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= 
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= +github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.11.3 h1:5bA+k2Y6r+oz/6Z/RFlNeVCesGARKuC6YymtcDrbC/I= @@ -152,10 +160,8 @@ github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 h1:zuQyyAK github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06/go.mod h1:7nc4anLGjupUW/PeY5qiNYsdNXj7zopG+eqsS7To5IQ= github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE= github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs= -github.com/consensys/bavard v0.1.27 h1:j6hKUrGAy/H+gpNrpLU3I26n1yc+VMGmd6ID5+gAhOs= -github.com/consensys/bavard v0.1.27/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs= -github.com/consensys/gnark-crypto v0.16.0 h1:8Dl4eYmUWK9WmlP1Bj6je688gBRJCJbT8Mw4KoTAawo= -github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU= +github.com/consensys/gnark-crypto v0.18.0 h1:vIye/FqI50VeAr0B3dx+YjeIvmc3LWz4yEfbWBpTUf0= +github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx8ubaYfDROyp2Z8c= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= @@ -184,6 +190,8 @@ github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= +github.com/dchest/siphash v1.2.3 h1:QXwFc8cFOR2dSa/gE6o/HokBMWtLUaNDVd+22aKHeEA= +github.com/dchest/siphash v1.2.3/go.mod h1:0NvQU092bT0ipiFN++/rXm69QG9tVxLAlQHIXMPAkHc= github.com/deckarep/golang-set/v2 v2.6.0 h1:XfcQbWM1LlMB8BsJ8N9vW5ehnnPVIw0je80NsVHagjM= github.com/deckarep/golang-set/v2 v2.6.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= @@ -228,8 +236,10 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3 h1:RWHKLhCrQThMfch+QJ1Z8veEq5ZO3DfIhZ7xgRP9WTc= -github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.3/go.mod h1:QziizLAiF0KqyLdNJYD7O5cpDlaFMNZzlxYNcWsJUxs= +github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= +github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= +github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.4-0.20251001155152-4eb15ccedf7e h1:iy1vBIzACYUyOVyoADUwvAiq2eOPC0yVsDUdolPwQjk= +github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.4-0.20251001155152-4eb15ccedf7e/go.mod h1:DYj7+vYJ4cIB7zera9mv4LcAynCL5u4YVfoeUu6Wa+w= github.com/ethereum-optimism/superchain-registry/validation v0.0.0-20250603144016-9c45ca7d4508 
h1:A/3QVFt+Aa9ozpPVXxUTLui8honBjSusAaiCVRbafgs= github.com/ethereum-optimism/superchain-registry/validation v0.0.0-20250603144016-9c45ca7d4508/go.mod h1:NZ816PzLU1TLv1RdAvYAb6KWOj4Zm5aInT0YpDVml2Y= github.com/ethereum/c-kzg-4844/v2 v2.1.0 h1:gQropX9YFBhl3g4HYhwE70zq3IHFRgbbNPw0Shwzf5w= @@ -246,8 +256,8 @@ github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY= github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/ferranbt/fastssz v0.1.2 h1:Dky6dXlngF6Qjc+EfDipAkE83N5I5DE68bY6O0VLNPk= -github.com/ferranbt/fastssz v0.1.2/go.mod h1:X5UPrE2u1UJjxHA8X54u04SBwdAQjG2sFtWs39YxyWs= +github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= +github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= @@ -308,8 +318,8 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= -github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= +github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/gogo/protobuf 
v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -353,7 +363,11 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-github/v55 v55.0.0 h1:4pp/1tNMB9X/LuAhs5i0KQAE40NmiR/y6prLNb9x9cg= +github.com/google/go-github/v55 v55.0.0/go.mod h1:JLahOTA1DnXzhxEymmFF5PP2tSS9JVNj68mSZNDwskA= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8 h1:Ep/joEub9YwcjRY6ND3+Y/w0ncE540RtGatVhtZL0/Q= github.com/google/gofuzz v1.2.1-0.20220503160820-4a35382e8fc8/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -368,7 +382,6 @@ github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0Z github.com/google/pprof v0.0.0-20241009165004-a3522334989c h1:NDovD0SMpBYXlE1zJmS1q55vWB/fUQBcPAqAboZSccA= github.com/google/pprof v0.0.0-20241009165004-a3522334989c/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid 
v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -606,9 +619,6 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/pointerstructure v1.2.1 h1:ZhBBeX8tSlRpu/FFhXH4RC4OJzFlqsQhoHZAz4x7TIw= github.com/mitchellh/pointerstructure v1.2.1/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= -github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= -github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= -github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= @@ -791,8 +801,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/protolambda/ctxlock v0.1.0 h1:rCUY3+vRdcdZXqT07iXgyr744J2DU2LCBIXowYAjBCE= github.com/protolambda/ctxlock v0.1.0/go.mod h1:vefhX6rIZH8rsg5ZpOJfEDYQOppZi19SfPiGOFrNnwM= -github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220714111606-acbb2962fb48 h1:cSo6/vk8YpvkLbk9v3FO97cakNmUoxwi2KMP8hd5WIw= -github.com/prysmaticlabs/gohashtree v0.0.1-alpha.0.20220714111606-acbb2962fb48/go.mod h1:4pWaT30XoEx1j8KNJf3TV+E3mQkaufn7mf+jRNb/Fuk= +github.com/prysmaticlabs/gohashtree v0.0.4-beta h1:H/EbCuXPeTV3lpKeXGPpEV9gsUpkqOOVnWapUyeWro4= +github.com/prysmaticlabs/gohashtree v0.0.4-beta/go.mod h1:BFdtALS+Ffhg3lGQIHv9HDWuHS8cTvHZzrHWxwOtGOs= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= 
github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= github.com/quic-go/quic-go v0.46.0 h1:uuwLClEEyk1DNvchH8uCByQVjo3yKL9opKulExNDs7Y= @@ -897,8 +907,6 @@ github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYN github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= -github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc= github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= @@ -1000,8 +1008,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= @@ -1054,12 
+1062,14 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= -golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1137,8 +1147,8 @@ golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys 
v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1147,8 +1157,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= -golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -1269,7 +1279,5 @@ lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= -rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod 
h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/justfile b/justfile index 33539c92ab9ae..131d24a076df0 100644 --- a/justfile +++ b/justfile @@ -17,3 +17,21 @@ shellcheck: # Generates a table of contents for the README.md file. toc: md_toc -p github README.md + +latest-versions: + ./ops/scripts/latest-versions.sh + +# Usage: +# just update-op-geth 2f0528b +# just update-op-geth v1.101602.4 +# just update-op-geth optimism +update-op-geth ref: + @ref="{{ref}}"; \ + if [ -z "$ref" ]; then echo "error: provide a hash/tag/branch"; exit 1; fi; \ + tmpl=$(printf "\173\173.Version\175\175"); \ + ver=$(go list -m -f "$tmpl" github.com/ethereum-optimism/op-geth@"$ref"); \ + if [ -z "$ver" ]; then echo "error: couldn't resolve $ref"; exit 1; fi; \ + go mod edit -replace=github.com/ethereum/go-ethereum=github.com/ethereum-optimism/op-geth@"$ver"; \ + go mod tidy; \ + echo "Updated op-geth to $ver" + diff --git a/kurtosis-devnet/cmd/main.go b/kurtosis-devnet/cmd/main.go index ab083af0446c7..e9a651cb94362 100644 --- a/kurtosis-devnet/cmd/main.go +++ b/kurtosis-devnet/cmd/main.go @@ -1,15 +1,18 @@ package main import ( + "context" "encoding/json" "fmt" "log" "os" "path/filepath" + "github.com/BurntSushi/toml" "github.com/ethereum-optimism/optimism/devnet-sdk/telemetry" "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/deploy" "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis" + "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/inspect" autofixTypes "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/types" "github.com/honeycombio/otel-config-go/otelconfig" "github.com/urfave/cli/v2" @@ -21,6 +24,7 @@ type config struct { kurtosisPackage string enclave string environment string + conductorConfig string dryRun bool baseDir string kurtosisBinary string @@ -34,6 +38,7 @@ func newConfig(c 
*cli.Context) (*config, error) { kurtosisPackage: c.String("kurtosis-package"), enclave: c.String("enclave"), environment: c.String("environment"), + conductorConfig: c.String("conductor-config"), dryRun: c.Bool("dry-run"), kurtosisBinary: c.String("kurtosis-binary"), autofix: c.String("autofix"), @@ -68,6 +73,38 @@ func writeEnvironment(path string, env *kurtosis.KurtosisEnvironment) error { return nil } +func writeConductorConfig(path string, enclaveName string) error { + if path == "" { + return nil + } + + ctx := context.Background() + conductorConfig, err := inspect.ExtractConductorConfig(ctx, enclaveName) + if err != nil { + log.Printf("Warning: Could not extract conductor config: %v", err) + return nil + } + + if conductorConfig == nil { + log.Println("No conductor services found, skipping conductor config generation") + return nil + } + + out, err := os.Create(path) + if err != nil { + return fmt.Errorf("error creating conductor config file: %w", err) + } + defer out.Close() + + encoder := toml.NewEncoder(out) + if err := encoder.Encode(conductorConfig); err != nil { + return fmt.Errorf("error encoding conductor config as TOML: %w", err) + } + + log.Printf("Conductor configuration saved to: %s", path) + return nil +} + func printAutofixMessage() { fmt.Println("Trouble with your devnet? 
Try Autofix!") fmt.Println("Set AUTOFIX=true to automatically fix common configuration issues.") @@ -137,7 +174,17 @@ func mainAction(c *cli.Context) error { return fmt.Errorf("error deploying environment: %w", err) } - return writeEnvironment(cfg.environment, env) + // Write environment JSON file + if err := writeEnvironment(cfg.environment, env); err != nil { + return fmt.Errorf("error writing environment file: %w", err) + } + + // Write conductor configuration TOML file + if err := writeConductorConfig(cfg.conductorConfig, cfg.enclave); err != nil { + return fmt.Errorf("error writing conductor config file: %w", err) + } + + return nil } func getFlags() []cli.Flag { @@ -165,6 +212,10 @@ func getFlags() []cli.Flag { Name: "environment", Usage: "Path to JSON environment file output (optional)", }, + &cli.StringFlag{ + Name: "conductor-config", + Usage: "Path to TOML conductor configuration file output (optional)", + }, &cli.BoolFlag{ Name: "dry-run", Usage: "Dry run mode (optional)", diff --git a/kurtosis-devnet/flash.yaml b/kurtosis-devnet/flash.yaml index 798fd2f4c4d58..b34eea0df02e1 100644 --- a/kurtosis-devnet/flash.yaml +++ b/kurtosis-devnet/flash.yaml @@ -5,19 +5,54 @@ optimism_package: chains: op-kurtosis: participants: - node0: &x-node + node0: + sequencer: true el: type: op-geth + el_builder: + type: op-rbuilder + cl_builder: + type: op-node + image: {{ localDockerImage "op-node" }} + mev_params: + enabled: true cl: type: op-node image: {{ localDockerImage "op-node" }} - builder_type: "op-rbuilder" - builder_image: "us-docker.pkg.dev/oplabs-tools-artifacts/dev-images/op-rbuilder:sha-4aee498" + conductor_params: + image: {{ localDockerImage "op-conductor" }} + enabled: true + bootstrap: true + paused: true + admin: true + proxy: true + websocket_enabled: true + + node1: + sequencer: true + el: + type: op-reth + el_builder: + type: op-rbuilder + cl_builder: + type: op-node + image: {{ localDockerImage "op-node" }} mev_params: enabled: true - type: 
"rollup-boost" - image: "docker.io/flashbots/rollup-boost:0.6.2" - node1: *x-node + cl: + type: op-node + image: {{ localDockerImage "op-node" }} + conductor_params: + image: {{ localDockerImage "op-conductor" }} + enabled: true + paused: true + admin: true + proxy: true + websocket_enabled: true + + proxyd_params: + pprof_enabled: false + extra_params: [] network_params: network: "kurtosis" network_id: "2151908" @@ -26,13 +61,14 @@ optimism_package: granite_time_offset: 0 holocene_time_offset: 0 fund_dev_accounts: true + + flashblocks_websocket_proxy_params: + enabled: true + flashblocks_rpc_params: + type: op-reth batcher_params: image: {{ localDockerImage "op-batcher" }} extra_params: [] - conductor_params: - image: {{ localDockerImage "op-conductor" }} - enabled: true - bootstrap: true proposer_params: image: {{ localDockerImage "op-proposer" }} extra_params: [] @@ -59,6 +95,7 @@ ethereum_package: participants: - el_type: geth cl_type: teku + cl_image: consensys/teku:25.7.1 network_params: preset: minimal genesis_delay: 5 diff --git a/kurtosis-devnet/interop.yaml b/kurtosis-devnet/interop.yaml index c7cb99a74ffcd..2295c963c6bb8 100644 --- a/kurtosis-devnet/interop.yaml +++ b/kurtosis-devnet/interop.yaml @@ -18,6 +18,7 @@ "log_level" "--log.level=info" "log_format" "--log.format=logfmtms" "interop_mempool_filtering" "--rollup.interopmempoolfiltering" + "experimental_sequencer_api" "--experimental.sequencer-api" -}} --- optimism_package: @@ -37,13 +38,16 @@ optimism_package: extra_params: - {{ $flags.log_level }} - {{ $flags.log_format }} + test-sequencers: + sequencer: + enabled: true chains: op-kurtosis1: participants: node0: &x-node el: type: op-geth - image: "" + image: "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101602.1-rc.1" log_level: "" extra_env_vars: {} extra_labels: {} @@ -63,6 +67,7 @@ optimism_package: extra_labels: {} extra_params: - {{ $flags.log_format }} + - {{ $flags.experimental_sequencer_api }} tolerations: [] volume_size: 0 
min_cpu: 0 @@ -81,6 +86,7 @@ optimism_package: granite_time_offset: 0 holocene_time_offset: 0 isthmus_time_offset: 0 + jovian_time_offset: 0 interop_time_offset: 0 fund_dev_accounts: true batcher_params: @@ -105,6 +111,8 @@ optimism_package: fjord_time_offset: 0 granite_time_offset: 0 holocene_time_offset: 0 + isthmus_time_offset: 0 + jovian_time_offset: 0 interop_time_offset: 0 fund_dev_accounts: true batcher_params: @@ -151,6 +159,7 @@ ethereum_package: participants: - el_type: geth cl_type: teku + cl_image: consensys/teku:25.7.1 network_params: preset: minimal genesis_delay: 5 diff --git a/kurtosis-devnet/isthmus.yaml b/kurtosis-devnet/isthmus.yaml deleted file mode 100644 index 11502857fee2b..0000000000000 --- a/kurtosis-devnet/isthmus.yaml +++ /dev/null @@ -1,99 +0,0 @@ -optimism_package: - faucet: - enabled: true - image: {{ localDockerImage "op-faucet" }} - chains: - op-kurtosis: - participants: - node0: - el: - type: op-geth - image: "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101503.2-rc.3" - log_level: "" - extra_env_vars: {} - extra_labels: {} - extra_params: [] - tolerations: [] - volume_size: 0 - min_cpu: 0 - max_cpu: 0 - min_mem: 0 - max_mem: 0 - cl: &x-node-cl - type: op-node - image: {{ localDockerImage "op-node" }} - log_level: "" - extra_env_vars: {} - extra_labels: {} - extra_params: [] - tolerations: [] - volume_size: 0 - min_cpu: 0 - max_cpu: 0 - min_mem: 0 - max_mem: 0 - node1: - el: - type: op-reth - image: "ghcr.io/paradigmxyz/op-reth@sha256:7d83174c900a623897d5cf3a42764f19047ca47034f9726f5a9fad2c7ed32fee" - log_level: "" - extra_env_vars: {} - extra_labels: {} - extra_params: [] - tolerations: [] - volume_size: 0 - min_cpu: 0 - max_cpu: 0 - min_mem: 0 - max_mem: 0 - cl: *x-node-cl - network_params: - network: "kurtosis" - network_id: "2151908" - seconds_per_slot: 2 - fjord_time_offset: 0 - granite_time_offset: 0 - holocene_time_offset: 0 - isthmus_time_offset: 0 - fund_dev_accounts: true - batcher_params: - image: {{ 
localDockerImage "op-batcher" }} - extra_params: [] - proposer_params: - image: {{ localDockerImage "op-proposer" }} - extra_params: [] - game_type: 1 - proposal_interval: 10m - challengers: - challenger: - enabled: true - image: {{ localDockerImage "op-challenger" }} - participants: "*" - cannon_prestates_url: {{ localPrestate.URL }} - cannon_trace_types: ["cannon", "permissioned"] - op_contract_deployer_params: - image: {{ localDockerImage "op-deployer" }} - l1_artifacts_locator: {{ localContractArtifacts "l1" }} - l2_artifacts_locator: {{ localContractArtifacts "l2" }} - overrides: - faultGameAbsolutePrestate: {{ localPrestate.Hashes.prestate_mt64 }} - global_log_level: "info" - global_node_selectors: {} - global_tolerations: [] - persistent: false -ethereum_package: - participants: - - el_type: geth - cl_type: teku - network_params: - preset: minimal - genesis_delay: 5 - additional_preloaded_contracts: | - { - "0x4e59b44847b379578588920cA78FbF26c0B4956C": { - "balance": "0ETH", - "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", - "storage": {}, - "nonce": "1" - } - } diff --git a/kurtosis-devnet/pectra.yaml b/kurtosis-devnet/jovian.yaml similarity index 85% rename from kurtosis-devnet/pectra.yaml rename to kurtosis-devnet/jovian.yaml index 4892c95def01c..a0ab9bcb1b9c8 100644 --- a/kurtosis-devnet/pectra.yaml +++ b/kurtosis-devnet/jovian.yaml @@ -1,11 +1,14 @@ optimism_package: + faucet: + enabled: true + image: {{ localDockerImage "op-faucet" }} chains: op-kurtosis: participants: node0: el: type: op-geth - image: "" + image: "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101602.1-rc.1" log_level: "" extra_env_vars: {} extra_labels: {} @@ -16,7 +19,7 @@ optimism_package: max_cpu: 0 min_mem: 0 max_mem: 0 - cl: + cl: &x-node-cl type: op-node image: {{ localDockerImage "op-node" }} log_level: "" @@ -36,6 +39,8 @@ optimism_package: fjord_time_offset: 0 
granite_time_offset: 0 holocene_time_offset: 0 + isthmus_time_offset: 0 + jovian_time_offset: 0 fund_dev_accounts: true batcher_params: image: {{ localDockerImage "op-batcher" }} @@ -63,18 +68,12 @@ optimism_package: global_tolerations: [] persistent: false ethereum_package: - participants_matrix: - el: - - el_type: geth - el_image: ethpandaops/geth:prague-devnet-5-f85cde7 - cl: - - cl_type: lighthouse - cl_image: ethpandaops/lighthouse:single_attestation-b6d80eb + participants: + - el_type: geth + cl_type: teku + cl_image: consensys/teku:25.7.1 network_params: - electra_fork_epoch: 1 - min_validator_withdrawability_delay: 1 - shard_committee_period: 1 - churn_limit_quotient: 16 + preset: minimal genesis_delay: 5 additional_preloaded_contracts: | { diff --git a/kurtosis-devnet/justfile b/kurtosis-devnet/justfile index bff8e37ea0e7a..13e6680f5cef0 100644 --- a/kurtosis-devnet/justfile +++ b/kurtosis-devnet/justfile @@ -62,19 +62,14 @@ devnet TEMPLATE_FILE DATA_FILE="" NAME="" PACKAGE=KURTOSIS_PACKAGE: _prerequisit fi fi export ENCL_NAME="$DEVNET_NAME"-devnet - if [ -n "{{DATA_FILE}}" ]; then - go run cmd/main.go -kurtosis-package {{PACKAGE}} \ - -environment "tests/$ENCL_NAME.json" \ - -template "{{TEMPLATE_FILE}}" \ - -data "{{DATA_FILE}}" \ - -enclave "$ENCL_NAME" - else - go run cmd/main.go -kurtosis-package {{PACKAGE}} \ - -environment "tests/$ENCL_NAME.json" \ - -template "{{TEMPLATE_FILE}}" \ - -enclave "$ENCL_NAME" - fi \ - && cat "tests/$ENCL_NAME.json" + export CONDUCTOR_CONFIG="tests/op-conductor-ops-$ENCL_NAME.toml" + go run cmd/main.go -kurtosis-package {{PACKAGE}} \ + -environment "tests/$ENCL_NAME.json" \ + -conductor-config "$CONDUCTOR_CONFIG" \ + -template "{{TEMPLATE_FILE}}" \ + -data "{{DATA_FILE}}" \ + -enclave "$ENCL_NAME" \ + && cat "tests/$ENCL_NAME.json" && if [ -f "$CONDUCTOR_CONFIG" ]; then cat "$CONDUCTOR_CONFIG"; fi devnet-test DEVNET *TEST: _prerequisites #!/usr/bin/env bash @@ -98,11 +93,8 @@ interop-devnet-test: (devnet-test 
"interop-devnet" "interop-smoke-test.sh") user-devnet DATA_FILE: {{just_executable()}} devnet "user.yaml" {{DATA_FILE}} {{file_stem(DATA_FILE)}} -# Pectra devnet -pectra-devnet: (devnet "pectra.yaml") - -# Isthmus devnet -isthmus-devnet: (devnet "isthmus.yaml") +# Jovian devnet +jovian-devnet: (devnet "jovian.yaml") # Flashblocks devnet flash-devnet: (devnet "flash.yaml") diff --git a/kurtosis-devnet/optimism-package-trampoline/kurtosis.yml b/kurtosis-devnet/optimism-package-trampoline/kurtosis.yml index 6eb0340359c76..58f2cc557a9f5 100644 --- a/kurtosis-devnet/optimism-package-trampoline/kurtosis.yml +++ b/kurtosis-devnet/optimism-package-trampoline/kurtosis.yml @@ -2,7 +2,7 @@ name: github.com/ethereum-optimism/optimism/kurtosis-devnet/optimism-package-tra description: |- A trampoline package for optimism-package. This one is reproducible, due to the replace directives below. replace: - github.com/QuarkChain/optimism-package: github.com/QuarkChain/optimism-package@fee3af43e8a2abe3358e9d407dab416bdb9a484e + github.com/QuarkChain/optimism-package: github.com/QuarkChain/optimism-package@9553f5f6a70068139d1709b77665314c5ebbb78e github.com/ethpandaops/ethereum-package: github.com/ethpandaops/ethereum-package@83830d44823767af65eda7dfe6b26c87c536c4cf github.com/kurtosis-tech/prometheus-package: github.com/kurtosis-tech/prometheus-package@637c9dea933be18e47f96cadc0d9bb0e3a5aa9d6 # v1.0.0 github.com/kurtosis-tech/postgres-package: github.com/kurtosis-tech/postgres-package@9cbdde2c55e8d1656deb87821465a2ad244d8b33 # v1.0.0 diff --git a/kurtosis-devnet/pkg/kurtosis/endpoints.go b/kurtosis-devnet/pkg/kurtosis/endpoints.go index 9ca074e3bb210..ad786e42f800d 100644 --- a/kurtosis-devnet/pkg/kurtosis/endpoints.go +++ b/kurtosis-devnet/pkg/kurtosis/endpoints.go @@ -182,6 +182,15 @@ func (f *ServiceFinder) triageByLabels(svc *inspect.Service, name string, endpoi if !ok { return nil } + + // So that we can have the same behaviour as netchef + if (tag == 
"flashblocks-websocket-proxy") && endpoints != nil { + if _, has := endpoints["ws-flashblocks"]; !has { + if ws, ok := endpoints["ws"]; ok { + endpoints["ws-flashblocks"] = ws + } + } + } network_ids := f.getNetworkIDs(svc) idx := -1 if val, ok := svc.Labels[nodeIndexLabel]; ok { diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/README.md b/kurtosis-devnet/pkg/kurtosis/sources/inspect/README.md new file mode 100644 index 0000000000000..95ce2688d2eab --- /dev/null +++ b/kurtosis-devnet/pkg/kurtosis/sources/inspect/README.md @@ -0,0 +1,351 @@ +# Kurtosis Inspect Tool + +A command-line tool for inspecting Kurtosis enclaves and extracting conductor configurations and environment data from running Optimism devnets. + +## Overview + +The Kurtosis Inspect Tool provides a clean interface to: + +- 🔍 **Inspect running Kurtosis enclaves** - Extract service information and file artifacts +- 🎛️ **Generate conductor configurations** - Create TOML configs for `op-conductor-ops` +- 📊 **Export environment data** - Save complete devnet information as JSON +- 🔧 **Fix Traefik issues** - Repair missing network labels on containers + +## Installation + +### Build from Source + +```bash +cd optimism/kurtosis-devnet +go build -o kurtosis-inspect pkg/kurtosis/sources/inspect/cmd/main.go +``` + +### Run Directly + +```bash +go run pkg/kurtosis/sources/inspect/cmd/main.go [options] +``` + +## Usage + +### Basic Inspection + +Inspect a running enclave and display results: + +```bash +./kurtosis-inspect my-devnet-enclave +``` + +### Extract Conductor Configuration + +Generate a conductor configuration file for use with `op-conductor-ops`: + +```bash +./kurtosis-inspect --conductor-config conductor.toml my-devnet-enclave +``` + +### Export Complete Environment + +Save the complete environment data as JSON: + +```bash +./kurtosis-inspect --environment environment.json my-devnet-enclave +``` + +### Combined Export + +Extract both conductor config and environment data: + +```bash 
+./kurtosis-inspect \
+  --conductor-config-path conductor.toml \
+  --environment-path environment.json \
+  my-devnet-enclave
+```
+
+### Fix Traefik Network Issues
+
+Repair missing Traefik labels on containers:
+
+```bash
+./kurtosis-inspect --fix-traefik my-devnet-enclave
+```
+
+## Configuration Options
+
+### CLI Flags
+
+| Flag | Environment Variable | Description |
+|------|---------------------|-------------|
+| `--conductor-config-path` | `KURTOSIS_INSPECT_CONDUCTOR_CONFIG` | Path to write conductor configuration TOML file |
+| `--environment-path` | `KURTOSIS_INSPECT_ENVIRONMENT` | Path to write environment JSON file |
+| `--fix-traefik` | `KURTOSIS_INSPECT_FIX_TRAEFIK` | Fix missing Traefik labels on containers |
+| `--log.level` | `KURTOSIS_INSPECT_LOG_LEVEL` | Logging level (DEBUG, INFO, WARN, ERROR) |
+| `--log.format` | `KURTOSIS_INSPECT_LOG_FORMAT` | Log format (text, json, logfmt) |
+
+### Environment Variables
+
+All flags can be set via environment variables with the `KURTOSIS_INSPECT_` prefix:
+
+```bash
+export KURTOSIS_INSPECT_CONDUCTOR_CONFIG="/tmp/conductor.toml"
+export KURTOSIS_INSPECT_ENVIRONMENT="/tmp/environment.json"
+export KURTOSIS_INSPECT_LOG_LEVEL="DEBUG"
+
+./kurtosis-inspect my-devnet-enclave
+```
+
+## Output Formats
+
+### Conductor Configuration (TOML)
+
+The conductor configuration file is compatible with `op-conductor-ops`:
+
+```toml
+[networks]
+  [networks.2151908-chain0-kona]
+  sequencers = ["op-conductor-2151908-chain0-kona-sequencer"]
+  [networks.2151908-chain0-optimism]
+  sequencers = ["op-conductor-2151908-chain0-optimism-sequencer"]
+
+[sequencers]
+  [sequencers.op-conductor-2151908-chain0-kona-sequencer]
+  raft_addr = "127.0.0.1:60135"
+  conductor_rpc_url = "http://127.0.0.1:60134"
+  node_rpc_url = "http://127.0.0.1:60048"
+  voting = true
+  [sequencers.op-conductor-2151908-chain0-optimism-sequencer]
+  raft_addr = "127.0.0.1:60176"
+  conductor_rpc_url = "http://127.0.0.1:60177"
+  node_rpc_url = "http://127.0.0.1:60062"
+  voting = true
+``` + +### Environment Data (JSON) + +Complete environment data including services and file artifacts: + +```json +{ + "FileArtifacts": [ + "genesis-l1.json", + "genesis-l2-chain0.json", + "jwt.txt", + "rollup-l2-chain0.json" + ], + "UserServices": { + "op-node-chain0-sequencer": { + "Labels": { + "app": "op-node", + "chain": "chain0", + "role": "sequencer" + }, + "Ports": { + "rpc": { + "Host": "127.0.0.1", + "Port": 9545 + }, + "p2p": { + "Host": "127.0.0.1", + "Port": 9222 + } + } + } + } +} +``` + +## Integration with op-conductor-ops + +### 1. Generate Conductor Configuration + +```bash +# Extract conductor config from running devnet +./kurtosis-inspect --conductor-config conductor.toml my-devnet + +# Use with op-conductor-ops +cd infra/op-conductor-ops +python op-conductor-ops.py --config ../../kurtosis-devnet/conductor.toml status +``` + +### 2. Leadership Transfer Example + +```bash +# Generate config and perform leadership transfer +./kurtosis-inspect --conductor-config conductor.toml my-devnet +cd infra/op-conductor-ops +python op-conductor-ops.py --config ../../kurtosis-devnet/conductor.toml \ + transfer-leadership \ + --target-sequencer "op-conductor-2151908-chain0-optimism-sequencer" +``` + +## Examples + +### Simple Devnet + +```bash +# Deploy simple devnet +cd kurtosis-devnet +just devnet simple + +# Inspect and extract configs +./kurtosis-inspect --conductor-config tests/simple-conductor.toml simple-devnet + +# Check conductor status +cd ../infra/op-conductor-ops +python op-conductor-ops.py --config ../../kurtosis-devnet/tests/simple-conductor.toml status +``` + +### Multi-Chain Interop + +```bash +# Deploy interop devnet +just devnet interop + +# Extract complex conductor configuration +./kurtosis-inspect \ + --conductor-config tests/interop-conductor.toml \ + --environment tests/interop-environment.json \ + interop-devnet + +# View conductor cluster status +cd ../infra/op-conductor-ops +python op-conductor-ops.py --config 
../../kurtosis-devnet/tests/interop-conductor.toml status +``` + +### Debugging Network Issues + +```bash +# Fix Traefik network issues +./kurtosis-inspect --fix-traefik my-devnet + +# Inspect with debug logging +./kurtosis-inspect --log.level DEBUG --log.format json my-devnet +``` + +## Architecture + +The tool follows a clean architecture pattern with clear separation of concerns: + +``` +pkg/kurtosis/sources/inspect/ +├── cmd/main.go # CLI setup and entry point +├── config.go # Configuration parsing and validation +├── service.go # Business logic and service layer +├── conductor.go # Conductor configuration extraction +├── inspect.go # Core inspection functionality +├── flags/ +│ ├── flags.go # CLI flag definitions +│ └── flags_test.go # Flag testing +└── *_test.go # Comprehensive test suite +``` + +### Key Components + +- **Config**: Handles CLI argument parsing and validation +- **InspectService**: Main business logic for inspection operations +- **ConductorConfig**: Data structures for conductor configuration +- **Inspector**: Core enclave inspection functionality + +## Testing + +### Run All Tests + +```bash +go test ./pkg/kurtosis/sources/inspect/... -v +``` + +### Test Coverage + +```bash +go test ./pkg/kurtosis/sources/inspect/... 
-cover +``` + +### Test Categories + +- **Unit Tests**: Individual component functionality +- **Integration Tests**: File I/O and configuration parsing +- **Real-World Tests**: Based on actual devnet configurations +- **Error Tests**: Permission and validation error handling + +## Troubleshooting + +### Common Issues + +#### Kurtosis Engine Not Running + +``` +Error: failed to create Kurtosis context: The Kurtosis Engine Server is unavailable +``` + +**Solution:** +```bash +kurtosis engine start +``` + +#### Enclave Not Found + +``` +Error: failed to get enclave: enclave with identifier 'my-devnet' not found +``` + +**Solution:** +```bash +# List available enclaves +kurtosis enclave ls + +# Use correct enclave name +./kurtosis-inspect +``` + +#### Permission Denied + +``` +Error: error creating conductor config file: permission denied +``` + +**Solution:** +```bash +# Ensure write permissions to output directory +chmod 755 /output/directory +``` + +### Debug Mode + +Enable debug logging for detailed troubleshooting: + +```bash +./kurtosis-inspect --log.level DEBUG --log.format json my-devnet +``` + +## Contributing + +### Development Setup + +```bash +# Install dependencies +go mod download + +# Run tests +go test ./pkg/kurtosis/sources/inspect/... -v + +# Build +go build -o kurtosis-inspect pkg/kurtosis/sources/inspect/cmd/main.go +``` + +### Adding New Features + +1. Add functionality to appropriate service layer +2. Create comprehensive tests with real data +3. Update CLI flags if needed +4. Update this README with examples + +## Related Tools + +- **[op-conductor-ops](../../infra/op-conductor-ops/)**: Python CLI for managing conductor clusters +- **[Kurtosis](https://kurtosis.com/)**: Orchestration platform for development environments +- **[Optimism Devnet](../)**: Kurtosis package for Optimism development networks + +## License + +This tool is part of the Optimism monorepo and follows the same licensing terms. 
\ No newline at end of file diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/cmd/main.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/cmd/main.go index 2840f36028c96..8b4fb2ddab18c 100644 --- a/kurtosis-devnet/pkg/kurtosis/sources/inspect/cmd/main.go +++ b/kurtosis-devnet/pkg/kurtosis/sources/inspect/cmd/main.go @@ -5,61 +5,61 @@ package main import ( "context" - "flag" "fmt" "os" + "github.com/urfave/cli/v2" + "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/inspect" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/util" + "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/inspect/flags" + opservice "github.com/ethereum-optimism/optimism/op-service" + "github.com/ethereum-optimism/optimism/op-service/cliapp" + oplog "github.com/ethereum-optimism/optimism/op-service/log" ) -func main() { - ctx := context.Background() +var ( + Version = "v0.1.0" + GitCommit = "" + GitDate = "" +) - var fixTraefik bool - flag.BoolVar(&fixTraefik, "fix-traefik", false, "Fix missing Traefik labels on containers") +func main() { + app := cli.NewApp() + app.Version = opservice.FormatVersion(Version, GitCommit, GitDate, "") + app.Name = "kurtosis-inspect" + app.Usage = "Inspect Kurtosis enclaves and extract configurations" + app.Description = "Tool to inspect running Kurtosis enclaves and extract conductor configurations and environment data" + app.Flags = cliapp.ProtectFlags(flags.Flags) + app.Action = cliapp.LifecycleCmd(run) + app.ArgsUsage = "" - flag.Parse() - if flag.NArg() != 1 { - fmt.Fprintf(os.Stderr, "Usage: %s [--fix-traefik] \n", os.Args[0]) + if err := app.Run(os.Args); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) os.Exit(1) } +} - enclaveID := flag.Arg(0) - - // If fix-traefik flag is provided, run the fix - if fixTraefik { - fmt.Println("🔧 Fixing Traefik network configuration...") - if err := util.SetReverseProxyConfig(ctx); err != nil { - fmt.Fprintf(os.Stderr, "Error fixing Traefik 
network: %v\n", err) - os.Exit(1) - } - fmt.Println("✅ Traefik network configuration fixed!") - return +func run(cliCtx *cli.Context, closeApp context.CancelCauseFunc) (cliapp.Lifecycle, error) { + // Parse configuration + cfg, err := inspect.NewConfig(cliCtx) + if err != nil { + return nil, err } - inspector := inspect.NewInspector(enclaveID) + // Setup logging + log := oplog.NewLogger(oplog.AppOut(cliCtx), oplog.ReadCLIConfig(cliCtx)) + oplog.SetGlobalLogHandler(log.Handler()) - data, err := inspector.ExtractData(ctx) - if err != nil { - fmt.Fprintf(os.Stderr, "Error inspecting enclave: %v\n", err) - os.Exit(1) - } + // Create service + service := inspect.NewInspectService(cfg, log) - fmt.Println("File Artifacts:") - for _, artifact := range data.FileArtifacts { - fmt.Printf(" %s\n", artifact) - } + // Create background context for operations + ctx := context.Background() - fmt.Println("\nServices:") - for name, svc := range data.UserServices { - fmt.Printf(" %s:\n", name) - for portName, portInfo := range svc.Ports { - host := portInfo.Host - if host == "" { - host = "localhost" - } - fmt.Printf(" %s: %s:%d\n", portName, host, portInfo.Port) - } + // Run the service + if err := service.Run(ctx); err != nil { + return nil, err } + + return nil, nil } diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/conductor.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/conductor.go new file mode 100644 index 0000000000000..2c0538c917424 --- /dev/null +++ b/kurtosis-devnet/pkg/kurtosis/sources/inspect/conductor.go @@ -0,0 +1,155 @@ +package inspect + +import ( + "context" + "fmt" + "strings" + + "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" + "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/wrappers" +) + +type ConductorSequencer struct { + RaftAddr string `json:"raft_addr" toml:"raft_addr"` + ConductorRPCURL string `json:"conductor_rpc_url" toml:"conductor_rpc_url"` + NodeRPCURL string `json:"node_rpc_url" toml:"node_rpc_url"` 
+ Voting bool `json:"voting" toml:"voting"` +} + +type ConductorNetwork struct { + Sequencers []string `json:"sequencers" toml:"sequencers"` +} + +type ConductorConfig struct { + Networks map[string]*ConductorNetwork `json:"networks" toml:"networks"` + Sequencers map[string]*ConductorSequencer `json:"sequencers" toml:"sequencers"` +} + +func ExtractConductorConfig(ctx context.Context, enclaveID string) (*ConductorConfig, error) { + kurtosisCtx, err := wrappers.GetDefaultKurtosisContext() + if err != nil { + return nil, fmt.Errorf("failed to get Kurtosis context: %w", err) + } + + enclaveCtx, err := kurtosisCtx.GetEnclave(ctx, enclaveID) + if err != nil { + return nil, fmt.Errorf("failed to get enclave: %w", err) + } + + services, err := enclaveCtx.GetServices() + if err != nil { + return nil, fmt.Errorf("failed to get services: %w", err) + } + + conductorServices := make(map[string]map[string]interface{}) + opNodeServices := make(map[string]map[string]interface{}) + + for svcName := range services { + svcNameStr := string(svcName) + + svcCtx, err := enclaveCtx.GetService(svcNameStr) + if err != nil { + continue + } + + labels := svcCtx.GetLabels() + ports := make(map[string]*descriptors.PortInfo) + + for portName, portSpec := range svcCtx.GetPublicPorts() { + ports[portName] = &descriptors.PortInfo{ + Host: svcCtx.GetMaybePublicIPAddress(), + Port: int(portSpec.GetNumber()), + } + } + + if labels["op.kind"] == "conductor" { + conductorServices[svcNameStr] = map[string]interface{}{ + "labels": labels, + "ports": ports, + } + } + + if labels["op.kind"] == "cl" && labels["op.cl.type"] == "op-node" { + opNodeServices[svcNameStr] = map[string]interface{}{ + "labels": labels, + "ports": ports, + } + } + } + + if len(conductorServices) == 0 { + return nil, nil + } + + networks := make(map[string]*ConductorNetwork) + sequencers := make(map[string]*ConductorSequencer) + + networkSequencers := make(map[string][]string) + + for conductorSvcName, conductorData := range 
conductorServices { + labels := conductorData["labels"].(map[string]string) + ports := conductorData["ports"].(map[string]*descriptors.PortInfo) + + networkID := labels["op.network.id"] + if networkID == "" { + continue + } + + // Find the network name from service name (e.g., "op-conductor-2151908-op-kurtosis-node0") + parts := strings.Split(conductorSvcName, "-") + var networkName string + if len(parts) >= 4 { + networkName = strings.Join(parts[2:len(parts)-1], "-") + } + if networkName == "" { + networkName = "unknown" + } + + networkSequencers[networkName] = append(networkSequencers[networkName], conductorSvcName) + + participantName := labels["op.network.participant.name"] + var nodeRPCURL string + + // Look for matching op-node service + for _, nodeData := range opNodeServices { + nodeLabels := nodeData["labels"].(map[string]string) + nodePorts := nodeData["ports"].(map[string]*descriptors.PortInfo) + + if nodeLabels["op.network.participant.name"] == participantName && + nodeLabels["op.network.id"] == networkID { + if rpcPort, ok := nodePorts["rpc"]; ok { + nodeRPCURL = fmt.Sprintf("http://127.0.0.1:%d", rpcPort.Port) + } + break + } + } + + var raftAddr, conductorRPCURL string + + if consensusPort, ok := ports["consensus"]; ok { + raftAddr = fmt.Sprintf("127.0.0.1:%d", consensusPort.Port) + } + + if rpcPort, ok := ports["rpc"]; ok { + conductorRPCURL = fmt.Sprintf("http://127.0.0.1:%d", rpcPort.Port) + } + + sequencers[conductorSvcName] = &ConductorSequencer{ + RaftAddr: raftAddr, + ConductorRPCURL: conductorRPCURL, + NodeRPCURL: nodeRPCURL, + Voting: true, + } + } + + for networkName, sequencerNames := range networkSequencers { + networks[networkName] = &ConductorNetwork{ + Sequencers: sequencerNames, + } + } + + return &ConductorConfig{ + Networks: networks, + Sequencers: sequencers, + }, nil +} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/conductor_test.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/conductor_test.go new file mode 100644 
index 0000000000000..1c65207427949 --- /dev/null +++ b/kurtosis-devnet/pkg/kurtosis/sources/inspect/conductor_test.go @@ -0,0 +1,86 @@ +package inspect + +import ( + "strings" + "testing" + + "github.com/BurntSushi/toml" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConductorConfig(t *testing.T) { + config := &ConductorConfig{ + Networks: map[string]*ConductorNetwork{ + "chain0": {Sequencers: []string{"seq0"}}, + "chain1": {Sequencers: []string{"seq1"}}, + }, + Sequencers: map[string]*ConductorSequencer{ + "seq0": { + RaftAddr: "127.0.0.1:8001", + ConductorRPCURL: "http://127.0.0.1:8002", + NodeRPCURL: "http://127.0.0.1:8003", + Voting: true, + }, + "seq1": { + RaftAddr: "127.0.0.1:8011", + ConductorRPCURL: "http://127.0.0.1:8012", + NodeRPCURL: "http://127.0.0.1:8013", + Voting: false, + }, + }, + } + + var buf strings.Builder + err := toml.NewEncoder(&buf).Encode(config) + require.NoError(t, err) + + output := buf.String() + assert.Contains(t, output, "[networks]") + assert.Contains(t, output, "[sequencers]") + assert.Contains(t, output, "voting = true") + assert.Contains(t, output, "voting = false") + + var decoded ConductorConfig + err = toml.Unmarshal([]byte(output), &decoded) + require.NoError(t, err) + assert.Equal(t, config.Networks, decoded.Networks) + assert.Equal(t, config.Sequencers, decoded.Sequencers) +} + +func TestConductorSequencer(t *testing.T) { + seq := &ConductorSequencer{ + RaftAddr: "localhost:8080", + ConductorRPCURL: "http://localhost:9090", + NodeRPCURL: "http://localhost:7070", + Voting: true, + } + + assert.Equal(t, "localhost:8080", seq.RaftAddr) + assert.Equal(t, "http://localhost:9090", seq.ConductorRPCURL) + assert.True(t, seq.Voting) +} + +func TestMultiChainConfig(t *testing.T) { + config := &ConductorConfig{ + Networks: map[string]*ConductorNetwork{ + "chain0": {Sequencers: []string{"seq0", "backup0"}}, + "chain1": {Sequencers: []string{"seq1", "observer1"}}, + }, + Sequencers: 
map[string]*ConductorSequencer{ + "seq0": {RaftAddr: "127.0.0.1:8001", ConductorRPCURL: "http://127.0.0.1:8002", NodeRPCURL: "http://127.0.0.1:8003", Voting: true}, + "backup0": {RaftAddr: "127.0.0.1:8011", ConductorRPCURL: "http://127.0.0.1:8012", NodeRPCURL: "http://127.0.0.1:8013", Voting: true}, + "seq1": {RaftAddr: "127.0.0.1:8021", ConductorRPCURL: "http://127.0.0.1:8022", NodeRPCURL: "http://127.0.0.1:8023", Voting: true}, + "observer1": {RaftAddr: "127.0.0.1:8031", ConductorRPCURL: "http://127.0.0.1:8032", NodeRPCURL: "http://127.0.0.1:8033", Voting: false}, + }, + } + + assert.Len(t, config.Networks, 2) + assert.Len(t, config.Sequencers, 4) + assert.False(t, config.Sequencers["observer1"].Voting) + + var buf strings.Builder + err := toml.NewEncoder(&buf).Encode(config) + require.NoError(t, err) + assert.Contains(t, buf.String(), "voting = false") +} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/config.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/config.go new file mode 100644 index 0000000000000..935c00e3a9cb9 --- /dev/null +++ b/kurtosis-devnet/pkg/kurtosis/sources/inspect/config.go @@ -0,0 +1,34 @@ +package inspect + +import ( + "fmt" + + "github.com/urfave/cli/v2" +) + +// Config holds the configuration for the inspect service +type Config struct { + EnclaveID string + FixTraefik bool + ConductorConfigPath string + EnvironmentPath string +} + +func NewConfig(ctx *cli.Context) (*Config, error) { + if ctx.NArg() != 1 { + return nil, fmt.Errorf("expected exactly one argument (enclave-id), got %d", ctx.NArg()) + } + + cfg := &Config{ + EnclaveID: ctx.Args().Get(0), + FixTraefik: ctx.Bool("fix-traefik"), + ConductorConfigPath: ctx.String("conductor-config-path"), + EnvironmentPath: ctx.String("environment-path"), + } + + if cfg.EnclaveID == "" { + return nil, fmt.Errorf("enclave-id is required") + } + + return cfg, nil +} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/config_test.go 
b/kurtosis-devnet/pkg/kurtosis/sources/inspect/config_test.go new file mode 100644 index 0000000000000..25dae574ff07e --- /dev/null +++ b/kurtosis-devnet/pkg/kurtosis/sources/inspect/config_test.go @@ -0,0 +1,91 @@ +package inspect + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli/v2" +) + +func TestNewConfig(t *testing.T) { + tests := []struct { + name string + args []string + expected *Config + wantErr bool + }{ + { + name: "valid config", + args: []string{"inspect", "test-enclave"}, + expected: &Config{ + EnclaveID: "test-enclave", + FixTraefik: false, + ConductorConfigPath: "", + EnvironmentPath: "", + }, + wantErr: false, + }, + { + name: "config with flags", + args: []string{ + "inspect", + "--fix-traefik", + "--conductor-config-path", "/tmp/conductor.toml", + "--environment-path", "/tmp/env.json", + "my-enclave", + }, + expected: &Config{ + EnclaveID: "my-enclave", + FixTraefik: true, + ConductorConfigPath: "/tmp/conductor.toml", + EnvironmentPath: "/tmp/env.json", + }, + wantErr: false, + }, + { + name: "no arguments", + args: []string{"inspect"}, + expected: nil, + wantErr: true, + }, + { + name: "too many arguments", + args: []string{"inspect", "enclave1", "enclave2"}, + expected: nil, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + app := &cli.App{ + Name: "inspect", + Flags: []cli.Flag{ + &cli.BoolFlag{Name: "fix-traefik"}, + &cli.StringFlag{Name: "conductor-config-path"}, + &cli.StringFlag{Name: "environment-path"}, + }, + Action: func(ctx *cli.Context) error { + cfg, err := NewConfig(ctx) + + if tt.wantErr { + assert.Error(t, err) + assert.Nil(t, cfg) + } else { + require.NoError(t, err) + require.NotNil(t, cfg) + assert.Equal(t, tt.expected.EnclaveID, cfg.EnclaveID) + assert.Equal(t, tt.expected.FixTraefik, cfg.FixTraefik) + assert.Equal(t, tt.expected.ConductorConfigPath, cfg.ConductorConfigPath) + assert.Equal(t, 
tt.expected.EnvironmentPath, cfg.EnvironmentPath) + } + return nil + }, + } + + err := app.Run(tt.args) + require.NoError(t, err) + }) + } +} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/flags/flags.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/flags/flags.go new file mode 100644 index 0000000000000..22c6b364984f7 --- /dev/null +++ b/kurtosis-devnet/pkg/kurtosis/sources/inspect/flags/flags.go @@ -0,0 +1,50 @@ +package flags + +import ( + "github.com/urfave/cli/v2" + + opservice "github.com/ethereum-optimism/optimism/op-service" + oplog "github.com/ethereum-optimism/optimism/op-service/log" +) + +const EnvVarPrefix = "KURTOSIS_INSPECT" + +var ( + FixTraefik = &cli.BoolFlag{ + Name: "fix-traefik", + Value: false, + EnvVars: opservice.PrefixEnvVar(EnvVarPrefix, "FIX_TRAEFIK"), + Usage: "Fix missing Traefik labels on containers", + } + ConductorConfig = &cli.StringFlag{ + Name: "conductor-config-path", + Value: "", + EnvVars: opservice.PrefixEnvVar(EnvVarPrefix, "CONDUCTOR_CONFIG"), + Usage: "Path where conductor configuration TOML file will be written (overwrites existing file)", + } + Environment = &cli.StringFlag{ + Name: "environment-path", + Value: "", + EnvVars: opservice.PrefixEnvVar(EnvVarPrefix, "ENVIRONMENT"), + Usage: "Path where environment JSON file will be written (overwrites existing file)", + } +) + +var requiredFlags = []cli.Flag{ + // No required flags +} + +var optionalFlags = []cli.Flag{ + FixTraefik, + ConductorConfig, + Environment, +} + +var Flags []cli.Flag + +func init() { + // Add common op-service flags + optionalFlags = append(optionalFlags, oplog.CLIFlags(EnvVarPrefix)...) + + Flags = append(requiredFlags, optionalFlags...) 
+} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/flags/flags_test.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/flags/flags_test.go new file mode 100644 index 0000000000000..8c9d85d29b1ca --- /dev/null +++ b/kurtosis-devnet/pkg/kurtosis/sources/inspect/flags/flags_test.go @@ -0,0 +1,124 @@ +package flags + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/urfave/cli/v2" +) + +func TestFlags(t *testing.T) { + tests := []struct { + name string + args []string + envVars map[string]string + expected struct { + fixTraefik bool + conductorConfig string + environment string + } + }{ + { + name: "default values", + args: []string{"inspect", "test-enclave"}, + expected: struct { + fixTraefik bool + conductorConfig string + environment string + }{ + fixTraefik: false, + conductorConfig: "", + environment: "", + }, + }, + { + name: "cli flags set", + args: []string{ + "inspect", + "--fix-traefik", + "--conductor-config-path", "/tmp/conductor.toml", + "--environment-path", "/tmp/env.json", + "test-enclave", + }, + expected: struct { + fixTraefik bool + conductorConfig string + environment string + }{ + fixTraefik: true, + conductorConfig: "/tmp/conductor.toml", + environment: "/tmp/env.json", + }, + }, + { + name: "environment variables", + args: []string{"inspect", "test-enclave"}, + envVars: map[string]string{ + "KURTOSIS_INSPECT_FIX_TRAEFIK": "true", + "KURTOSIS_INSPECT_CONDUCTOR_CONFIG": "/env/conductor.toml", + "KURTOSIS_INSPECT_ENVIRONMENT": "/env/env.json", + }, + expected: struct { + fixTraefik bool + conductorConfig string + environment string + }{ + fixTraefik: true, + conductorConfig: "/env/conductor.toml", + environment: "/env/env.json", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Set environment variables + for key, value := range tt.envVars { + os.Setenv(key, value) + defer os.Unsetenv(key) + } + + app := &cli.App{ + Name: 
"inspect", + Flags: Flags, + Action: func(ctx *cli.Context) error { + assert.Equal(t, tt.expected.fixTraefik, ctx.Bool("fix-traefik")) + assert.Equal(t, tt.expected.conductorConfig, ctx.String("conductor-config-path")) + assert.Equal(t, tt.expected.environment, ctx.String("environment-path")) + return nil + }, + } + + err := app.Run(tt.args) + require.NoError(t, err) + }) + } +} + +func TestFlagDefinitions(t *testing.T) { + flagNames := make(map[string]bool) + for _, flag := range Flags { + for _, name := range flag.Names() { + flagNames[name] = true + } + } + + assert.True(t, flagNames["fix-traefik"]) + assert.True(t, flagNames["conductor-config-path"]) + assert.True(t, flagNames["environment-path"]) + assert.True(t, flagNames["log.level"]) +} + +func TestEnvVarPrefix(t *testing.T) { + assert.Equal(t, "KURTOSIS_INSPECT", EnvVarPrefix) +} + +func TestFlagStructure(t *testing.T) { + assert.NotEmpty(t, Flags) + assert.Contains(t, optionalFlags, FixTraefik) + assert.Contains(t, optionalFlags, ConductorConfig) + assert.Contains(t, optionalFlags, Environment) + assert.Empty(t, requiredFlags) +} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/inspect_test.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/inspect_test.go new file mode 100644 index 0000000000000..645f952edce1e --- /dev/null +++ b/kurtosis-devnet/pkg/kurtosis/sources/inspect/inspect_test.go @@ -0,0 +1,76 @@ +package inspect + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" +) + +func TestNewInspector(t *testing.T) { + inspector := NewInspector("test-enclave") + assert.NotNil(t, inspector) + assert.Equal(t, "test-enclave", inspector.enclaveID) +} + +func TestShortenedUUIDString(t *testing.T) { + assert.Equal(t, "f47ac10b-58c", ShortenedUUIDString("f47ac10b-58cc-4372-a567-0e02b2c3d479")) + assert.Equal(t, "abc", ShortenedUUIDString("abc")) + assert.Equal(t, "", 
ShortenedUUIDString("")) + assert.Equal(t, "123456789012", ShortenedUUIDString("123456789012")) + assert.Equal(t, "test2-devnet", ShortenedUUIDString("test2-devnet-2151908")) +} + +func TestInspectData(t *testing.T) { + data := &InspectData{ + FileArtifacts: []string{"genesis.json", "jwt.txt"}, + UserServices: ServiceMap{ + "op-node": &Service{ + Labels: map[string]string{"app": "op-node", "role": "sequencer"}, + Ports: PortMap{ + "rpc": &descriptors.PortInfo{Host: "127.0.0.1", Port: 8545}, + "p2p": &descriptors.PortInfo{Host: "127.0.0.1", Port: 9222}, + }, + }, + }, + } + + assert.Len(t, data.FileArtifacts, 2) + assert.Len(t, data.UserServices, 1) + assert.Contains(t, data.FileArtifacts, "genesis.json") + + service := data.UserServices["op-node"] + assert.Equal(t, "op-node", service.Labels["app"]) + assert.Equal(t, "sequencer", service.Labels["role"]) + + rpcPort, exists := service.Ports["rpc"] + require.True(t, exists) + assert.Equal(t, 8545, rpcPort.Port) + + _, exists = service.Ports["nonexistent"] + assert.False(t, exists) +} + +func TestServiceMap(t *testing.T) { + services := ServiceMap{ + "seq0": &Service{Labels: map[string]string{"role": "sequencer"}, Ports: PortMap{"rpc": &descriptors.PortInfo{Port: 8545}}}, + "seq1": &Service{Labels: map[string]string{"role": "sequencer"}, Ports: PortMap{"rpc": &descriptors.PortInfo{Port: 8645}}}, + "conductor": &Service{Labels: map[string]string{"app": "conductor"}, Ports: PortMap{"rpc": &descriptors.PortInfo{Port: 8547}}}, + } + + assert.Len(t, services, 3) + + seq0, exists := services["seq0"] + require.True(t, exists) + assert.Equal(t, "sequencer", seq0.Labels["role"]) + + sequencerCount := 0 + for _, svc := range services { + if svc.Labels["role"] == "sequencer" { + sequencerCount++ + } + } + assert.Equal(t, 2, sequencerCount) +} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/service.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/service.go new file mode 100644 index 0000000000000..099e7c62ccf49 --- 
/dev/null +++ b/kurtosis-devnet/pkg/kurtosis/sources/inspect/service.go @@ -0,0 +1,150 @@ +package inspect + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "github.com/BurntSushi/toml" + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/util" +) + +// InspectService handles the core inspection functionality +type InspectService struct { + cfg *Config + log log.Logger +} + +func NewInspectService(cfg *Config, log log.Logger) *InspectService { + return &InspectService{ + cfg: cfg, + log: log, + } +} + +func (s *InspectService) Run(ctx context.Context) error { + if s.cfg.FixTraefik { + return s.fixTraefik(ctx) + } + + return s.inspect(ctx) +} + +func (s *InspectService) fixTraefik(ctx context.Context) error { + s.log.Info("Fixing Traefik network configuration...") + fmt.Println("🔧 Fixing Traefik network configuration...") + + if err := util.SetReverseProxyConfig(ctx); err != nil { + return fmt.Errorf("error setting reverse proxy config: %w", err) + } + + s.log.Info("Traefik network configuration fixed") + fmt.Println("✅ Traefik network configuration fixed!") + return nil +} + +func (s *InspectService) inspect(ctx context.Context) error { + inspector := NewInspector(s.cfg.EnclaveID) + + data, err := inspector.ExtractData(ctx) + if err != nil { + return fmt.Errorf("error inspecting enclave: %w", err) + } + + conductorConfig, err := ExtractConductorConfig(ctx, s.cfg.EnclaveID) + if err != nil { + s.log.Warn("Error extracting conductor configuration", "error", err) + } + + s.displayResults(data, conductorConfig) + + if err := s.writeFiles(data, conductorConfig); err != nil { + return fmt.Errorf("error writing output files: %w", err) + } + + return nil +} + +func (s *InspectService) displayResults(data *InspectData, conductorConfig *ConductorConfig) { + fmt.Println("File Artifacts:") + for _, artifact := range data.FileArtifacts { + fmt.Printf(" %s\n", artifact) + } + + fmt.Println("\nServices:") + for name, 
svc := range data.UserServices { + fmt.Printf(" %s:\n", name) + for portName, portInfo := range svc.Ports { + host := portInfo.Host + if host == "" { + host = "localhost" + } + fmt.Printf(" %s: %s:%d\n", portName, host, portInfo.Port) + } + } + + if conductorConfig != nil { + fmt.Println("\nConductor Configuration:") + fmt.Println("========================") + + if err := toml.NewEncoder(os.Stdout).Encode(conductorConfig); err != nil { + s.log.Error("Error marshaling conductor config to TOML", "error", err) + } + } +} + +func (s *InspectService) writeFiles(data *InspectData, conductorConfig *ConductorConfig) error { + if s.cfg.ConductorConfigPath != "" { + if conductorConfig == nil { + s.log.Info("No conductor services found, skipping conductor config generation") + } else { + if err := s.writeConductorConfig(s.cfg.ConductorConfigPath, conductorConfig); err != nil { + return fmt.Errorf("error writing conductor config file: %w", err) + } + fmt.Printf("Conductor configuration saved to: %s\n", s.cfg.ConductorConfigPath) + } + } + + if s.cfg.EnvironmentPath != "" { + if err := s.writeEnvironment(s.cfg.EnvironmentPath, data); err != nil { + return fmt.Errorf("error writing environment file: %w", err) + } + fmt.Printf("Environment data saved to: %s\n", s.cfg.EnvironmentPath) + } + + return nil +} + +func (s *InspectService) writeConductorConfig(path string, config *ConductorConfig) error { + out, err := os.Create(path) + if err != nil { + return fmt.Errorf("error creating conductor config file: %w", err) + } + defer out.Close() + + encoder := toml.NewEncoder(out) + if err := encoder.Encode(config); err != nil { + return fmt.Errorf("error encoding conductor config as TOML: %w", err) + } + + return nil +} + +func (s *InspectService) writeEnvironment(path string, data *InspectData) error { + out, err := os.Create(path) + if err != nil { + return fmt.Errorf("error creating environment file: %w", err) + } + defer out.Close() + + enc := json.NewEncoder(out) + enc.SetIndent("", 
" ") + if err := enc.Encode(data); err != nil { + return fmt.Errorf("error encoding environment: %w", err) + } + + return nil +} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/service_test.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/service_test.go new file mode 100644 index 0000000000000..afa4590388f2c --- /dev/null +++ b/kurtosis-devnet/pkg/kurtosis/sources/inspect/service_test.go @@ -0,0 +1,63 @@ +package inspect + +import ( + "os" + "path/filepath" + "testing" + + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" +) + +func TestInspectService(t *testing.T) { + cfg := &Config{EnclaveID: "test-enclave"} + service := NewInspectService(cfg, log.New()) + + assert.NotNil(t, service) + assert.Equal(t, cfg, service.cfg) +} + +func TestFileWriting(t *testing.T) { + tempDir := t.TempDir() + + cfg := &Config{ + EnclaveID: "test-enclave", + ConductorConfigPath: filepath.Join(tempDir, "conductor.toml"), + EnvironmentPath: filepath.Join(tempDir, "environment.json"), + } + service := NewInspectService(cfg, log.New()) + + conductorConfig := &ConductorConfig{ + Networks: map[string]*ConductorNetwork{"chain": {Sequencers: []string{"seq"}}}, + Sequencers: map[string]*ConductorSequencer{"seq": {RaftAddr: "127.0.0.1:8001", ConductorRPCURL: "http://127.0.0.1:8002", NodeRPCURL: "http://127.0.0.1:8003", Voting: true}}, + } + + inspectData := &InspectData{ + FileArtifacts: []string{"genesis.json", "jwt.txt"}, + UserServices: ServiceMap{ + "op-node": &Service{ + Labels: map[string]string{"app": "op-node"}, + Ports: PortMap{"rpc": &descriptors.PortInfo{Host: "127.0.0.1", Port: 8545}}, + }, + }, + } + + err := service.writeFiles(inspectData, conductorConfig) + require.NoError(t, err) + + assert.FileExists(t, cfg.ConductorConfigPath) + assert.FileExists(t, cfg.EnvironmentPath) + + content, err := os.ReadFile(cfg.ConductorConfigPath) + 
require.NoError(t, err) + assert.Contains(t, string(content), "[networks]") + assert.Contains(t, string(content), "[sequencers]") + + envContent, err := os.ReadFile(cfg.EnvironmentPath) + require.NoError(t, err) + assert.Contains(t, string(envContent), "genesis.json") + assert.Contains(t, string(envContent), "op-node") +} diff --git a/kurtosis-devnet/pkg/util/docker.go b/kurtosis-devnet/pkg/util/docker.go index 9a8c7225b1b1b..b7a322a043814 100644 --- a/kurtosis-devnet/pkg/util/docker.go +++ b/kurtosis-devnet/pkg/util/docker.go @@ -240,6 +240,11 @@ func testRPCEndpoint(endpoint RPCEndpoint) error { if strings.Contains(endpoint.Name, "supervisor") { return testSupervisor(ctx, rpcClient) } + if strings.Contains(endpoint.Name, "test-sequencer") { + // TODO: No public or unauthenticated health/status API exists for test-sequencer yet. + // Admin API is still in progress — skip readiness check until it's available. + return nil + } return testEthNode(ctx, rpcClient) } diff --git a/kurtosis-devnet/simple.yaml b/kurtosis-devnet/simple.yaml index 041553a780c86..6c7836f62b986 100644 --- a/kurtosis-devnet/simple.yaml +++ b/kurtosis-devnet/simple.yaml @@ -44,6 +44,7 @@ optimism_package: fjord_time_offset: 0 granite_time_offset: 0 holocene_time_offset: 0 + isthmus_time_offset: 0 fund_dev_accounts: true batcher_params: image: {{ localDockerImage "op-batcher" }} @@ -78,6 +79,7 @@ ethereum_package: participants: - el_type: geth cl_type: teku + cl_image: consensys/teku:25.7.1 network_params: preset: minimal genesis_delay: 5 diff --git a/kurtosis-devnet/templates/l2.yaml b/kurtosis-devnet/templates/l2.yaml index bf63d2e380c77..268cfb8a75fb5 100644 --- a/kurtosis-devnet/templates/l2.yaml +++ b/kurtosis-devnet/templates/l2.yaml @@ -14,6 +14,7 @@ network_params: granite_time_offset: 0 holocene_time_offset: 0 isthmus_time_offset: 0 + jovian_time_offset: 0 interop_time_offset: 0 fund_dev_accounts: true batcher_params: diff --git a/kurtosis-devnet/tests/.gitignore 
b/kurtosis-devnet/tests/.gitignore index a6c57f5fb2ffb..e3339ba9266cb 100644 --- a/kurtosis-devnet/tests/.gitignore +++ b/kurtosis-devnet/tests/.gitignore @@ -1 +1,2 @@ *.json +*.toml diff --git a/mise.toml b/mise.toml index fd3aef4aeca3f..09407762323b7 100644 --- a/mise.toml +++ b/mise.toml @@ -11,11 +11,11 @@ shellcheck = "0.10.0" direnv = "2.35.0" just = "1.37.0" -# Cargo dependencies -"cargo:svm-rs" = "0.5.8" +svm-rs = "0.5.19" # Go dependencies "go:github.com/ethereum/go-ethereum/cmd/abigen" = "1.15.10" +"go:github.com/ethereum/go-ethereum/cmd/geth" = "1.16.4" # Osaka release. "go:gotest.tools/gotestsum" = "1.12.1" "go:github.com/vektra/mockery/v2" = "2.46.0" "go:github.com/golangci/golangci-lint/cmd/golangci-lint" = "1.64.8" @@ -36,9 +36,9 @@ anvil = "1.1.0" # Other dependencies codecov-uploader = "0.8.0" -goreleaser-pro = "2.3.2-pro" +goreleaser-pro = "2.11.2" kurtosis = "1.8.1" -op-acceptor = "op-acceptor/v2.0.0" +op-acceptor = "op-acceptor/v3.5.0" # Fake dependencies # Put things here if you need to track versions of tools or projects that can't @@ -57,6 +57,7 @@ codecov-uploader = "ubi:codecov/uploader" goreleaser-pro = "ubi:goreleaser/goreleaser-pro[exe=goreleaser]" kurtosis = "ubi:kurtosis-tech/kurtosis-cli-release-artifacts[exe=kurtosis]" op-acceptor = "ubi:ethereum-optimism/infra[exe=op-acceptor,tag_prefix=op-acceptor/]" +svm-rs = "ubi:alloy-rs/svm-rs[exe=svm]" [settings] experimental = true diff --git a/op-acceptance-tests/.gitignore b/op-acceptance-tests/.gitignore index e0081f4804078..4257f78ffeb1f 100644 --- a/op-acceptance-tests/.gitignore +++ b/op-acceptance-tests/.gitignore @@ -1 +1,2 @@ .bin/ +logs diff --git a/op-acceptance-tests/README.md b/op-acceptance-tests/README.md index 183bee525b222..3b81565d39837 100644 --- a/op-acceptance-tests/README.md +++ b/op-acceptance-tests/README.md @@ -54,7 +54,6 @@ just acceptance-test "" base # Run against Kurtosis devnets (requires Docker + Kurtosis) just acceptance-test simple base -just acceptance-test 
isthmus isthmus just acceptance-test interop interop ``` @@ -68,7 +67,7 @@ just just acceptance-test # Use specific op-acceptor version -ACCEPTOR_VERSION=v1.0.0 just acceptance-test simple base +ACCEPTOR_VERSION=v1.0.0 just acceptance-test "" base ``` ### Direct CLI Usage @@ -96,7 +95,8 @@ For rapid test development, use in-process testing: ```bash cd op-acceptance-tests -just acceptance-test "" base # Uses sysgo orchestrator - faster! +# Not providing a network uses the sysgo orchestrator (in-memory network) which is faster and easier to iterate with. +just acceptance-test "" base ``` ### Testing Against External Devnets @@ -105,7 +105,7 @@ For integration testing against realistic networks: 1. **Automated approach** (rebuilds devnet each time): ```bash - just acceptance-test isthmus isthmus + just acceptance-test interop interop ``` 2. **Manual approach** (once-off) @@ -155,16 +155,124 @@ LOG_LEVEL=info go test -v ./op-acceptance-tests/tests/interop/sync/multisupervis To add new acceptance tests: -1. Create your test in the appropriate Go package (as a regular Go test) +1. Create your test in the appropriate Go package under `tests` (as a regular Go test) 2. Register the test in `acceptance-tests.yaml` under the appropriate gate 3. Follow the existing pattern for test registration: ```yaml - name: YourTestName - package: github.com/ethereum-optimism/optimism/your/package/path + package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/your/package/path ``` +## Flake-Shake: Test Stability Validation + +Flake-shake is a test stability validation system that runs tests multiple times to detect flakiness before they reach production gates. It serves as a quarantine area where new or potentially unstable tests must prove their reliability. 
+ +### Purpose + +- Detect flaky tests through repeated execution (100+ iterations) +- Prevent unstable tests from disrupting CI/CD pipelines +- Provide data-driven decisions for test promotion to production gates + +### How It Works + +Flake-shake runs tests multiple times and aggregates results to determine stability: +- **STABLE**: Tests with 100% pass rate across all iterations +- **UNSTABLE**: Tests with any failures (<100% pass rate) + +### Running Flake-Shake + +Flake-shake is integrated into op-acceptor and can be run locally or in CI: + +```bash +# Run flake-shake with op-acceptor (requires op-acceptor v3.4.0+) +op-acceptor \ + --validators ./acceptance-tests.yaml \ + --gate flake-shake \ + --flake-shake \ + --flake-shake-iterations 10 \ + --orchestrator sysgo + +# Run with more iterations for thorough testing +op-acceptor \ + --validators ./acceptance-tests.yaml \ + --gate flake-shake \ + --flake-shake \ + --flake-shake-iterations 100 \ + --orchestrator sysgo +``` + +### Adding Tests to Flake-Shake + +Add new or suspicious tests to the flake-shake gate in `acceptance-tests.yaml`: + +```yaml +gates: + - id: flake-shake + description: "Test stability validation gate" + tests: + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/yourtest + timeout: 10m + metadata: + owner: stefano +``` + +### Understanding Reports + +Flake-shake stores a daily summary artifact per run: +- **`final-report/daily-summary.json`**: Aggregated counts of stable/unstable tests and per-test pass/fail tallies. 
+ +### CI Integration + +In CI, flake-shake runs tests across multiple parallel workers: +- 10 workers each run 10 iterations (100 total by default) +- Results are aggregated using the `flake-shake-aggregator` tool +- Reports are stored as CircleCI artifacts + +### Automated Promotion (Promoter CLI) + +We provide a small CLI that aggregates the last N daily summaries from CircleCI and proposes YAML edits to promote stable tests out of the `flake-shake` gate: + +```bash +export CIRCLE_API_TOKEN=... # CircleCI API token (read artifacts) +go build -o ./op-acceptance-tests/flake-shake-promoter ./op-acceptance-tests/cmd/flake-shake-promoter/main.go +./op-acceptance-tests/flake-shake-promoter \ + --org ethereum-optimism --repo optimism --branch develop \ + --workflow scheduled-flake-shake --report-job op-acceptance-tests-flake-shake-report \ + --days 3 --gate flake-shake --min-runs 300 --max-failure-rate 0.01 --min-age-days 3 \ + --out ./final-promotion --dry-run +``` + +Outputs written to `--out`: +- `aggregate.json`: Per-test aggregated totals across days +- `promotion-ready.json`: Candidates and skip reasons +- `promotion.yaml`: Proposed edits to `op-acceptance-tests/acceptance-tests.yaml` + +### Promotion Criteria + +Tests should remain in flake-shake until they demonstrate consistent stability: +- **Immediate promotion**: 100% pass rate across 100+ iterations +- **Investigation needed**: Any failures require fixing before promotion +- **Minimum soak time**: 3 days in flake-shake gate recommended + +### Quick Development + +For rapid development and testing: + +```bash +cd op-acceptance-tests + +# Run all tests (sysgo gateless mode) - most comprehensive coverage +just acceptance-test "" "" + +# Run specific gate-based tests (traditional mode) +just acceptance-test "" base # In-process (sysgo) with gate +just acceptance-test simple base # External devnet (sysext) with gate +``` + +Using an empty gate (`""`) triggers gateless mode with the sysgo orchestrator, 
auto-discovering all tests. + ## Further Information For more details about `op-acceptor` and the acceptance testing process, refer to the main documentation or ask the team for guidance. -The source code for `op-acceptor` is available at [github.com/ethereum-optimism/infra/op-acceptor](https://github.com/ethereum-optimism/infra/tree/main/op-acceptor). If you discover any bugs or have feature requests, please open an issue in that repository. \ No newline at end of file +The source code for `op-acceptor` is available at [github.com/ethereum-optimism/infra/op-acceptor](https://github.com/ethereum-optimism/infra/tree/main/op-acceptor). If you discover any bugs or have feature requests, please open an issue in that repository. diff --git a/op-acceptance-tests/acceptance-tests.yaml b/op-acceptance-tests/acceptance-tests.yaml index 13c8750a6d8f0..97d0f0788583a 100644 --- a/op-acceptance-tests/acceptance-tests.yaml +++ b/op-acceptance-tests/acceptance-tests.yaml @@ -4,17 +4,58 @@ # As a rule of thumb, we recommend that each fork gate inherits from the # base gate as well as any earlier fork gates. - - gates: - - id: conductor - description: "Sanity/smoke acceptance tests for networks with conductors." + # New tests should be added here first with an owner metadata. + # Once we're confident they're not flaky, a PR will be automatically created to remove them from this gate. + # Example entry format: + # - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/parallelism + # timeout: 10m + # metadata: + # owner: "team-infra" + - id: flake-shake + description: "Quarantine gate for new and potentially flaky tests requiring stability validation." 
tests: - - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/base/conductor + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/sync_tester/sync_tester_hfs_ext + name: TestSyncTesterHFS_Isthmus_ELSync + timeout: 10m + metadata: + owner: "changwan,anton" + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/jovian + name: TestMinBaseFee + timeout: 10m + metadata: + owner: "george" + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/base/withdrawal + name: TestWithdrawal + timeout: 10m + metadata: + owner: "stefano" + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/base + name: TestDummyFlakyTest + timeout: 10m + metadata: + owner: "stefano" + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/osaka + metadata: {owner: "josh"} + + - id: isthmus + description: "Isthmus network tests." + tests: + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/isthmus + timeout: 6h + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/isthmus/operator_fee + timeout: 6h + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/isthmus/withdrawal_root + timeout: 20m + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/isthmus/erc20_bridge + timeout: 10m + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/isthmus/pectra timeout: 10m - id: base description: "Sanity/smoke acceptance tests for all networks." 
+ inherits: + - isthmus tests: - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/base - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/base/deposit @@ -26,28 +67,12 @@ gates: # TODO(infra#401): Re-enable the test when the sysext missing toolset is implemented #- package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/base/withdrawal # timeout: 10m - - - id: holocene - inherits: - - base - description: "Holocene network tests." - tests: - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/fjord - - id: isthmus - inherits: - - base - description: "Isthmus network tests." + - id: conductor + description: "Sanity/smoke acceptance tests for networks with conductors." tests: - - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/isthmus - timeout: 6h - - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/isthmus/operator_fee - timeout: 6h - - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/isthmus/withdrawal_root - timeout: 20m - - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/isthmus/erc20_bridge - timeout: 10m - - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/isthmus/pectra + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/base/conductor timeout: 10m - id: pre-interop @@ -67,6 +92,7 @@ gates: - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop timeout: 10m - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop/message + timeout: 30m - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop/sync - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop/smoke - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop/contract @@ -83,4 +109,26 @@ gates: description: "Flashblocks network tests." 
tests: - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/flashblocks - timeout: 5m \ No newline at end of file + timeout: 5m + + - id: flashblocks-with-isthmus + inherits: + - base + description: "Flashblocks network tests with Isthmus." + tests: + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/flashblocks + timeout: 5m + + - id: sync-test-op-node + description: "Sync tests for op-node with external networks via the op-sync-tester - tests run daily." + tests: + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/sync_tester/sync_tester_ext_el + timeout: 30m + + - id: jovian + inherits: + - base + description: "Jovian network tests." + tests: + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/jovian + timeout: 10m diff --git a/op-acceptance-tests/cmd/flake-shake-aggregator/main.go b/op-acceptance-tests/cmd/flake-shake-aggregator/main.go new file mode 100644 index 0000000000000..98f5861a68b66 --- /dev/null +++ b/op-acceptance-tests/cmd/flake-shake-aggregator/main.go @@ -0,0 +1,540 @@ +// flake-shake-aggregator aggregates multiple flake-shake reports from parallel workers +// into a single comprehensive report. 
+package main + +import ( + "crypto/sha256" + "encoding/json" + "flag" + "fmt" + html_pkg "html" + "log" + "os" + "path/filepath" + "regexp" + "strings" + "time" +) + +// FlakeShakeResult represents a single test's flake-shake analysis +type FlakeShakeResult struct { + TestName string `json:"test_name"` + Package string `json:"package"` + TotalRuns int `json:"total_runs"` + Passes int `json:"passes"` + Failures int `json:"failures"` + Skipped int `json:"skipped"` + PassRate float64 `json:"pass_rate"` + AvgDuration time.Duration `json:"avg_duration"` + MinDuration time.Duration `json:"min_duration"` + MaxDuration time.Duration `json:"max_duration"` + FailureLogs []string `json:"failure_logs,omitempty"` + LastFailure *time.Time `json:"last_failure,omitempty"` + Recommendation string `json:"recommendation"` +} + +// FlakeShakeReport contains the complete flake-shake analysis +type FlakeShakeReport struct { + Date string `json:"date"` + Gate string `json:"gate"` + TotalRuns int `json:"total_runs"` + Iterations int `json:"iterations"` + Tests []FlakeShakeResult `json:"tests"` + GeneratedAt time.Time `json:"generated_at"` + RunID string `json:"run_id"` +} + +// AggregatedTestStats for accumulating results +type AggregatedTestStats struct { + TestName string + Package string + TotalRuns int + Passes int + Failures int + Skipped int + MinDuration time.Duration + MaxDuration time.Duration + FailureLogs []string + LastFailure *time.Time + durationSum time.Duration + durationCount int +} + +func main() { + var ( + inputPattern string + outputDir string + verbose bool + ) + + flag.StringVar(&inputPattern, "input-pattern", "flake-shake-results-worker-*/flake-shake-report.json", + "Glob pattern to find worker report files") + flag.StringVar(&outputDir, "output-dir", "final-report", + "Directory to write the aggregated report") + flag.BoolVar(&verbose, "verbose", false, "Enable verbose output") + flag.Parse() + + if err := run(inputPattern, outputDir, verbose); err != nil { + 
fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } +} + +func run(inputPattern, outputDir string, verbose bool) error { + logger := log.New(os.Stdout, "[flake-shake-aggregator] ", log.LstdFlags) + // Create output directory + if err := os.MkdirAll(outputDir, 0755); err != nil { + return fmt.Errorf("failed to create output directory: %w", err) + } + + // Find all report files + reportFiles, err := filepath.Glob(inputPattern) + if err != nil { + return fmt.Errorf("failed to glob input files: %w", err) + } + + if len(reportFiles) == 0 { + // Try alternative patterns + alternatives := []string{ + "flake-shake-results-worker-*/flake-shake-report.json", + "*/flake-shake-report.json", + "flake-shake-report-*.json", + } + for _, alt := range alternatives { + reportFiles, err = filepath.Glob(alt) + if err == nil && len(reportFiles) > 0 { + break + } + } + + if len(reportFiles) == 0 { + return fmt.Errorf("no report files found matching pattern: %s", inputPattern) + } + } + + if verbose { + logger.Printf("Found %d report files to aggregate:", len(reportFiles)) + for _, f := range reportFiles { + logger.Printf(" - %s", f) + } + } + + // Aggregate all reports + aggregated := make(map[string]*AggregatedTestStats) + var gate string + var runID string + totalIterations := 0 + + for _, reportFile := range reportFiles { + if verbose { + logger.Printf("Processing %s...", reportFile) + } + + data, err := os.ReadFile(reportFile) + if err != nil { + logger.Printf("Warning: failed to read %s: %v", reportFile, err) + continue + } + + var report FlakeShakeReport + if err := json.Unmarshal(data, &report); err != nil { + logger.Printf("Warning: failed to parse %s: %v", reportFile, err) + continue + } + + // Use first report's metadata + if gate == "" { + gate = report.Gate + } + if runID == "" && report.RunID != "" { + runID = report.RunID + } + totalIterations += report.Iterations + + // Aggregate test results + for _, test := range report.Tests { + key := fmt.Sprintf("%s::%s", 
test.Package, test.TestName) + + if stats, exists := aggregated[key]; exists { + // Merge with existing stats + stats.TotalRuns += test.TotalRuns + stats.Passes += test.Passes + stats.Failures += test.Failures + stats.Skipped += test.Skipped + + // Update durations + if test.MinDuration < stats.MinDuration || stats.MinDuration == 0 { + stats.MinDuration = test.MinDuration + } + if test.MaxDuration > stats.MaxDuration { + stats.MaxDuration = test.MaxDuration + } + stats.durationSum += time.Duration(test.AvgDuration) * time.Duration(test.TotalRuns) + stats.durationCount += test.TotalRuns + + // Merge failure logs (keep first 50) + stats.FailureLogs = append(stats.FailureLogs, test.FailureLogs...) + if len(stats.FailureLogs) > 50 { + stats.FailureLogs = stats.FailureLogs[:50] + } + + // Update last failure time + if test.LastFailure != nil && (stats.LastFailure == nil || test.LastFailure.After(*stats.LastFailure)) { + stats.LastFailure = test.LastFailure + } + } else { + // First occurrence of this test + aggregated[key] = &AggregatedTestStats{ + TestName: test.TestName, + Package: test.Package, + TotalRuns: test.TotalRuns, + Passes: test.Passes, + Failures: test.Failures, + Skipped: test.Skipped, + MinDuration: test.MinDuration, + MaxDuration: test.MaxDuration, + durationSum: time.Duration(test.AvgDuration) * time.Duration(test.TotalRuns), + durationCount: test.TotalRuns, + FailureLogs: test.FailureLogs, + LastFailure: test.LastFailure, + } + } + } + } + + // Calculate final statistics + var finalTests []FlakeShakeResult + totalTestRuns := 0 + for _, stats := range aggregated { + // Calculate pass rate + passRate := 0.0 + if stats.TotalRuns > 0 { + passRate = float64(stats.Passes) / float64(stats.TotalRuns) * 100 + } + + // Calculate average duration + avgDuration := time.Duration(0) + if stats.durationCount > 0 { + avgDuration = stats.durationSum / time.Duration(stats.durationCount) + } + + // Determine recommendation + recommendation := "UNSTABLE" + if passRate == 
100 { + recommendation = "STABLE" + } + + // Convert to final format + totalTestRuns += stats.TotalRuns + finalTests = append(finalTests, FlakeShakeResult{ + TestName: stats.TestName, + Package: stats.Package, + TotalRuns: stats.TotalRuns, + Passes: stats.Passes, + Failures: stats.Failures, + Skipped: stats.Skipped, + PassRate: passRate, + AvgDuration: avgDuration, + MinDuration: stats.MinDuration, + MaxDuration: stats.MaxDuration, + FailureLogs: stats.FailureLogs, + LastFailure: stats.LastFailure, + Recommendation: recommendation, + }) + } + + // Create final aggregated report + finalReport := FlakeShakeReport{ + Date: time.Now().Format("2006-01-02"), + Gate: gate, + TotalRuns: totalTestRuns, + Iterations: totalIterations, + Tests: finalTests, + GeneratedAt: time.Now(), + RunID: runID, + } + + // Save JSON report + jsonFile := filepath.Join(outputDir, "flake-shake-report.json") + jsonData, err := json.MarshalIndent(finalReport, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal report: %w", err) + } + + if err := os.WriteFile(jsonFile, jsonData, 0644); err != nil { + return fmt.Errorf("failed to write JSON report: %w", err) + } + + // Generate and save HTML report + htmlFile := filepath.Join(outputDir, "flake-shake-report.html") + htmlContent := generateHTMLReport(&finalReport) + if err := os.WriteFile(htmlFile, []byte(htmlContent), 0644); err != nil { + return fmt.Errorf("failed to write HTML report: %w", err) + } + + logger.Printf("✅ Aggregation complete!") + logger.Printf(" - Processed %d worker reports", len(reportFiles)) + logger.Printf(" - Aggregated %d unique tests", len(finalTests)) + logger.Printf(" - Total iterations: %d", totalIterations) + logger.Printf(" - Reports saved to:") + logger.Printf(" • %s", jsonFile) + logger.Printf(" • %s", htmlFile) + + // Print summary statistics + stableCount := 0 + unstableCount := 0 + for _, test := range finalTests { + if test.Recommendation == "STABLE" { + stableCount++ + } else { + unstableCount++ + } 
+ } + + logger.Printf("\n📊 Test Stability Summary:") + if len(finalTests) > 0 { + logger.Printf(" - STABLE: %d tests (%.1f%%)", stableCount, + float64(stableCount)/float64(len(finalTests))*100) + logger.Printf(" - UNSTABLE: %d tests (%.1f%%)", unstableCount, + float64(unstableCount)/float64(len(finalTests))*100) + } else { + logger.Printf(" - No tests found") + } + + // List unstable tests if any + if unstableCount > 0 && verbose { + logger.Printf("\n⚠️ Unstable tests:") + for _, test := range finalTests { + if test.Recommendation == "UNSTABLE" { + logger.Printf(" - %s (%.1f%% pass rate)", + strings.TrimPrefix(test.TestName, test.Package+"::"), + test.PassRate) + } + } + } + + return nil +} + +func generateHTMLReport(report *FlakeShakeReport) string { + var html strings.Builder + + html.WriteString(` + + + Flake-Shake Report + + + +
+

Flake-Shake Report - ` + html_pkg.EscapeString(report.Gate) + `

+

Generated: ` + report.GeneratedAt.Format("2006-01-02 15:04:05") + `

+ +
+
+

Total Tests

+
` + fmt.Sprintf("%d", len(report.Tests)) + `
+
+
+

Iterations

+
` + fmt.Sprintf("%d", report.Iterations) + `
+
+
+

Stable Tests

+
`) + + stableCount := 0 + for _, test := range report.Tests { + if test.Recommendation == "STABLE" { + stableCount++ + } + } + html.WriteString(fmt.Sprintf("%d", stableCount)) + + html.WriteString(`
+
+
+

Unstable Tests

+
`) + + html.WriteString(fmt.Sprintf("%d", len(report.Tests)-stableCount)) + + html.WriteString(`
+
+
+ +

Stable Tests

`) + + if stableCount > 0 { + html.WriteString(` +
    `) + for _, test := range report.Tests { + if test.Recommendation == "STABLE" { + html.WriteString(fmt.Sprintf(` +
  • %s (%s)
  • `, + html_pkg.EscapeString(test.TestName), + html_pkg.EscapeString(test.Package), + )) + } + } + html.WriteString(` +
`) + } else { + html.WriteString(` +

No stable tests in this run.

`) + } + + html.WriteString(` + + + + + + + + + + + + + + `) + + for _, test := range report.Tests { + rowClass := "" + if test.PassRate == 100 { + rowClass = "pass-rate-100" + } else if test.PassRate < 95 { + rowClass = "pass-rate-low" + } + + html.WriteString(fmt.Sprintf(` + + + + + + + + + + `, + rowClass, + html_pkg.EscapeString(test.TestName), + html_pkg.EscapeString(test.Package), + test.PassRate, + test.TotalRuns, + test.Passes, + test.Failures, + test.AvgDuration.Round(time.Millisecond), + strings.ToLower(test.Recommendation), + test.Recommendation, + )) + } + + html.WriteString(` + +
Test NamePackagePass RateRunsPassedFailedAvg DurationStatus
%s%s%.1f%%%d%d%d%s%s
+`) + + // Append grouped failure details + html.WriteString(` +

Failure Details

+`) + + normalizer := regexp.MustCompile(`(?m)^\s*\[?\d{4}-\d{2}-\d{2}.*$|\bt=\d{4}-\d{2}-\d{2}.*$|\b(duration|elapsed|took)[:=].*$`) + classify := func(s string) string { + ls := strings.ToLower(s) + switch { + case strings.Contains(ls, "context deadline exceeded"): + return "context deadline" + case strings.Contains(ls, "deadline exceeded"): + return "deadline exceeded" + case strings.Contains(ls, "timeout"): + return "timeout" + case strings.Contains(ls, "connection refused"): + return "connection refused" + case strings.Contains(ls, "connection reset"): + return "connection reset" + case strings.Contains(ls, "rpc error") || strings.Contains(ls, "rpc call failed"): + return "rpc error" + case strings.Contains(ls, "assert") || strings.Contains(ls, "require"): + return "assertion" + default: + return "unknown" + } + } + for _, test := range report.Tests { + if len(test.FailureLogs) == 0 { + continue + } + html.WriteString(fmt.Sprintf(`
%s — %s (failures: %d)`, + html_pkg.EscapeString(test.TestName), html_pkg.EscapeString(test.Package), test.Failures)) + + groups := map[string]struct { + Count int + Sample string + Type string + }{} + typeSummary := map[string]int{} + for _, raw := range test.FailureLogs { + norm := normalizer.ReplaceAllString(raw, "") + norm = strings.TrimSpace(norm) + sum := sha256.Sum256([]byte(norm)) + key := fmt.Sprintf("%x", sum[:]) + g := groups[key] + if g.Count == 0 { + g.Sample = norm + g.Type = classify(norm) + } + g.Count++ + groups[key] = g + } + // Build type summary + for _, g := range groups { + typeSummary[g.Type] += g.Count + } + // Render summary + html.WriteString(`
`) + html.WriteString(`Summary:
    `) + for t, c := range typeSummary { + html.WriteString(fmt.Sprintf(`
  • %s: %d
  • `, html_pkg.EscapeString(t), c)) + } + html.WriteString(`
`) + // Render groups + for _, g := range groups { + html.WriteString(`
`) + html.WriteString(fmt.Sprintf(`
Type: %s
`, html_pkg.EscapeString(g.Type))) + html.WriteString(fmt.Sprintf(`
Occurrences: %d
`, g.Count)) + html.WriteString(`
` + html_pkg.EscapeString(g.Sample) + `
`) + html.WriteString(`
`) + } + html.WriteString(`
`) + } + + html.WriteString(` +
+ +`) + + return html.String() +} diff --git a/op-acceptance-tests/cmd/flake-shake-aggregator/main_test.go b/op-acceptance-tests/cmd/flake-shake-aggregator/main_test.go new file mode 100644 index 0000000000000..1f30f74afe5a9 --- /dev/null +++ b/op-acceptance-tests/cmd/flake-shake-aggregator/main_test.go @@ -0,0 +1,106 @@ +package main + +import ( + "encoding/json" + "os" + "path/filepath" + "strings" + "testing" + "time" +) + +func writeReport(t *testing.T, dir, name string, r FlakeShakeReport) string { + t.Helper() + b, err := json.Marshal(r) + if err != nil { + t.Fatalf("marshal: %v", err) + } + p := filepath.Join(dir, name) + if err := os.WriteFile(p, b, 0644); err != nil { + t.Fatalf("write: %v", err) + } + return p +} + +func TestRunAggregatesReports(t *testing.T) { + tmp := t.TempDir() + // create two worker reports + r1 := FlakeShakeReport{ + Date: "2025-01-01", + Gate: "flake-shake", + Iterations: 10, + Tests: []FlakeShakeResult{{ + TestName: "pkg::T1", + Package: "pkg", + TotalRuns: 10, + Passes: 9, + Failures: 1, + Skipped: 0, + AvgDuration: 100 * time.Millisecond, + MinDuration: 80 * time.Millisecond, + MaxDuration: 120 * time.Millisecond, + }}, + GeneratedAt: time.Now(), + RunID: "abc", + } + r2 := FlakeShakeReport{ + Date: "2025-01-01", + Gate: "flake-shake", + Iterations: 5, + Tests: []FlakeShakeResult{{ + TestName: "pkg::T1", + Package: "pkg", + TotalRuns: 5, + Passes: 5, + Failures: 0, + Skipped: 0, + AvgDuration: 90 * time.Millisecond, + MinDuration: 70 * time.Millisecond, + MaxDuration: 110 * time.Millisecond, + }}, + GeneratedAt: time.Now(), + RunID: "abc", + } + // Place files under pattern + d1 := filepath.Join(tmp, "flake-shake-results-worker-1") + d2 := filepath.Join(tmp, "flake-shake-results-worker-2") + if err := os.MkdirAll(d1, 0755); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(d2, 0755); err != nil { + t.Fatal(err) + } + writeReport(t, d1, "flake-shake-report.json", r1) + writeReport(t, d2, "flake-shake-report.json", r2) + + 
out := filepath.Join(tmp, "final") + if err := run(filepath.Join(tmp, "flake-shake-results-worker-*/flake-shake-report.json"), out, false); err != nil { + t.Fatalf("run error: %v", err) + } + // verify outputs exist + if _, err := os.Stat(filepath.Join(out, "flake-shake-report.json")); err != nil { + t.Fatalf("missing json report: %v", err) + } + if _, err := os.Stat(filepath.Join(out, "flake-shake-report.html")); err != nil { + t.Fatalf("missing html report: %v", err) + } +} + +func TestGenerateHTMLReportBasic(t *testing.T) { + r := &FlakeShakeReport{ + Gate: "flake-shake", + Iterations: 15, + Tests: []FlakeShakeResult{ + {TestName: "pkg::T1", Package: "pkg", TotalRuns: 10, Passes: 10, Failures: 0, PassRate: 100}, + {TestName: "pkg::T2", Package: "pkg", TotalRuns: 5, Passes: 4, Failures: 1, PassRate: 80}, + }, + GeneratedAt: time.Now(), + } + html := generateHTMLReport(r) + if len(html) == 0 { + t.Fatal("empty html") + } + if !strings.Contains(html, "Flake-Shake Report") { + t.Fatal("missing title") + } +} diff --git a/op-acceptance-tests/cmd/flake-shake-promoter/main.go b/op-acceptance-tests/cmd/flake-shake-promoter/main.go new file mode 100644 index 0000000000000..77e57769a94c8 --- /dev/null +++ b/op-acceptance-tests/cmd/flake-shake-promoter/main.go @@ -0,0 +1,1117 @@ +package main + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "flag" + "fmt" + "io" + "log" + "net/http" + "net/url" + "os" + "path/filepath" + "sort" + "strings" + "time" + + github "github.com/google/go-github/v55/github" // newer version of Go is needed for the latest GitHub API + "golang.org/x/oauth2" + yaml "gopkg.in/yaml.v3" +) + +var logger *log.Logger + +// CircleCI API models +type pipelineList struct { + Items []pipeline `json:"items"` + NextPageToken string `json:"next_page_token"` +} + +type pipeline struct { + ID string `json:"id"` + CreatedAt time.Time `json:"created_at"` +} + +type workflowList struct { + Items []workflow `json:"items"` + NextPageToken string 
`json:"next_page_token"` +} + +type workflow struct { + ID string `json:"id"` + Name string `json:"name"` +} + +type jobList struct { + Items []job `json:"items"` + NextPageToken string `json:"next_page_token"` +} + +type job struct { + Name string `json:"name"` + JobNumber int `json:"job_number"` +} + +type artifactsList struct { + Items []artifact `json:"items"` +} + +type artifact struct { + URL string `json:"url"` + Path string `json:"path"` +} + +// Daily summary (as produced in CI job) +type DailySummary struct { + Date string `json:"date"` + Gate string `json:"gate"` + TotalRuns int `json:"total_runs"` + Iterations int `json:"iterations"` + Totals struct { + Stable int `json:"stable"` + Unstable int `json:"unstable"` + } `json:"totals"` + StableTests []struct { + TestName string `json:"test_name"` + Package string `json:"package"` + TotalRuns int `json:"total_runs"` + PassRate float64 `json:"pass_rate"` + } `json:"stable_tests"` + UnstableTests []struct { + TestName string `json:"test_name"` + Package string `json:"package"` + TotalRuns int `json:"total_runs"` + Passes int `json:"passes"` + Failures int `json:"failures"` + PassRate float64 `json:"pass_rate"` + } `json:"unstable_tests"` +} + +// Acceptance tests YAML models +type acceptanceYAML struct { + Gates []gateYAML `yaml:"gates"` +} + +type gateYAML struct { + ID string `yaml:"id"` + Description string `yaml:"description,omitempty"` + Inherits []string `yaml:"inherits,omitempty"` + Tests []testEntry `yaml:"tests,omitempty"` +} + +type testEntry struct { + Name string `yaml:"name,omitempty"` + Package string `yaml:"package"` + Timeout string `yaml:"timeout,omitempty"` + Metadata map[string]interface{} `yaml:"metadata,omitempty"` +} + +// Aggregated per test across days +type aggStats struct { + Package string `json:"package"` + TestName string `json:"test_name"` + TotalRuns int `json:"total_runs"` + Passes int `json:"passes"` + Failures int `json:"failures"` + FirstSeenDay string `json:"first_seen_day"` 
+ LastSeenDay string `json:"last_seen_day"` + LastFailureAt *time.Time `json:"last_failure_at,omitempty"` + DaysObserved []string `json:"days_observed"` +} + +type promoteCandidate struct { + Package string `json:"package"` + TestName string `json:"test_name"` + TotalRuns int `json:"total_runs"` + PassRate float64 `json:"pass_rate"` + Timeout string `json:"timeout"` + FirstSeenDay string `json:"first_seen_day"` +} + +// Map tests in flake-shake: key -> (timeout, name) +type testInfo struct { + Timeout string + Name string + Meta map[string]interface{} + GateIndex int + TestIndex int +} + +func main() { + opts := parsePromoterFlags() + + logger = log.New(os.Stdout, "[flake-shake-promoter] ", log.LstdFlags) + if opts.verbose { + logger.Printf("Flags: org=%s repo=%s branch=%s workflow=%s report_job=%s days=%d gate=%s min_runs=%d max_failure_rate=%.4f min_age_days=%d require_clean_24h=%t out=%s dry_run=%t", + opts.org, opts.repo, opts.branch, opts.workflowName, opts.reportJobName, opts.daysBack, opts.gateID, opts.minRuns, opts.maxFailureRate, opts.minAgeDays, opts.requireClean24h, opts.outDir, opts.dryRun, + ) + } + + token := requireEnv("CIRCLE_API_TOKEN") + if err := ensureDirExists(opts.outDir); err != nil { + fmt.Fprintf(os.Stderr, "failed to create out dir: %v\n", err) + os.Exit(1) + } + + now := time.Now().UTC() + since := now.AddDate(0, 0, -opts.daysBack) + + client := &http.Client{Timeout: 30 * time.Second} + ctx := &apiCtx{client: client, token: token} + + dailyReports, err := collectReports(ctx, opts.org, opts.repo, opts.branch, opts.workflowName, opts.reportJobName, since, opts.verbose) + if err != nil { + fmt.Fprintf(os.Stderr, "collection failed: %v\n", err) + os.Exit(1) + } + + agg := aggregate(dailyReports) + + logDailyReportSummary(dailyReports, opts.verbose) + + // Load acceptance-tests.yaml + yamlPath := filepath.Join("op-acceptance-tests", "acceptance-tests.yaml") + cfg, err := readAcceptanceYAML(yamlPath) + if err != nil { + fmt.Fprintf(os.Stderr, 
"failed reading %s: %v\n", yamlPath, err) + os.Exit(1) + } + + // Build indices for flake-shake tests and target gates + flakeTests, flakeGate, gateIndex := buildFlakeTests(&cfg, opts.gateID, yamlPath) + _ = gateIndex + + // Select promotion candidates + candidates, reasons := selectPromotionCandidates(agg, flakeTests, opts.minRuns, opts.maxFailureRate, opts.requireClean24h, opts.minAgeDays, now) + + // Write outputs + if err := writeJSON(filepath.Join(opts.outDir, "aggregate.json"), agg); err != nil { + fmt.Fprintf(os.Stderr, "failed writing aggregate: %v\n", err) + os.Exit(1) + } + sort.Slice(candidates, func(i, j int) bool { + if candidates[i].Package == candidates[j].Package { + return candidates[i].TestName < candidates[j].TestName + } + return candidates[i].Package < candidates[j].Package + }) + if err := writeJSON(filepath.Join(opts.outDir, "promotion-ready.json"), map[string]interface{}{"candidates": candidates, "skipped": reasons}); err != nil { + fmt.Fprintf(os.Stderr, "failed writing promotion-ready: %v\n", err) + os.Exit(1) + } + + if opts.verbose { + fmt.Printf("Promotion candidates: %d\n", len(candidates)) + for _, c := range candidates { + name := c.TestName + if strings.TrimSpace(name) == "" { + name = "(package)" + } + fmt.Printf(" - %s %s (runs=%d pass=%.2f%%)\n", c.Package, name, c.TotalRuns, c.PassRate) + } + } + + // Write metadata for downstream consumers (e.g., Slack) + meta := map[string]interface{}{ + "date": now.Format("2006-01-02"), + "candidates": len(candidates), + "flake_gate_tests": len(flakeGate.Tests), + } + if err := writeJSON(filepath.Join(opts.outDir, "metadata.json"), meta); err != nil { + fmt.Fprintf(os.Stderr, "failed writing metadata: %v\n", err) + os.Exit(1) + } + + // Generate updated YAML (proposal) + updated := computeUpdatedConfig(cfg, opts.gateID, candidates) + + // Write proposed YAML + outYAML := filepath.Join(opts.outDir, "promotion.yaml") + if err := writeYAML(outYAML, &updated); err != nil { + 
fmt.Fprintf(os.Stderr, "failed writing promotion.yaml: %v\n", err) + os.Exit(1) + } + + // Print short summary + if len(candidates) == 0 { + reason := buildNoCandidatesSummary(agg, flakeTests, opts.minAgeDays, opts.requireClean24h) + _ = os.WriteFile(filepath.Join(opts.outDir, "SUMMARY.txt"), []byte(reason+"\n"), 0o644) + logger.Println(reason) + return + } + var b bytes.Buffer + b.WriteString("Promotion candidates (dry-run):\n") + for _, c := range candidates { + b.WriteString(fmt.Sprintf("- %s %s (runs=%d, pass=%.2f%%)\n", c.Package, c.TestName, c.TotalRuns, c.PassRate)) + } + _ = os.WriteFile(filepath.Join(opts.outDir, "SUMMARY.txt"), b.Bytes(), 0o644) + logger.Print(b.String()) + + if opts.dryRun { + logger.Println("Dry-run enabled; skipping branch creation, file update, and PR creation.") + return + } + + // Prepare updated YAML content for PR by editing only the flake-shake gate in-place to preserve comments + var updatedYAMLBytes []byte + + prBranch := fmt.Sprintf("ci/flake-shake-promote/%s", time.Now().UTC().Format("2006-01-02-150405")) + + // Prepare commit message and PR body + title := "chore(op-acceptance-tests): flake-shake; test promotions" + var body bytes.Buffer + body.WriteString("## 🤖 Automated Flake-Shake Test Promotion\n\n") + body.WriteString(fmt.Sprintf("Promoting %d test(s) from gate `"+opts.gateID+"` based on stability criteria.\n\n", len(candidates))) + body.WriteString("### Tests Being Promoted\n\n") + body.WriteString("| Test | Package | Total Runs | Pass Rate |\n|---|---|---:|---:|\n") + for _, c := range candidates { + name := c.TestName + if strings.TrimSpace(name) == "" { + name = "(package)" + } + body.WriteString(fmt.Sprintf("| %s | %s | %d | %.2f%% |\n", name, c.Package, c.TotalRuns, c.PassRate)) + } + body.WriteString("\nThis PR was auto-generated by flake-shake promoter.\n") + + // Use GitHub API to create branch, update file, and open PR + ghToken := os.Getenv("GH_TOKEN") + if ghToken == "" { + fmt.Fprintln(os.Stderr, "GH_TOKEN 
is required for PR creation but not set") + os.Exit(1) + } + ghCtx := context.Background() + ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: ghToken}) + tc := oauth2.NewClient(ghCtx, ts) + ghc := github.NewClient(tc) + + if opts.verbose { + logger.Printf("PR: starting creation process (base_branch=%s candidates=%d)", opts.branch, len(candidates)) + } + + // 1) Get base branch ref + baseRef, _, err := ghc.Git.GetRef(ghCtx, opts.org, opts.repo, "refs/heads/"+opts.branch) + if err != nil || baseRef.Object == nil || baseRef.Object.SHA == nil { + fmt.Fprintf(os.Stderr, "failed to get base ref: %v\n", err) + os.Exit(1) + } + if opts.verbose { + logger.Printf("PR: base ref resolved sha=%s", baseRef.GetObject().GetSHA()) + } + + // 2) Create new branch ref + newRef := &github.Reference{ + Ref: github.String("refs/heads/" + prBranch), + Object: &github.GitObject{SHA: baseRef.Object.SHA}, + } + if _, _, err := ghc.Git.CreateRef(ghCtx, opts.org, opts.repo, newRef); err != nil { + fmt.Fprintf(os.Stderr, "failed to create ref: %v\n", err) + os.Exit(1) + } + if opts.verbose { + logger.Printf("PR: created branch %s", prBranch) + } + + // 3) Read current file to fetch SHA (if exists) on base branch + path := yamlPath + var sha *string + var originalYAML []byte + if fileContent, _, resp, err := ghc.Repositories.GetContents(ghCtx, opts.org, opts.repo, path, &github.RepositoryContentGetOptions{Ref: opts.branch}); err == nil && fileContent != nil { + sha = fileContent.SHA + // Retrieve decoded file content via client helper + rawContent, gcErr := fileContent.GetContent() + if gcErr == nil && rawContent != "" { + originalYAML = []byte(rawContent) + } + } else if resp != nil && resp.StatusCode == 404 { + sha = nil + } else if err != nil { + fmt.Fprintf(os.Stderr, "failed to get contents: %v\n", err) + os.Exit(1) + } + + // Build updated YAML by removing promoted tests only from flake-shake gate, preserving comments + promoteKeys := map[string]promoteCandidate{} + for _, c := 
range candidates { + promoteKeys[keyFor(c.Package, c.TestName)] = c + } + updatedYAMLBytes, err = updateFlakeShakeGateOnly(originalYAML, opts.gateID, promoteKeys) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to update YAML: %v\n", err) + os.Exit(1) + } + + // 4) Update file in new branch + commitMsg := title + if _, _, err = ghc.Repositories.UpdateFile(ghCtx, opts.org, opts.repo, path, &github.RepositoryContentFileOptions{ + Message: github.String(commitMsg), + Content: updatedYAMLBytes, + Branch: github.String(prBranch), + SHA: sha, + }); err != nil { + fmt.Fprintf(os.Stderr, "failed to update file: %v\n", err) + os.Exit(1) + } + if opts.verbose { + logger.Printf("PR: updated file %s on branch %s", path, prBranch) + } + + // 5) Create PR + prReq := &github.NewPullRequest{ + Title: github.String(title), + Head: github.String(prBranch), + Base: github.String(opts.branch), + Body: github.String(body.String()), + } + pr, _, err := ghc.PullRequests.Create(ghCtx, opts.org, opts.repo, prReq) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to create PR: %v\n", err) + os.Exit(1) + } + logger.Printf("PR created: %s (number=%d)", pr.GetHTMLURL(), pr.GetNumber()) + + // Update metadata with PR details for downstream Slack notification + meta["pr_url"] = pr.GetHTMLURL() + meta["pr_number"] = pr.GetNumber() + if err := writeJSON(filepath.Join(opts.outDir, "metadata.json"), meta); err != nil { + fmt.Fprintf(os.Stderr, "failed updating metadata with PR info: %v\n", err) + } + + // 6) Add labels + if _, _, err := ghc.Issues.AddLabelsToIssue(ghCtx, opts.org, opts.repo, pr.GetNumber(), []string{"M-ci", "A-acceptance-tests"}); err != nil { + fmt.Fprintf(os.Stderr, "failed to add labels: %v\n", err) + } + + // 7) Request reviewers (user and team slug) + if _, _, err := ghc.PullRequests.RequestReviewers(ghCtx, opts.org, opts.repo, pr.GetNumber(), github.ReviewersRequest{ + Reviewers: []string{"scharissis"}, + TeamReviewers: []string{"platforms-team"}, + }); err != nil { + 
fmt.Fprintf(os.Stderr, "failed to request reviewers: %v\n", err) + } +} + +// promoterOpts holds command-line options for the promoter tool. +type promoterOpts struct { + org string + repo string + branch string + workflowName string + reportJobName string + daysBack int + gateID string + minRuns int + maxFailureRate float64 + minAgeDays int + outDir string + dryRun bool + requireClean24h bool + verbose bool +} + +func parsePromoterFlags() promoterOpts { + var opts promoterOpts + flag.StringVar(&opts.org, "org", "ethereum-optimism", "GitHub org") + flag.StringVar(&opts.repo, "repo", "optimism", "GitHub repo") + flag.StringVar(&opts.branch, "branch", "develop", "Branch to scan") + flag.StringVar(&opts.workflowName, "workflow", "scheduled-flake-shake", "Workflow name") + flag.StringVar(&opts.reportJobName, "report-job", "op-acceptance-tests-flake-shake-report", "Report job name") + flag.IntVar(&opts.daysBack, "days", 3, "Number of days to aggregate") + flag.StringVar(&opts.gateID, "gate", "flake-shake", "Gate id in acceptance-tests.yaml") + flag.IntVar(&opts.minRuns, "min-runs", 300, "Minimum total runs required") + flag.Float64Var(&opts.maxFailureRate, "max-failure-rate", 0.01, "Maximum allowed failure rate") + flag.IntVar(&opts.minAgeDays, "min-age-days", 2, "Minimum age in days in flake-shake") + flag.StringVar(&opts.outDir, "out", "./promotion-output", "Output directory") + flag.BoolVar(&opts.dryRun, "dry-run", true, "Do not modify repo or open PRs") + flag.BoolVar(&opts.requireClean24h, "require-clean-24h", false, "Require no failures in the last 24 hours") + flag.BoolVar(&opts.verbose, "verbose", false, "Enable verbose debug logging") + flag.Parse() + // Validate interdependent options early to avoid confusing outcomes later + if opts.daysBack < opts.minAgeDays { + fmt.Fprintf(os.Stderr, "invalid flags: --days (%d) must be >= --min-age-days (%d)\n", opts.daysBack, opts.minAgeDays) + os.Exit(2) + } + if opts.requireClean24h && opts.daysBack < 2 { + 
fmt.Fprintf(os.Stderr, "invalid flags: --days (%d) must be >= 2 when --require-clean-24h is set to ensure >24h coverage\n", opts.daysBack) + os.Exit(2) + } + return opts +} + +func requireEnv(name string) string { + v := os.Getenv(name) + if v == "" { + fmt.Fprintf(os.Stderr, "%s is not set\n", name) + os.Exit(1) + } + return v +} + +func ensureDirExists(dir string) error { + return os.MkdirAll(dir, 0o755) +} + +func logDailyReportSummary(dailyReports map[string]DailySummary, verbose bool) { + if !verbose { + return + } + logger.Printf("Collected %d day(s) of summaries.", len(dailyReports)) + totalTests := 0 + for date, ds := range dailyReports { + n := len(ds.StableTests) + len(ds.UnstableTests) + totalTests += n + logger.Printf(" - %s: %d tests (stable=%d unstable=%d)", date, n, len(ds.StableTests), len(ds.UnstableTests)) + } + logger.Printf("Total tests across days: %d", totalTests) +} + +// buildFlakeTests returns a map of tests in the flake-shake gate and also returns +// the flake gate reference and a gate index map for potential future use. 
+func buildFlakeTests(cfg *acceptanceYAML, gateID, yamlPath string) (map[string]testInfo, *gateYAML, map[string]*gateYAML) { + flakeGate := findGate(cfg, gateID) + if flakeGate == nil { + fmt.Fprintf(os.Stderr, "gate %s not found in %s\n", gateID, yamlPath) + os.Exit(1) + } + gateIndex := map[string]*gateYAML{} + for i := range cfg.Gates { + gateIndex[cfg.Gates[i].ID] = &cfg.Gates[i] + } + flakeTests := map[string]testInfo{} + for ti, t := range flakeGate.Tests { + key := keyFor(t.Package, t.Name) + flakeTests[key] = testInfo{Timeout: t.Timeout, Name: t.Name, Meta: t.Metadata, GateIndex: indexOfGate(cfg, gateID), TestIndex: ti} + } + return flakeTests, flakeGate, gateIndex +} + +func selectPromotionCandidates(agg map[string]*aggStats, flakeTests map[string]testInfo, minRuns int, maxFailureRate float64, requireClean24h bool, minAgeDays int, now time.Time) ([]promoteCandidate, map[string]string) { + candidates := []promoteCandidate{} + reasons := map[string]string{} + // Identify wildcard package entries (tests with empty name in the flake-shake gate) + wildcardPkgs := map[string]testInfo{} + for k, info := range flakeTests { + if strings.TrimSpace(info.Name) == "" && strings.HasSuffix(k, "::") { + pkg := strings.TrimSuffix(k, "::") + if pkg != "" { + wildcardPkgs[pkg] = info + } + } + } + + // Produce package-level candidates for wildcard entries by aggregating all tests in the package + for pkg, info := range wildcardPkgs { + totalRuns := 0 + totalPasses := 0 + totalFailures := 0 + earliest := "" + var lastFailureAt *time.Time + for _, s := range agg { + if s.Package != pkg { + continue + } + totalRuns += s.TotalRuns + totalPasses += s.Passes + totalFailures += s.Failures + if earliest == "" || (s.FirstSeenDay != "" && s.FirstSeenDay < earliest) { + earliest = s.FirstSeenDay + } + if s.LastFailureAt != nil { + if lastFailureAt == nil || s.LastFailureAt.After(*lastFailureAt) { + lastFailureAt = s.LastFailureAt + } + } + } + if totalRuns == 0 { + reasons[keyFor(pkg, 
"")] = "no runs observed for package" + continue + } + if totalRuns < minRuns { + reasons[keyFor(pkg, "")] = fmt.Sprintf("insufficient runs: %d < %d (pkg)", totalRuns, minRuns) + continue + } + failureRate := 0.0 + if totalRuns > 0 { + failureRate = float64(totalFailures) / float64(totalRuns) + } + if failureRate > maxFailureRate { + reasons[keyFor(pkg, "")] = fmt.Sprintf("failure rate %.4f exceeds max %.4f (pkg)", failureRate, maxFailureRate) + continue + } + if requireClean24h && lastFailureAt != nil { + if time.Since(*lastFailureAt) < 24*time.Hour { + reasons[keyFor(pkg, "")] = "failure within last 24h (pkg)" + continue + } + } + if earliest == "" { + reasons[keyFor(pkg, "")] = "no age information (pkg)" + continue + } + firstDay, _ := time.Parse("2006-01-02", earliest) + daysInGate := int(now.Sub(firstDay).Hours()/24) + 1 + if daysInGate < minAgeDays { + reasons[keyFor(pkg, "")] = fmt.Sprintf("min age %dd not met (have %dd) (pkg)", minAgeDays, daysInGate) + continue + } + passRate := 0.0 + if totalRuns > 0 { + passRate = float64(totalPasses) / float64(totalRuns) + } + candidates = append(candidates, promoteCandidate{ + Package: pkg, + TestName: "", + TotalRuns: totalRuns, + PassRate: passRate * 100.0, + Timeout: info.Timeout, + FirstSeenDay: earliest, + }) + } + for key, s := range agg { + // Skip per-test candidates for any package that is handled via wildcard aggregation + if _, hasWildcard := wildcardPkgs[s.Package]; hasWildcard { + continue + } + info, ok := flakeTests[key] + if !ok { + // Support wildcard package entries in the flake-shake gate where name is omitted. + // Treat a gate entry with empty name as a wildcard that matches all tests in that package. 
+ if wi, wok := flakeTests[keyFor(s.Package, "")]; wok { + info = wi + } else { + continue + } + } + if s.TotalRuns < minRuns { + reasons[key] = fmt.Sprintf("insufficient runs: %d < %d", s.TotalRuns, minRuns) + continue + } + failureRate := 0.0 + if s.TotalRuns > 0 { + failureRate = float64(s.Failures) / float64(s.TotalRuns) + } + if failureRate > maxFailureRate { + reasons[key] = fmt.Sprintf("failure rate %.4f exceeds max %.4f", failureRate, maxFailureRate) + continue + } + if requireClean24h && s.LastFailureAt != nil { + if time.Since(*s.LastFailureAt) < 24*time.Hour { + reasons[key] = "failure within last 24h" + continue + } + } + if s.FirstSeenDay == "" { + reasons[key] = "no age information" + continue + } + firstDay, _ := time.Parse("2006-01-02", s.FirstSeenDay) + daysInGate := int(now.Sub(firstDay).Hours()/24) + 1 + if daysInGate < minAgeDays { + reasons[key] = fmt.Sprintf("min age %dd not met (have %dd)", minAgeDays, daysInGate) + continue + } + passRate := 0.0 + if s.TotalRuns > 0 { + passRate = float64(s.Passes) / float64(s.TotalRuns) + } + candidates = append(candidates, promoteCandidate{ + Package: s.Package, + TestName: s.TestName, + TotalRuns: s.TotalRuns, + PassRate: passRate * 100.0, + Timeout: info.Timeout, + FirstSeenDay: s.FirstSeenDay, + }) + } + return candidates, reasons +} + +func computeUpdatedConfig(cfg acceptanceYAML, gateID string, candidates []promoteCandidate) acceptanceYAML { + updated := cfg + flakeIdx := indexOfGate(&updated, gateID) + if flakeIdx < 0 { + fmt.Fprintf(os.Stderr, "gate %s not found when updating\n", gateID) + os.Exit(1) + } + promoteKeys := map[string]promoteCandidate{} + for _, c := range candidates { + promoteKeys[keyFor(c.Package, c.TestName)] = c + } + newFlakeTests := make([]testEntry, 0, len(updated.Gates[flakeIdx].Tests)) + for _, t := range updated.Gates[flakeIdx].Tests { + k := keyFor(t.Package, t.Name) + if _, ok := promoteKeys[k]; !ok { + newFlakeTests = append(newFlakeTests, t) + } + } + 
updated.Gates[flakeIdx].Tests = newFlakeTests + return updated +} + +func buildNoCandidatesSummary(agg map[string]*aggStats, flakeTests map[string]testInfo, minAgeDays int, requireClean24h bool) string { + earliest := "" + totalRuns := 0 + totalPass := 0 + totalFail := 0 + daySet := map[string]struct{}{} + for key, s := range agg { + if _, ok := flakeTests[key]; !ok { + continue + } + totalRuns += s.TotalRuns + totalPass += s.Passes + totalFail += s.Failures + if earliest == "" || (s.FirstSeenDay != "" && s.FirstSeenDay < earliest) { + earliest = s.FirstSeenDay + } + for _, d := range s.DaysObserved { + daySet[d] = struct{}{} + } + } + daysObserved := len(daySet) + return fmt.Sprintf( + "No promotion candidates. Reason: min_age_days=%d; earliest_observation=%s; days_observed=%d; require_clean_24h=%t; total_runs=%d; passes=%d; failures=%d.", + minAgeDays, earliest, daysObserved, requireClean24h, totalRuns, totalPass, totalFail, + ) +} + +// HTTP helper context +type apiCtx struct { + client *http.Client + token string +} + +func (c *apiCtx) getJSON(u string, v interface{}) error { + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return err + } + req.Header.Set("Circle-Token", c.token) + req.Header.Set("Accept", "application/json") + resp, err := c.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode >= 300 { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("GET %s: status %d body=%s", u, resp.StatusCode, string(body)) + } + dec := json.NewDecoder(resp.Body) + return dec.Decode(v) +} + +func (c *apiCtx) getBytes(u string) ([]byte, error) { + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, err + } + req.Header.Set("Circle-Token", c.token) + req.Header.Set("Accept", "application/json") + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode >= 300 { + body, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("GET 
%s: status %d body=%s", u, resp.StatusCode, string(body)) + } + return io.ReadAll(resp.Body) +} + +// collectReports scans CircleCI pipelines for the given GitHub repo/branch, +// locates the specified workflow and report job, and downloads/merges the +// daily-summary.json artifacts into a map keyed by date (YYYY-MM-DD). +// Only runs created at or after 'since' are considered. When multiple +// summaries exist for the same day, totals are summed and test lists merged. +func collectReports(ctx *apiCtx, org, repo, branch, workflowName, reportJobName string, since time.Time, verbose bool) (map[string]DailySummary, error) { + dailyByDay := map[string]DailySummary{} + + basePipelines := fmt.Sprintf("https://circleci.com/api/v2/project/gh/%s/%s/pipeline?branch=%s", url.PathEscape(org), url.PathEscape(repo), url.QueryEscape(branch)) + pageURL := basePipelines + + for { + pl, nextToken, err := getPipelinesPage(ctx, pageURL) + if err != nil { + return nil, err + } + if verbose { + logger.Printf("Scanning pipelines page: %s", pageURL) + } + + stop, err := processPipelines(ctx, pl, org, repo, workflowName, reportJobName, since, verbose, dailyByDay) + if err != nil { + return nil, err + } + if stop { + break + } + if nextToken == "" { + break + } + pageURL = basePipelines + "&page-token=" + url.QueryEscape(nextToken) + } + return dailyByDay, nil +} + +// getPipelinesPage fetches a page of pipelines and returns the list along with the next page token. +func getPipelinesPage(ctx *apiCtx, pageURL string) (pipelineList, string, error) { + var pl pipelineList + if err := ctx.getJSON(pageURL, &pl); err != nil { + return pipelineList{}, "", err + } + return pl, pl.NextPageToken, nil +} + +// processPipelines iterates pipelines, filters by date/window, and merges daily summaries. +// It returns stop=true when it encounters pipelines older than the provided 'since' time. 
+func processPipelines(ctx *apiCtx, pl pipelineList, org, repo, workflowName, reportJobName string, since time.Time, verbose bool, dailyByDay map[string]DailySummary) (bool, error) { + for _, p := range pl.Items { + if verbose { + logger.Printf(" pipeline %s created_at=%s", p.ID, p.CreatedAt.Format(time.RFC3339)) + } + if p.CreatedAt.Before(since) { + return true, nil + } + + wfl, err := listWorkflows(ctx, p.ID) + if err != nil { + return false, err + } + for _, w := range wfl.Items { + if w.Name != workflowName { + continue + } + jl, err := listJobs(ctx, w.ID) + if err != nil { + return false, err + } + for _, j := range jl.Items { + if j.Name != reportJobName { + continue + } + al, err := listArtifacts(ctx, org, repo, j.JobNumber, verbose) + if err != nil { + return false, err + } + dailyURL := findDailySummaryArtifactURL(al) + if dailyURL == "" { + continue + } + if err := loadAndMergeDailySummary(ctx, dailyURL, dailyByDay, verbose); err != nil { + return false, err + } + } + } + } + return false, nil +} + +func listWorkflows(ctx *apiCtx, pipelineID string) (workflowList, error) { + wfURL := fmt.Sprintf("https://circleci.com/api/v2/pipeline/%s/workflow", pipelineID) + var wfl workflowList + if err := ctx.getJSON(wfURL, &wfl); err != nil { + return workflowList{}, err + } + return wfl, nil +} + +func listJobs(ctx *apiCtx, workflowID string) (jobList, error) { + jobsURL := fmt.Sprintf("https://circleci.com/api/v2/workflow/%s/job", workflowID) + var jl jobList + if err := ctx.getJSON(jobsURL, &jl); err != nil { + return jobList{}, err + } + return jl, nil +} + +func listArtifacts(ctx *apiCtx, org, repo string, jobNumber int, verbose bool) (artifactsList, error) { + artsURL := fmt.Sprintf("https://circleci.com/api/v2/project/gh/%s/%s/%d/artifacts", url.PathEscape(org), url.PathEscape(repo), jobNumber) + var al artifactsList + if err := ctx.getJSON(artsURL, &al); err != nil { + return artifactsList{}, err + } + if verbose { + logger.Printf(" job %d artifacts: %d", 
jobNumber, len(al.Items)) + for _, a := range al.Items { + logger.Printf(" - %s", a.Path) + } + } + return al, nil +} + +func findDailySummaryArtifactURL(al artifactsList) string { + for _, a := range al.Items { + // Accept any artifact path that ends with the filename, regardless of destination prefix + if strings.HasSuffix(a.Path, "daily-summary.json") { + return a.URL + } + } + return "" +} + +func loadAndMergeDailySummary(ctx *apiCtx, dailyURL string, dailyByDay map[string]DailySummary, verbose bool) error { + data, err := ctx.getBytes(dailyURL) + if err != nil { + return err + } + var ds DailySummary + if json.Unmarshal(data, &ds) != nil || ds.Date == "" { + return nil + } + if prev, seen := dailyByDay[ds.Date]; !seen { + dailyByDay[ds.Date] = ds + if verbose { + logger.Printf(" loaded daily summary for %s (runs=%d iterations=%d)", ds.Date, ds.TotalRuns, ds.Iterations) + } + return nil + } else { + merged := prev + merged.TotalRuns += ds.TotalRuns + merged.Iterations += ds.Iterations + merged.StableTests = append(merged.StableTests, ds.StableTests...) + merged.UnstableTests = append(merged.UnstableTests, ds.UnstableTests...) + dailyByDay[ds.Date] = merged + if verbose { + logger.Printf(" merged another run for %s (+runs=%d +iters=%d) now runs=%d iters=%d", ds.Date, ds.TotalRuns, ds.Iterations, merged.TotalRuns, merged.Iterations) + } + return nil + } +} + +// aggregate reduces per-day test summaries into a single map keyed by test, +// summing runs/passes/failures and tracking which days each test appeared. +// It also computes first/last seen day boundaries for each test. 
+func aggregate(daily map[string]DailySummary) map[string]*aggStats { + result := map[string]*aggStats{} + // Collect all days + days := make([]string, 0, len(daily)) + for d := range daily { + days = append(days, d) + } + sort.Strings(days) + + for _, day := range days { + if ds, ok := daily[day]; ok { + for _, t := range ds.StableTests { + k := keyFor(t.Package, t.TestName) + s := ensureAgg(result, k, t.Package, t.TestName, day) + s.TotalRuns += t.TotalRuns + s.Passes += t.TotalRuns + } + for _, t := range ds.UnstableTests { + k := keyFor(t.Package, t.TestName) + s := ensureAgg(result, k, t.Package, t.TestName, day) + s.TotalRuns += t.TotalRuns + s.Passes += t.Passes + s.Failures += t.Failures + approx := parseDayEnd(day) + if s.LastFailureAt == nil || approx.After(*s.LastFailureAt) { + s.LastFailureAt = &approx + } + } + } + } + return result +} + +// ensureAgg returns the aggregated stats bucket for the given test key, creating it +// if it does not exist. It also records the provided day in DaysObserved (without +// duplicates) and updates FirstSeenDay/LastSeenDay bounds accordingly. +func ensureAgg(m map[string]*aggStats, key, pkg, name, day string) *aggStats { + s, ok := m[key] + if !ok { + s = &aggStats{Package: pkg, TestName: name, DaysObserved: []string{}, FirstSeenDay: day, LastSeenDay: day} + m[key] = s + } + // Append day if new + found := false + for _, d := range s.DaysObserved { + if d == day { + found = true + break + } + } + if !found { + s.DaysObserved = append(s.DaysObserved, day) + if s.FirstSeenDay == "" || day < s.FirstSeenDay { + s.FirstSeenDay = day + } + if s.LastSeenDay == "" || day > s.LastSeenDay { + s.LastSeenDay = day + } + } + return s +} + +// parseDayEnd returns the exclusive end-of-day bound for the given date +// (YYYY-MM-DD) in UTC. This is the start of the next day, suitable for +// half-open intervals: [start, end). 
+func parseDayEnd(day string) time.Time { + t, err := time.Parse("2006-01-02", day) + if err != nil { + return time.Now().UTC() + } + return t.UTC().Add(24 * time.Hour) +} + +func keyFor(pkg, name string) string { + return pkg + "::" + strings.TrimSpace(name) +} + +func readAcceptanceYAML(path string) (acceptanceYAML, error) { + var acc acceptanceYAML + data, err := os.ReadFile(path) + if err != nil { + return acc, err + } + if err := yaml.Unmarshal(data, &acc); err != nil { + return acc, err + } + if len(acc.Gates) == 0 { + return acc, errors.New("no gates found") + } + return acc, nil +} + +func findGate(acc *acceptanceYAML, id string) *gateYAML { + for i, gate := range acc.Gates { + if gate.ID == id { + return &acc.Gates[i] + } + } + return nil +} + +func indexOfGate(acc *acceptanceYAML, id string) int { + for i := range acc.Gates { + if acc.Gates[i].ID == id { + return i + } + } + return -1 +} + +func writeJSON(path string, v interface{}) error { + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + enc := json.NewEncoder(f) + enc.SetEscapeHTML(false) + enc.SetIndent("", " ") + return enc.Encode(v) +} + +func writeYAML(path string, v interface{}) error { + data, err := yaml.Marshal(v) + if err != nil { + return err + } + // Normalize line endings + data = bytes.ReplaceAll(data, []byte("\r\n"), []byte("\n")) + return os.WriteFile(path, data, 0o644) +} + +// updateFlakeShakeGateOnly updates only the flake-shake gate tests list in the original YAML bytes, +// preserving all comments and formatting elsewhere. It removes any test entries matching promoteKeys. 
+func updateFlakeShakeGateOnly(original []byte, gateID string, promoteKeys map[string]promoteCandidate) ([]byte, error) { + if len(original) == 0 { + // Fallback to structured marshal if original missing (should not happen in CI) + return yaml.Marshal(nil) + } + lines := strings.Split(string(original), "\n") + var out []string + inGates := false + inFlake := false + indentGate := "" + indentTests := "" + // simple state machine: copy all lines except tests under flake-shake that match promoteKeys + for i := 0; i < len(lines); i++ { + line := lines[i] + trimmed := strings.TrimSpace(line) + // Detect top-level 'gates:' + if strings.HasPrefix(trimmed, "gates:") { + inGates = true + out = append(out, line) + continue + } + if inGates && strings.HasPrefix(trimmed, "- id:") { + // Entering a gate block + // Determine indentation + indentGate = line[:len(line)-len(strings.TrimLeft(line, " \t"))] + // Gate id value + id := strings.TrimSpace(strings.TrimPrefix(trimmed, "- id:")) + id = strings.Trim(id, "\"')") + inFlake = (id == gateID) + out = append(out, line) + continue + } + if !inFlake { + out = append(out, line) + continue + } + // Within flake-shake gate only + if strings.HasPrefix(strings.TrimSpace(line), "tests:") && indentTests == "" { + // Capture tests indent from next line if present + out = append(out, line) + // From here, filter list items until we leave tests list (deduce by indentation decrease or new key at gate level) + pos := i + 1 + for ; pos < len(lines); pos++ { + cur := lines[pos] + curTrim := strings.TrimSpace(cur) + if curTrim == "" { + out = append(out, cur) + continue + } + // end of this gate block if next key aligns to indentGate and not list item + if !strings.HasPrefix(cur, indentGate+" ") || strings.HasPrefix(strings.TrimSpace(cur), "- id:") { + // set i so that outer loop reprocesses this line next + i = pos - 1 + break + } + // If this is a list item under tests: starts with indentGate + two spaces + two more (tests indent) + '- ' + // We 
can't rely on exact spaces; detect package line to gather block + if strings.HasPrefix(strings.TrimSpace(cur), "- package:") { + // start of a test block; buffer until next '- package:' or gate-level boundary + block := []string{cur} + pkg := strings.TrimSpace(strings.TrimPrefix(curTrim, "- package:")) + name := "" + j := pos + 1 + for ; j < len(lines); j++ { + nt := strings.TrimSpace(lines[j]) + if nt == "" { + block = append(block, lines[j]) + continue + } + // next test or end of tests + if strings.HasPrefix(nt, "- package:") || (!strings.HasPrefix(lines[j], indentGate+" ")) { + j-- + break + } + block = append(block, lines[j]) + if strings.HasPrefix(nt, "name:") { + name = strings.TrimSpace(strings.TrimPrefix(nt, "name:")) + } + } + // Decide keep or drop + k := keyFor(pkg, name) + if _, toPromote := promoteKeys[k]; !toPromote { + out = append(out, block...) + } + pos = j + continue + } + // Non-test line under tests; keep + out = append(out, cur) + } + continue + } + out = append(out, line) + } + return []byte(strings.Join(out, "\n")), nil +} diff --git a/op-acceptance-tests/cmd/flake-shake-promoter/main_test.go b/op-acceptance-tests/cmd/flake-shake-promoter/main_test.go new file mode 100644 index 0000000000000..6e2cb7698851d --- /dev/null +++ b/op-acceptance-tests/cmd/flake-shake-promoter/main_test.go @@ -0,0 +1,112 @@ +package main + +import ( + "reflect" + "slices" + "sort" + "testing" + "time" +) + +func TestKeyFor(t *testing.T) { + if got := keyFor("pkg/a", ""); got != "pkg/a::" { + t.Fatalf("empty name => got %q", got) + } + if got := keyFor("pkg/a", " TestFoo "); got != "pkg/a::TestFoo" { + t.Fatalf("trim => got %q", got) + } +} + +func TestParseDayEnd(t *testing.T) { + end := parseDayEnd("2025-01-02") + want := time.Date(2025, 1, 3, 0, 0, 0, 0, time.UTC) + if !end.Equal(want) { + t.Fatalf("end != start-of-next-day: got %v want %v", end, want) + } +} + +func TestEnsureAggAndAggregate(t *testing.T) { + // ensureAgg behavior + m := map[string]*aggStats{} 
+ s := ensureAgg(m, "pkg::T1", "pkg", "T1", "2025-01-01") + if s.FirstSeenDay != "2025-01-01" || s.LastSeenDay != "2025-01-01" { + t.Fatalf("first/last not set") + } + s2 := ensureAgg(m, "pkg::T1", "pkg", "T1", "2025-01-02") + if s2 != s { + t.Fatalf("ensureAgg did not return same pointer for same key") + } + if !slices.Contains(s.DaysObserved, "2025-01-01") || !slices.Contains(s.DaysObserved, "2025-01-02") { + t.Fatalf("days observed missing: %v", s.DaysObserved) + } + + // aggregate behavior across days + day1 := DailySummary{Date: "2025-01-01"} + day1.UnstableTests = append(day1.UnstableTests, struct { + TestName string `json:"test_name"` + Package string `json:"package"` + TotalRuns int `json:"total_runs"` + Passes int `json:"passes"` + Failures int `json:"failures"` + PassRate float64 `json:"pass_rate"` + }{TestName: "T1", Package: "pkg", TotalRuns: 10, Passes: 10, Failures: 0}) + + day2 := DailySummary{Date: "2025-01-02"} + day2.UnstableTests = append(day2.UnstableTests, struct { + TestName string `json:"test_name"` + Package string `json:"package"` + TotalRuns int `json:"total_runs"` + Passes int `json:"passes"` + Failures int `json:"failures"` + PassRate float64 `json:"pass_rate"` + }{TestName: "T1", Package: "pkg", TotalRuns: 5, Passes: 4, Failures: 1}) + + agg := aggregate(map[string]DailySummary{ + day1.Date: day1, + day2.Date: day2, + }) + a := agg["pkg::T1"] + if a == nil || a.TotalRuns != 15 || a.Passes != 14 || a.Failures != 1 { + t.Fatalf("bad aggregate: %+v", a) + } + // DaysObserved should be sorted and include both + wantDays := []string{"2025-01-01", "2025-01-02"} + gotDays := append([]string(nil), a.DaysObserved...) 
+ sort.Strings(gotDays) + if !reflect.DeepEqual(gotDays, wantDays) { + t.Fatalf("days mismatch: got %v want %v", gotDays, wantDays) + } +} + +func TestSelectPromotionCandidates(t *testing.T) { + now := time.Date(2025, 1, 10, 0, 0, 0, 0, time.UTC) + flake := map[string]testInfo{"pkg::T1": {Timeout: "1m"}} + agg := map[string]*aggStats{ + "pkg::T1": { + Package: "pkg", + TestName: "T1", + TotalRuns: 100, + Passes: 100, + Failures: 0, + FirstSeenDay: "2025-01-01", + DaysObserved: []string{"2025-01-01", "2025-01-02"}, + }, + } + cands, reasons := selectPromotionCandidates(agg, flake, 50, 0.01, true, 3, now) + if len(reasons) != 0 { + t.Fatalf("unexpected reasons: %v", reasons) + } + if len(cands) != 1 || cands[0].Package != "pkg" || cands[0].TestName != "T1" { + t.Fatalf("unexpected candidates: %+v", cands) + } +} + +func TestComputeUpdatedConfig(t *testing.T) { + cfg := acceptanceYAML{Gates: []gateYAML{{ID: "flake-shake", Tests: []testEntry{{Package: "pkg", Name: "T1"}, {Package: "pkg", Name: "T2"}}}}} + cands := []promoteCandidate{{Package: "pkg", TestName: "T1"}} + updated := computeUpdatedConfig(cfg, "flake-shake", cands) + tests := updated.Gates[0].Tests + if len(tests) != 1 || tests[0].Name != "T2" { + t.Fatalf("expected only T2 to remain, got %+v", tests) + } +} diff --git a/op-acceptance-tests/cmd/main.go b/op-acceptance-tests/cmd/main.go index afec1898eee17..6bb15b918e8df 100644 --- a/op-acceptance-tests/cmd/main.go +++ b/op-acceptance-tests/cmd/main.go @@ -22,6 +22,19 @@ const ( defaultAcceptor = "op-acceptor" ) +// AcceptorConfig holds all configuration for running op-acceptor +type AcceptorConfig struct { + Orchestrator string + Devnet string + Gate string + TestDir string + Validators string + LogLevel string + Acceptor string + Serial bool + ShowProgress bool +} + var ( // Command line flags orchestratorFlag = &cli.StringFlag{ @@ -79,6 +92,18 @@ var ( Value: false, EnvVars: []string{"REUSE_DEVNET"}, } + serialFlag = &cli.BoolFlag{ + Name: "serial", + 
Usage: "Run the acceptance tests in serial mode", + Value: false, + EnvVars: []string{"SERIAL"}, + } + showProgressFlag = &cli.BoolFlag{ + Name: "show-progress", + Usage: "Show progress information during test execution", + Value: false, + EnvVars: []string{"SHOW_PROGRESS"}, + } ) func main() { @@ -95,6 +120,8 @@ func main() { kurtosisDirFlag, acceptorFlag, reuseDevnetFlag, + serialFlag, + showProgressFlag, }, Action: runAcceptanceTest, } @@ -116,6 +143,8 @@ func runAcceptanceTest(c *cli.Context) error { kurtosisDir := c.String(kurtosisDirFlag.Name) acceptor := c.String(acceptorFlag.Name) reuseDevnet := c.Bool(reuseDevnetFlag.Name) + serial := c.Bool(serialFlag.Name) + showProgress := c.Bool(showProgressFlag.Name) // Validate inputs based on orchestrator type if orchestrator != "sysgo" && orchestrator != "sysext" { @@ -185,7 +214,18 @@ func runAcceptanceTest(c *cli.Context) error { // Run acceptance tests steps = append(steps, func(ctx context.Context) error { - return runOpAcceptor(ctx, tracer, orchestrator, devnet, gate, absTestDir, absValidators, logLevel, acceptor) + config := AcceptorConfig{ + Orchestrator: orchestrator, + Devnet: devnet, + Gate: gate, + TestDir: absTestDir, + Validators: absValidators, + LogLevel: logLevel, + Acceptor: acceptor, + Serial: serial, + ShowProgress: showProgress, + } + return runOpAcceptor(ctx, tracer, config) }, ) @@ -216,7 +256,7 @@ func deployDevnet(ctx context.Context, tracer trace.Tracer, devnet string, kurto return nil } -func runOpAcceptor(ctx context.Context, tracer trace.Tracer, orchestrator string, devnet string, gate string, testDir string, validators string, logLevel string, acceptor string) error { +func runOpAcceptor(ctx context.Context, tracer trace.Tracer, config AcceptorConfig) error { ctx, span := tracer.Start(ctx, "run acceptance test") defer span.End() @@ -224,29 +264,46 @@ func runOpAcceptor(ctx context.Context, tracer trace.Tracer, orchestrator string // Build the command arguments args := []string{ - 
"--testdir", testDir, - "--gate", gate, - "--validators", validators, - "--log.level", logLevel, - "--orchestrator", orchestrator, + "--testdir", config.TestDir, + "--gate", config.Gate, + "--validators", config.Validators, + "--log.level", config.LogLevel, + "--orchestrator", config.Orchestrator, + } + if config.Serial { + args = append(args, "--serial") + } + if config.ShowProgress { + args = append(args, "--show-progress") + args = append(args, "--progress-interval", "20s") } // Handle devnet parameter based on orchestrator type - if orchestrator == "sysext" && devnet != "" { + if config.Orchestrator == "sysext" && config.Devnet != "" { var devnetEnvURL string - if strings.HasPrefix(devnet, "kt://") || strings.HasPrefix(devnet, "ktnative://") { + if strings.HasPrefix(config.Devnet, "kt://") || strings.HasPrefix(config.Devnet, "ktnative://") { // Already a URL or file path - use directly - devnetEnvURL = devnet + devnetEnvURL = config.Devnet } else { // Simple name - wrap as Kurtosis URL - devnetEnvURL = fmt.Sprintf("kt://%s-devnet", devnet) + devnetEnvURL = fmt.Sprintf("kt://%s-devnet", config.Devnet) } args = append(args, "--devnet-env-url", devnetEnvURL) } - acceptorCmd := exec.CommandContext(ctx, acceptor, args...) + // For sysgo, we allow skips + if config.Orchestrator == "sysgo" { + args = append(args, "--allow-skips") + } + + // Exclude quarantined tests by default in all runs except when explicitly running the flake-shake gate + if config.Gate != "flake-shake" { + args = append(args, "--exclude-gates", "flake-shake") + } + + acceptorCmd := exec.CommandContext(ctx, config.Acceptor, args...) 
acceptorCmd.Env = env acceptorCmd.Stdout = os.Stdout acceptorCmd.Stderr = os.Stderr diff --git a/op-acceptance-tests/justfile b/op-acceptance-tests/justfile index 080dcaf054835..afdf3d4bafd86 100644 --- a/op-acceptance-tests/justfile +++ b/op-acceptance-tests/justfile @@ -1,47 +1,45 @@ -REPO_ROOT := `realpath ..` +REPO_ROOT := `realpath ..` # path to the root of the optimism monorepo KURTOSIS_DIR := REPO_ROOT + "/kurtosis-devnet" -ACCEPTOR_VERSION := env_var_or_default("ACCEPTOR_VERSION", "v1.1.1") +ACCEPTOR_VERSION := env_var_or_default("ACCEPTOR_VERSION", "v3.5.0") DOCKER_REGISTRY := env_var_or_default("DOCKER_REGISTRY", "us-docker.pkg.dev/oplabs-tools-artifacts/images") ACCEPTOR_IMAGE := env_var_or_default("ACCEPTOR_IMAGE", DOCKER_REGISTRY + "/op-acceptor:" + ACCEPTOR_VERSION) # Default recipe - runs acceptance tests default: - @just acceptance-test simple base + @just acceptance-test "" base -holocene: - @just acceptance-test simple holocene - -isthmus: - @just acceptance-test isthmus isthmus +jovian: + @just acceptance-test jovian jovian interop: - @just acceptance-test interop interop + @just acceptance-test "" interop # Run acceptance tests with mise-managed binary -acceptance-test devnet="" gate="holocene": +# Usage: just acceptance-test [devnet] [gate] +# Examples: +# just acceptance-test "" base # In-process (sysgo) with specific gate +# just acceptance-test "" "" # In-process gateless mode (all tests) +# just acceptance-test "simple" base # External devnet with specific gate +# just acceptance-test "simple" "" # External devnet gateless mode (all tests) +acceptance-test devnet="" gate="base": #!/usr/bin/env bash set -euo pipefail - # Check if mise is installed - if command -v mise >/dev/null; then - echo "mise is installed" - else - echo "Mise not installed, falling back to Docker..." 
- just acceptance-test-docker {{devnet}} {{gate}} - fi + # Determine mode and orchestrator + GATELESS_MODE=$([[ "{{gate}}" == "" ]] && echo "true" || echo "false") + ORCHESTRATOR=$([[ "{{devnet}}" == "" ]] && echo "sysgo" || echo "sysext") - if [[ "{{devnet}}" == "" ]]; then - echo -e "DEVNET: in-memory, GATE: {{gate}}\n" + # Display mode information + if [[ "$GATELESS_MODE" == "true" ]]; then + echo -e "DEVNET: $([[ "$ORCHESTRATOR" == "sysgo" ]] && echo "in-memory" || echo "{{devnet}}") ($ORCHESTRATOR), MODE: gateless (all tests)\n" else - echo -e "DEVNET: {{devnet}}, GATE: {{gate}}\n" + echo -e "DEVNET: $([[ "$ORCHESTRATOR" == "sysgo" ]] && echo "in-memory" || echo "{{devnet}}") ($ORCHESTRATOR), GATE: {{gate}}\n" fi - # For sysgo orchestrator (in-process testing) ensure: - # - contracts are built - # - cannon dependencies are built - # Note: build contracts only if not in CI (CI jobs already take care of this) - if [[ "{{devnet}}" == "" && -z "${CIRCLECI:-}" ]]; then + # Build dependencies for sysgo (in-process) mode if not in CI + # In CI jobs already take care of this, so we skip it. + if [[ "$ORCHESTRATOR" == "sysgo" && -z "${CIRCLECI:-}" ]]; then echo "Building contracts (local build)..." cd {{REPO_ROOT}} echo " - Updating submodules..." @@ -63,46 +61,74 @@ acceptance-test devnet="" gate="holocene": fi fi - # Try to install op-acceptor using mise + cd {{REPO_ROOT}}/op-acceptance-tests + + # Check mise installation and fallback to Docker if needed + if ! command -v mise >/dev/null; then + echo "Mise not installed, falling back to Docker..." + just acceptance-test-docker {{devnet}} {{gate}} + exit 0 + fi + + # Install op-acceptor using mise if ! mise install op-acceptor; then echo "WARNING: Failed to install op-acceptor with mise, falling back to Docker..." 
just acceptance-test-docker {{devnet}} {{gate}} exit 0 fi - # Print which binary is being used + # Set binary path and log level BINARY_PATH=$(mise which op-acceptor) echo "Using mise-managed binary: $BINARY_PATH" + LOG_LEVEL="$(echo "${LOG_LEVEL:-info}" | grep -E '^(debug|info|warn|error)$' || echo 'info')" + echo "LOG_LEVEL: $LOG_LEVEL" + + # Deploy devnet for sysext if it's a simple name + if [[ "$ORCHESTRATOR" == "sysext" && ! "{{devnet}}" =~ ^(kt://|ktnative://|/) ]]; then + echo "Deploying devnet {{devnet}}..." + just {{KURTOSIS_DIR}}/{{devnet}}-devnet || true + fi - # Build the command with conditional parameters - CMD_ARGS=( - "go" "run" "cmd/main.go" - "--gate" "{{gate}}" - "--testdir" "{{REPO_ROOT}}" - "--validators" "./acceptance-tests.yaml" - "--log.level" "${LOG_LEVEL:-info}" - "--acceptor" "$BINARY_PATH" - ) - - # Set orchestrator and devnet based on input - if [[ "{{devnet}}" == "" ]]; then - # In-process testing - CMD_ARGS+=("--orchestrator" "sysgo") + # Build command arguments based on mode + if [[ "$GATELESS_MODE" == "true" ]]; then + # Gateless mode - use binary directly + CMD_ARGS=( + "$BINARY_PATH" + "--testdir" "{{REPO_ROOT}}/op-acceptance-tests/..." 
+ "--validators" "./acceptance-tests.yaml" + "--exclude-gates" "flake-shake" + "--allow-skips" + "--timeout" "90m" + "--default-timeout" "10m" + "--orchestrator" "$ORCHESTRATOR" + "--show-progress" + ) else - # External devnet testing - CMD_ARGS+=("--orchestrator" "sysext") - CMD_ARGS+=("--devnet" "{{devnet}}") - # Include kurtosis-dir for devnet deployment - CMD_ARGS+=("--kurtosis-dir" "{{KURTOSIS_DIR}}") + # Gate mode - use go run with acceptor binary + CMD_ARGS=( + "go" "run" "cmd/main.go" + "--gate" "{{gate}}" + "--testdir" "{{REPO_ROOT}}" + "--validators" "./acceptance-tests.yaml" + "--acceptor" "$BINARY_PATH" + "--log.level" "${LOG_LEVEL}" + "--orchestrator" "$ORCHESTRATOR" + "--show-progress" + ) + fi + + # Add sysext-specific arguments + if [[ "$ORCHESTRATOR" == "sysext" ]]; then + CMD_ARGS+=("--devnet" "{{devnet}}" "--kurtosis-dir" "{{KURTOSIS_DIR}}" "--serial") fi # Execute the command - cd {{REPO_ROOT}}/op-acceptance-tests "${CMD_ARGS[@]}" + # Run acceptance tests against a devnet using Docker (fallback if needed) -acceptance-test-docker devnet="simple" gate="holocene": +acceptance-test-docker devnet="simple" gate="base": #!/usr/bin/env bash set -euo pipefail @@ -122,6 +148,7 @@ acceptance-test-docker devnet="simple" gate="holocene": --testdir "/go/src/github.com/ethereum-optimism/optimism" \ --gate {{gate}} \ --validators /acceptance-tests.yaml \ + $( [[ "{{gate}}" != "flake-shake" ]] && echo --exclude-gates flake-shake ) \ --log.level debug @@ -129,3 +156,23 @@ acceptance-test-docker devnet="simple" gate="holocene": clean: kurtosis clean --all rm -rf tests/interop/loadtest/artifacts + + +# Build, vet, lint and test Go code in ./cmd +cmd-check: + #!/usr/bin/env bash + set -euo pipefail + + cd {{REPO_ROOT}}/op-acceptance-tests + + echo "Downloading Go modules..." + go mod download + + echo "Building ./cmd/..." + go build ./cmd/... + + echo "Running go vet on ./cmd/..." + go vet ./cmd/... + + echo "Running unit tests for ./cmd/..." + go test -v ./cmd/... 
diff --git a/op-acceptance-tests/scripts/ci_flake_shake_calc_iterations.sh b/op-acceptance-tests/scripts/ci_flake_shake_calc_iterations.sh new file mode 100644 index 0000000000000..46ddcfee91011 --- /dev/null +++ b/op-acceptance-tests/scripts/ci_flake_shake_calc_iterations.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +set -euo pipefail + +# ci_flake_shake_calc_iterations.sh +# +# Purpose: +# Compute the number of iterations each CircleCI parallel worker should run +# and export FLAKE_SHAKE_ITERATIONS and FLAKE_SHAKE_WORKER_ID into $BASH_ENV. +# +# Usage: +# ci_flake_shake_calc_iterations.sh [WORKERS] [WORKER_ID] +# +# Arguments: +# TOTAL_ITER (required): total iterations across all workers. +# WORKERS (optional): number of parallel workers (defaults to $CIRCLE_NODE_TOTAL or 1). +# WORKER_ID (optional): 1-based worker id (defaults to $((CIRCLE_NODE_INDEX+1)) or 1). +# +# Notes: +# - Remainder iterations are distributed one-by-one to the first N workers. + +TOTAL_ITER=${1:?TOTAL_ITER is required} +WORKERS=${2:-${CIRCLE_NODE_TOTAL:-1}} +WORKER_ID=${3:-$((${CIRCLE_NODE_INDEX:-0} + 1))} + +ITER_PER_WORKER=$(( TOTAL_ITER / WORKERS )) +REMAINDER=$(( TOTAL_ITER % WORKERS )) + +# Distribute the remainder fairly: the first $REMAINDER workers get one extra iteration +if [ "$WORKER_ID" -le "$REMAINDER" ] && [ "$REMAINDER" -ne 0 ]; then + ITER_COUNT=$(( ITER_PER_WORKER + 1 )) +else + ITER_COUNT=$ITER_PER_WORKER +fi + +echo "Worker $WORKER_ID running $ITER_COUNT of $TOTAL_ITER iterations" +if [ -n "${BASH_ENV:-}" ]; then + echo "export FLAKE_SHAKE_ITERATIONS=$ITER_COUNT" >> "$BASH_ENV" + echo "export FLAKE_SHAKE_WORKER_ID=$WORKER_ID" >> "$BASH_ENV" +fi + +exit 0 + + diff --git a/op-acceptance-tests/scripts/ci_flake_shake_generate_summary.sh b/op-acceptance-tests/scripts/ci_flake_shake_generate_summary.sh new file mode 100644 index 0000000000000..3da11928ce193 --- /dev/null +++ b/op-acceptance-tests/scripts/ci_flake_shake_generate_summary.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash +set 
-euo pipefail + +# ci_flake_shake_generate_summary.sh +# +# Purpose: +# Used in CI by the flake-shake report job to transform the aggregated +# flake-shake report (JSON) into two derivative artifacts: +# 1) daily-summary.json – compact daily snapshot for downstream tooling +# 2) promotion-ready.json – list of tests with 100% pass rate (promotion candidates) +# +# Usage: ci_flake_shake_generate_summary.sh [REPORT_JSON] [OUT_DIR] +# $1 REPORT_JSON (optional) – path to flake-shake aggregated report JSON +# Default: final-report/flake-shake-report.json +# $2 OUT_DIR (optional) – directory where outputs will be written +# Default: final-report +# +# Side-effects (env): +# Exports UNSTABLE_COUNT into $BASH_ENV for later CI steps (if available). +# +# Requirements: +# - jq must be available in PATH +# +# Notes: +# - This script is intentionally simple and idempotent; it does not mutate +# the input report and only writes to OUT_DIR. + +REPORT_JSON=${1:-final-report/flake-shake-report.json} +OUT_DIR=${2:-final-report} + +if [ ! 
-f "$REPORT_JSON" ]; then + echo "ERROR: Report not found at $REPORT_JSON" >&2 + exit 1 +fi + +mkdir -p "$OUT_DIR" + +# Print a short human-readable summary to the job logs +echo "=== Flake-Shake Results ===" +STABLE=$(jq '[.tests[] | select(.recommendation == "STABLE")] | length' "$REPORT_JSON") +UNSTABLE=$(jq '[.tests[] | select(.recommendation == "UNSTABLE")] | length' "$REPORT_JSON") +echo "✅ STABLE: $STABLE tests" +echo "⚠️ UNSTABLE: $UNSTABLE tests" +if [ "$UNSTABLE" -gt 0 ]; then + echo "Unstable tests:" + jq -r '.tests[] | select(.recommendation == "UNSTABLE") | " - \(.test_name) (\(.pass_rate)%)"' "$REPORT_JSON" +fi + +# Write daily summary JSON (compact per-day snapshot) +jq '{date, gate, total_runs, iterations, + totals: { + stable: ([.tests[] | select(.recommendation=="STABLE")] | length), + unstable: ([.tests[] | select(.recommendation=="UNSTABLE")] | length) + }, + stable_tests: [ + .tests[] | select(.recommendation=="STABLE") | + {test_name, package, total_runs, pass_rate} + ], + unstable_tests: [ + .tests[] | select(.recommendation=="UNSTABLE") | + {test_name, package, total_runs, passes, failures, pass_rate} + ] + }' "$REPORT_JSON" > "$OUT_DIR/daily-summary.json" + +# Write promotion readiness (100% pass) JSON +jq '{ready: [.tests[] | select(.recommendation=="STABLE") | {test_name, package, total_runs, pass_rate, avg_duration, min_duration, max_duration}]}' "$REPORT_JSON" > "$OUT_DIR/promotion-ready.json" + +# Export UNSTABLE_COUNT for later CI steps (if BASH_ENV is present) +if [ -n "${BASH_ENV:-}" ]; then + echo "export UNSTABLE_COUNT=$UNSTABLE" >> "$BASH_ENV" +fi + +echo "Wrote: $OUT_DIR/daily-summary.json, $OUT_DIR/promotion-ready.json" + diff --git a/op-acceptance-tests/scripts/ci_flake_shake_prepare_slack.sh b/op-acceptance-tests/scripts/ci_flake_shake_prepare_slack.sh new file mode 100644 index 0000000000000..33acc57194aa2 --- /dev/null +++ b/op-acceptance-tests/scripts/ci_flake_shake_prepare_slack.sh @@ -0,0 +1,85 @@ +#!/usr/bin/env bash 
+set -euo pipefail + +# ci_flake_shake_prepare_slack.sh +# +# Purpose: +# Used in CI by the flake-shake promote job to parse the promoter output +# (`promotion-ready.json`) and prepare environment variables consumed by the +# Slack orb step. +# +# Inputs (positional): +# $1 PROMO_JSON – path to the promoter output `promotion-ready.json`. +# Default: ./final-promotion/promotion-ready.json +# +# Outputs (env): +# Exports into $BASH_ENV (for subsequent steps): +# - SLACK_BLOCKS_PAYLOAD: compact JSON array of Slack Block Kit blocks for the message body +# +# Requirements: +# - jq must be available in PATH +# +# Block count constraints: +# Slack allows max 50 blocks per message. Our layout uses: +# - 3 header/link/divider blocks +# - 4 blocks per candidate (2 sections + 1 context + 1 divider) +# - +1 optional overflow notice block +# This caps candidates at 11; if more, we add a final notice linking to the job. + +PROMO_JSON=${1:-./final-promotion/promotion-ready.json} + +SLACK_BLOCKS="[]" +if [ -f "$PROMO_JSON" ]; then + # Build Block Kit blocks (header + link + divider + per-candidate sections) + SLACK_BLOCKS=$(jq -c \ + --arg url "${CIRCLE_BUILD_URL:-}" \ + --slurpfile meta "${PROMO_JSON%/*}/metadata.json" ' + def name_or_pkg(t): (if ((t.test_name|tostring)|length) == 0 then "(package)" else t.test_name end); + def testblocks(t): [ + {"type":"section","fields":[ + {"type":"mrkdwn","text":"*Test:*\n\(name_or_pkg(t))"}, + {"type":"mrkdwn","text":"*Package:*\n\(t.package)"} + ]}, + {"type":"section","fields":[ + {"type":"mrkdwn","text":"*Runs:*\n\(t.total_runs)"}, + {"type":"mrkdwn","text":"*Pass Rate:*\n\((t.pass_rate|tostring))%"} + ]}, + {"type":"context","elements":[{"type":"mrkdwn","text": t.package }]}, + {"type":"divider"} + ]; + . 
as $root | + ($meta | if length>0 then .[0] else {} end) as $meta | + ($meta.date // "") as $date | + ($meta.gate // "flake-shake") as $gate | + ($meta.pr_url // "") as $pr_url | + ( if (($meta.flake_gate_tests // 0) == 0) then + [ + {"type":"header","text":{"type":"plain_text","text":":partywizard: Acceptance Tests: Flake-Shake — Gate Empty"}}, + {"type":"section","text":{"type":"mrkdwn","text":"No tests in flake-shake gate; nothing to promote. Artifacts: <\($url)|CircleCI Job>"}} + ] + elif ($root.candidates|length) == 0 then + [ + {"type":"header","text":{"type":"plain_text","text":":partywizard: Acceptance Tests: No Flake-Shake Promotion Candidates — \(if $date != "" then $date else (now|strftime("%Y-%m-%d")) end)"}}, + {"type":"section","text":{"type":"mrkdwn","text":"No promotions today. Artifacts: <\($url)|CircleCI Job>"}} + ] + else + ( + [ + {"type":"header","text":{"type":"plain_text","text":":partywizard: Acceptance Tests: Flake-Shake Promotion Candidates (\($root.candidates|length)) — \(if $date != "" then $date else (now|strftime("%Y-%m-%d")) end)"}}, + {"type":"section","text":{"type":"mrkdwn","text": (if $pr_url != "" then "PR: <\($pr_url)|Open PR> • Artifacts: <\($url)|CircleCI Job>" else "Artifacts: <\($url)|CircleCI Job>" end) }}, + {"type":"divider"} + ] + ) + + ( ($root.candidates[:11] | map(testblocks(.)) | add) ) + + ( if ($root.candidates|length) > 11 then + [ {"type":"section","text":{"type":"mrkdwn","text":"Too many tests; see the report: <\($url)|CircleCI Job>"}} ] + else [] end ) + end ) + ' "$PROMO_JSON") +fi + +echo "export SLACK_BLOCKS_PAYLOAD='$SLACK_BLOCKS'" >> "$BASH_ENV" + +echo "Prepared Slack env: blocks generated" + +echo "[debug] SLACK_BLOCKS: $SLACK_BLOCKS" diff --git a/op-acceptance-tests/scripts/generate-flaky-tests-report.sh b/op-acceptance-tests/scripts/generate-flaky-tests-report.sh index e52bc08b878ee..1f4730ee6b137 100755 --- a/op-acceptance-tests/scripts/generate-flaky-tests-report.sh +++ 
b/op-acceptance-tests/scripts/generate-flaky-tests-report.sh @@ -95,6 +95,55 @@ API_RESPONSE=$(echo "$API_RESPONSE" | jq '.flaky_tests = (.flaky_tests | map(sel echo "$API_RESPONSE" > "$OUTPUT_DIR/flaky_tests.json" echo "Raw API response saved to $OUTPUT_DIR/flaky_tests.json" +# Verify that each flaky test's job belongs to the target branch by checking its pipeline branch +echo "Verifying pipeline branches for each flaky test..." +FILTERED_JSON="$OUTPUT_DIR/flaky_tests.filtered.json" +PIPELINE_BRANCH_CACHE=$(mktemp) +FILTERED_ENTRIES=$(mktemp) + +cleanup_branch_filter() { + rm -f "$PIPELINE_BRANCH_CACHE" "$FILTERED_ENTRIES" || true +} +trap cleanup_branch_filter EXIT + +get_branch_for_pipeline_number() { + local pipeline_number="$1" + # Check cache + local cached + cached=$(awk -v num="$pipeline_number" '$1==num {print $2; found=1} END{ if(!found) exit 1 }' "$PIPELINE_BRANCH_CACHE" 2>/dev/null || true) + if [ -n "$cached" ]; then + echo "$cached" + return 0 + fi + # Fetch from CircleCI API (project pipeline by number) + local resp + resp=$(curl -s -H "Circle-Token: $CIRCLE_API_TOKEN" "https://circleci.com/api/v2/project/gh/$ORG_NAME/$REPO_NAME/pipeline/$pipeline_number") + if [ -z "$resp" ]; then + echo "" + return 0 + fi + local branch + branch=$(echo "$resp" | jq -r '.vcs.branch // .branch // empty') + printf "%s %s\n" "$pipeline_number" "${branch}" >> "$PIPELINE_BRANCH_CACHE" + echo "$branch" +} + +: > "$FILTERED_ENTRIES" +while IFS= read -r entry; do + pipeline_number=$(echo "$entry" | jq -r '.pipeline_number // empty') + if [ -z "$pipeline_number" ]; then + continue + fi + branch=$(get_branch_for_pipeline_number "$pipeline_number") + if [ "$branch" = "$BRANCH" ]; then + echo "$entry" >> "$FILTERED_ENTRIES" + fi +done < <(jq -c '.flaky_tests[]' "$OUTPUT_DIR/flaky_tests.json") + +jq -s '{flaky_tests: .}' "$FILTERED_ENTRIES" > "$FILTERED_JSON" +API_RESPONSE=$(cat "$FILTERED_JSON") +echo "Filtered API response saved to $FILTERED_JSON" + # Check if the response 
contains flaky_tests if ! echo "$API_RESPONSE" | jq -e '.flaky_tests' > /dev/null 2>&1; then echo "Error: Invalid JSON response or missing 'flaky_tests' field" @@ -103,21 +152,21 @@ if ! echo "$API_RESPONSE" | jq -e '.flaky_tests' > /dev/null 2>&1; then exit 1 fi -# Check if we have any flaky tests -if ! echo "$API_RESPONSE" | jq -e '.flaky_tests | length > 0' > /dev/null 2>&1; then - echo "No flaky tests found for branch $BRANCH" - echo "API Response:" - echo "$API_RESPONSE" +# Check if we have any flaky tests after branch verification +if ! jq -e '.flaky_tests | length > 0' "$FILTERED_JSON" > /dev/null 2>&1; then + echo "No flaky tests found for branch $BRANCH after verifying pipeline branches" + echo "Filtered Response:" + cat "$FILTERED_JSON" exit 0 fi # Print the number of flaky tests found -NUM_TESTS=$(echo "$API_RESPONSE" | jq '.flaky_tests | length') +NUM_TESTS=$(jq '.flaky_tests | length' "$FILTERED_JSON") echo "Found $NUM_TESTS flaky tests" # Generate CSV report echo "Generating CSV report..." -jq -r '.flaky_tests[] | [ +jq -r '.flaky_tests | sort_by(.times_flaked) | reverse | .[] | [ .times_flaked, (.test_name | @json), (.classname | @json), @@ -128,7 +177,7 @@ jq -r '.flaky_tests[] | [ ("https://app.circleci.com/pipelines/github/" + "'"$ORG_NAME"'" + "/" + "'"$REPO_NAME"'" + "/" + (.pipeline_number | tostring) + "/workflows/" + .workflow_id + "/jobs/" + (.job_number | tostring) | @json), (.workflow_created_at | @json), (.workflow_created_at | @json) -] | @csv' "$OUTPUT_DIR/flaky_tests.json" > "$OUTPUT_DIR/flaky_tests.csv" +] | @csv' "$FILTERED_JSON" > "$OUTPUT_DIR/flaky_tests.csv" # Check if CSV file was generated and has content if [ ! -s "$OUTPUT_DIR/flaky_tests.csv" ]; then @@ -156,9 +205,13 @@ cat > "$OUTPUT_DIR/flaky_tests.html" << EOF

Flaky Tests Report

+

+ Note: These tests are potentially flaky. They may fail for reasons other than the test itself, such as network issues, devnet issues, + interference from other tests, etc. Be mindful of this when interpreting the results and investigating the failures. +

-

Branch: $BRANCH

-

Total flaky tests: $NUM_TESTS

+

Branch: $BRANCH

+

Total flaky tests: $NUM_TESTS

@@ -170,11 +223,11 @@ cat > "$OUTPUT_DIR/flaky_tests.html" << EOF - + - $(jq -r '.flaky_tests[] | ""' "$OUTPUT_DIR/flaky_tests.json") + $(jq -r '.flaky_tests | sort_by(.times_flaked) | reverse | .[] | ""' "$FILTERED_JSON")
Workflow Name Job Number Pipeline NumberBuild URLJob URL First Flaked At Last Flaked At
\(.times_flaked)\(.test_name)\(.classname)\(.job_name)\(.workflow_name)\(.job_number)\(.pipeline_number)View Build\(.workflow_created_at)\(.workflow_created_at)
\(.times_flaked)\(.test_name)\(.classname)\(.job_name)\(.workflow_name)\(.job_number)\(.pipeline_number)View Job\(.workflow_created_at)\(.workflow_created_at)
@@ -192,4 +245,4 @@ echo "HTML report generated" echo "Top 10 Flaky Tests for branch $BRANCH" echo "==========================================" jq -r '.flaky_tests | sort_by(.times_flaked) | reverse | .[0:10] | .[] | "\(.times_flaked)x: \(.test_name)"' \ - "$OUTPUT_DIR/flaky_tests.json" \ No newline at end of file + "$FILTERED_JSON" \ No newline at end of file diff --git a/op-acceptance-tests/tests/base/deposit/deposit_test.go b/op-acceptance-tests/tests/base/deposit/deposit_test.go index 7d56c3f0dbc35..5ca47d2499a62 100644 --- a/op-acceptance-tests/tests/base/deposit/deposit_test.go +++ b/op-acceptance-tests/tests/base/deposit/deposit_test.go @@ -14,10 +14,6 @@ import ( supervisorTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithMinimal()) -} - func TestL1ToL2Deposit(gt *testing.T) { // Create a test environment using op-devstack t := devtest.SerialT(gt) @@ -30,6 +26,8 @@ func TestL1ToL2Deposit(gt *testing.T) { fundingAmount := eth.ThreeHundredthsEther alice := sys.FunderL1.NewFundedEOA(fundingAmount) t.Log("Alice L1 address", alice.Address()) + + alice.WaitForBalance(fundingAmount) initialBalance := alice.GetBalance() t.Log("Alice L1 balance", initialBalance) @@ -48,7 +46,7 @@ func TestL1ToL2Deposit(gt *testing.T) { args := portal.DepositTransaction(alice.Address(), depositAmount, 300_000, false, []byte{}) - receipt := contract.Write(alice, args, txplan.WithValue(depositAmount.ToBig())) + receipt := contract.Write(alice, args, txplan.WithValue(depositAmount)) gasPrice := receipt.EffectiveGasPrice diff --git a/op-acceptance-tests/tests/base/deposit/init_test.go b/op-acceptance-tests/tests/base/deposit/init_test.go new file mode 100644 index 0000000000000..22f5bb598421d --- /dev/null +++ b/op-acceptance-tests/tests/base/deposit/init_test.go @@ -0,0 +1,11 @@ +package deposit + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/presets" +) + +func 
TestMain(m *testing.M) { + presets.DoMain(m, presets.WithMinimal()) +} diff --git a/op-acceptance-tests/tests/base/disputegame_v2/init_test.go b/op-acceptance-tests/tests/base/disputegame_v2/init_test.go new file mode 100644 index 0000000000000..7f14f8776a715 --- /dev/null +++ b/op-acceptance-tests/tests/base/disputegame_v2/init_test.go @@ -0,0 +1,11 @@ +package disputegame_v2 + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/presets" +) + +func TestMain(m *testing.M) { + presets.DoMain(m, presets.WithMinimal(), presets.WithDisputeGameV2()) +} diff --git a/op-acceptance-tests/tests/base/disputegame_v2/smoke_test.go b/op-acceptance-tests/tests/base/disputegame_v2/smoke_test.go new file mode 100644 index 0000000000000..3824d1e62e14d --- /dev/null +++ b/op-acceptance-tests/tests/base/disputegame_v2/smoke_test.go @@ -0,0 +1,27 @@ +package disputegame_v2 + +import ( + "testing" + + challengerTypes "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" +) + +func TestSmoke(gt *testing.T) { + gt.Skip("TODO(#17257): Re-enable once opcm.deploy supports v2 dispute games") + t := devtest.SerialT(gt) + sys := presets.NewMinimal(t) + require := t.Require() + dgf := sys.DisputeGameFactory() + + gameArgs := dgf.GameArgs(challengerTypes.PermissionedGameType) + require.NotEmpty(gameArgs, "game args is must be set for permissioned v2 dispute games") + gameArgs = dgf.GameArgs(challengerTypes.CannonGameType) + require.NotEmpty(gameArgs, "game args is must be set for cannon v2 dispute games") + + permissionedGame := dgf.GameImpl(challengerTypes.PermissionedGameType) + require.NotEmpty(permissionedGame.Address, "permissioned game impl must be set") + cannonGame := dgf.GameImpl(challengerTypes.CannonGameType) + require.NotEmpty(cannonGame.Address, "cannon game impl must be set") +} diff --git 
a/op-acceptance-tests/tests/base/dummy_flaky_test.go b/op-acceptance-tests/tests/base/dummy_flaky_test.go new file mode 100644 index 0000000000000..ab703a457c375 --- /dev/null +++ b/op-acceptance-tests/tests/base/dummy_flaky_test.go @@ -0,0 +1,27 @@ +package base + +import ( + "math/rand" + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" +) + +var dummyLogs = []string{ + "rpc error: code = DeadlineExceeded desc = context deadline exceeded while waiting for L2 block", + "assertion failed: expected balance to increase after funding, but it did not", + "unexpected revert: contract call failed with error 'insufficient funds for gas * price + value'", +} + +// This test only exists to be flaky, and is used to test the flake-shake system. +func TestDummyFlakyTest(gt *testing.T) { + t := devtest.SerialT(gt) + + t.Log("This test is flaky to test the flake-shake system") + + if rand.Float64() < 0.05 { + // provide a dummy log, from a pool of three messages + t.Log(dummyLogs[rand.Intn(len(dummyLogs))]) + t.Fail() + } +} diff --git a/op-acceptance-tests/tests/base/withdrawal/withdrawal_test.go b/op-acceptance-tests/tests/base/withdrawal/withdrawal_test.go index de9125ac670a0..d2b40beb723a9 100644 --- a/op-acceptance-tests/tests/base/withdrawal/withdrawal_test.go +++ b/op-acceptance-tests/tests/base/withdrawal/withdrawal_test.go @@ -33,9 +33,13 @@ func TestWithdrawal(gt *testing.T) { expectedL2UserBalance := depositAmount l2User.VerifyBalanceExact(expectedL2UserBalance) - withdrawal := bridge.InitiateWithdrawal(withdrawalAmount, l2User) + // Force a fresh EOA instance to avoid stale nonce state from shared L1/L2 key usage + // This prevents "nonce too low" errors in the retry logic during withdrawal initiation + freshL2User := l1User.Key().User(sys.L2EL) + + withdrawal := bridge.InitiateWithdrawal(withdrawalAmount, freshL2User) expectedL2UserBalance = expectedL2UserBalance.Sub(withdrawalAmount).Sub(withdrawal.InitiateGasCost()) - 
l2User.VerifyBalanceExact(expectedL2UserBalance) + freshL2User.VerifyBalanceExact(expectedL2UserBalance) withdrawal.Prove(l1User) expectedL1UserBalance = expectedL1UserBalance.Sub(withdrawal.ProveGasCost()) diff --git a/op-acceptance-tests/tests/depreqres/depreqres_test.go b/op-acceptance-tests/tests/depreqres/depreqres_test.go new file mode 100644 index 0000000000000..79fa05817e0ad --- /dev/null +++ b/op-acceptance-tests/tests/depreqres/depreqres_test.go @@ -0,0 +1,56 @@ +package depreqres + +import ( + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +func TestUnsafeChainStalling_DisabledReqRespSync(gt *testing.T) { + t := devtest.SerialT(gt) + sys := presets.NewSingleChainMultiNode(t) + require := t.Require() + l := t.Logger() + + l.Info("Confirm that the CL nodes are progressing the unsafe chain") + target := uint64(10) + dsl.CheckAll(t, + sys.L2CL.AdvancedFn(types.LocalUnsafe, target, 30), + sys.L2CLB.AdvancedFn(types.LocalUnsafe, target, 30), + ) + + l.Info("Stop the L2 batcher") + sys.L2Batcher.Stop() + + l.Info("Disconnect L2CL from L2CLB, and vice versa") + sys.L2CLB.DisconnectPeer(sys.L2CL) + sys.L2CL.DisconnectPeer(sys.L2CLB) + + ssA_before := sys.L2CL.SyncStatus() + ssB_before := sys.L2CLB.SyncStatus() + + l.Info("L2CL status before delay", "unsafeL2", ssA_before.UnsafeL2.ID(), "safeL2", ssA_before.SafeL2.ID()) + l.Info("L2CLB status before delay", "unsafeL2", ssB_before.UnsafeL2.ID(), "safeL2", ssB_before.SafeL2.ID()) + + time.Sleep(20 * time.Second) + + ssA_after := sys.L2CL.SyncStatus() + ssB_after := sys.L2CLB.SyncStatus() + + l.Info("L2CL status after delay", "unsafeL2", ssA_after.UnsafeL2.ID(), "safeL2", ssA_after.SafeL2.ID()) + l.Info("L2CLB status after delay", "unsafeL2", ssB_after.UnsafeL2.ID(), "safeL2", 
ssB_after.SafeL2.ID()) + + require.Greater(ssA_after.UnsafeL2.Number, ssA_before.UnsafeL2.Number, "unsafe chain for L2CL should have advanced") + require.Equal(ssB_after.UnsafeL2.Number, ssB_before.UnsafeL2.Number, "unsafe chain for L2CLB should have stalled") + + l.Info("Re-connect L2CL to L2CLB") + sys.L2CLB.ConnectPeer(sys.L2CL) + sys.L2CL.ConnectPeer(sys.L2CLB) + + l.Info("Confirm that the unsafe chain for L2CLB is stalled") + sys.L2CLB.NotAdvanced(types.LocalUnsafe, 10) +} diff --git a/op-acceptance-tests/tests/depreqres/init_test.go b/op-acceptance-tests/tests/depreqres/init_test.go new file mode 100644 index 0000000000000..ff0f2710d958c --- /dev/null +++ b/op-acceptance-tests/tests/depreqres/init_test.go @@ -0,0 +1,16 @@ +package depreqres + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/compat" + "github.com/ethereum-optimism/optimism/op-devstack/presets" +) + +func TestMain(m *testing.M) { + presets.DoMain(m, presets.WithSingleChainMultiNode(), + presets.WithExecutionLayerSyncOnVerifiers(), + presets.WithCompatibleTypes(compat.SysGo), + presets.WithReqRespSyncDisabled(), + ) +} diff --git a/op-acceptance-tests/tests/ecotone/fees_test.go b/op-acceptance-tests/tests/ecotone/fees_test.go index eefd8def79b53..10852f673eb2c 100644 --- a/op-acceptance-tests/tests/ecotone/fees_test.go +++ b/op-acceptance-tests/tests/ecotone/fees_test.go @@ -28,7 +28,7 @@ func TestFees(gt *testing.T) { ecotoneFees.LogResults(result) - t.Log("Comprehensive Ecotone fees test completed successfully:", + t.Log("Ecotone fees test completed successfully", "gasUsed", result.TransactionReceipt.GasUsed, "l1Fee", result.L1Fee.String(), "l2Fee", result.L2Fee.String(), diff --git a/op-acceptance-tests/tests/fjord/check_scripts_test.go b/op-acceptance-tests/tests/fjord/check_scripts_test.go index c41892e9684f1..649f4a5e9de30 100644 --- a/op-acceptance-tests/tests/fjord/check_scripts_test.go +++ b/op-acceptance-tests/tests/fjord/check_scripts_test.go @@ -1,89 
+1,188 @@ package fjord import ( - "math/big" + "context" + "crypto/rand" "testing" - "github.com/ethereum-optimism/optimism/devnet-sdk/system" - "github.com/ethereum-optimism/optimism/devnet-sdk/testing/systest" - "github.com/ethereum-optimism/optimism/devnet-sdk/testing/testlib/validators" - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - fjordChecks "github.com/ethereum-optimism/optimism/op-chain-ops/cmd/check-fjord/checks" + "github.com/ethereum/go-ethereum/rpc" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-service/testlog" - "github.com/ethereum/go-ethereum/log" - "github.com/stretchr/testify/require" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/predeploys" + txib "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" + "github.com/ethereum-optimism/optimism/op-service/txintent/contractio" + "github.com/ethereum-optimism/optimism/op-service/txplan" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" ) -// TestCheckFjordScript ensures the op-chain-ops/cmd/check-fjord script runs successfully -// against a test chain with the fjord hardfork activated/unactivated -func TestCheckFjordScript(t *testing.T) { +var ( + rip7212Precompile = common.HexToAddress("0x0000000000000000000000000000000000000100") + invalid7212Data = []byte{0x00} + valid7212Data = 
common.FromHex("4cee90eb86eaa050036147a12d49004b6b9c72bd725d39d4785011fe190f0b4da73bd4903f0ce3b639bbbf6e8e80d16931ff4bcf5993d58468e8fb19086e8cac36dbcd03009df8c59286b162af3bd7fcc0450c9aa81be5d10d312af6c66b1d604aebd3099c618202fcfe16ae7770b0c49ab5eadf74b754204a3bb6060e44eff37618b065f9832de4ca6ca971a7a1adc826d0f7c00181a5fb2ddf79ae00b4e10e") +) - l2ChainIndex := uint64(0) +func TestCheckFjordScript(gt *testing.T) { + t := devtest.SerialT(gt) + sys := presets.NewMinimal(t) + require := t.Require() + ctx := t.Ctx() - walletGetter, walletValidator := validators.AcquireL2WalletWithFunds(l2ChainIndex, types.NewBalance(big.NewInt(1_000_000))) - forkConfigGetter, forkValidatorA := validators.AcquireL2WithFork(l2ChainIndex, rollup.Fjord) - _, forkValidatorB := validators.AcquireL2WithoutFork(l2ChainIndex, rollup.Granite) - systest.SystemTest(t, - checkFjordScriptScenario(walletGetter, forkConfigGetter, l2ChainIndex), - walletValidator, - forkValidatorA, - forkValidatorB, - ) + err := dsl.RequiresL2Fork(ctx, sys, 0, rollup.Fjord) + require.NoError(err) + + wallet := sys.FunderL2.NewFundedEOA(eth.OneThirdEther) + + checkRIP7212(t, ctx, sys) + checkGasPriceOracle(t, ctx, sys) + checkFastLZTransactions(t, ctx, sys, wallet) +} - forkConfigGetter, notForkValidator := validators.AcquireL2WithoutFork(l2ChainIndex, rollup.Fjord) - systest.SystemTest(t, - checkFjordScriptScenario(walletGetter, forkConfigGetter, l2ChainIndex), - walletValidator, - notForkValidator, +func checkRIP7212(t devtest.T, ctx context.Context, sys *presets.Minimal) { + require := t.Require() + l2Client := sys.L2EL.Escape().EthClient() + + // Test invalid signature + response, err := l2Client.Call(ctx, ethereum.CallMsg{ + To: &rip7212Precompile, + Data: invalid7212Data, + }, rpc.LatestBlockNumber) + require.NoError(err) + require.Empty(response) + + // Test valid signature + response, err = l2Client.Call(ctx, ethereum.CallMsg{ + To: &rip7212Precompile, + Data: valid7212Data, + }, rpc.LatestBlockNumber) + 
require.NoError(err) + expected := common.LeftPadBytes([]byte{1}, 32) + require.Equal(expected, response) +} + +func checkGasPriceOracle(t devtest.T, ctx context.Context, sys *presets.Minimal) { + require := t.Require() + + l2Client := sys.L2EL.Escape().EthClient() + gpo := txib.NewGasPriceOracle( + txib.WithClient(l2Client), + txib.WithTo(predeploys.GasPriceOracleAddr), + txib.WithTest(t), ) + isFjord, err := contractio.Read(gpo.IsFjord(), ctx) + require.NoError(err) + require.True(isFjord) } -func checkFjordScriptScenario(walletGetter validators.WalletGetter, chainConfigGetter validators.ChainConfigGetter, chainIndex uint64) systest.SystemTestFunc { - return func(t systest.T, sys system.System) { - wallet := walletGetter(t.Context()) - chainConfig := chainConfigGetter(t.Context()) +func checkFastLZTransactions(t devtest.T, ctx context.Context, sys *presets.Minimal, wallet *dsl.EOA) { + require := t.Require() + + l2Client := sys.L2EL.Escape().EthClient() + gasPriceOracle := txib.NewGasPriceOracle( + txib.WithClient(l2Client), + txib.WithTo(predeploys.GasPriceOracleAddr), + txib.WithTest(t), + ) - l2 := sys.L2s()[chainIndex] - l2LowLevelClient, err := sys.L2s()[chainIndex].Nodes()[0].GethClient() - require.NoError(t, err) + testCases := []struct { + name string + data []byte + }{ + {"empty", nil}, + {"all-zero-256", make([]byte, 256)}, + {"all-42-256", func() []byte { + data := make([]byte, 256) + for i := range data { + data[i] = 0x42 + } + return data + }()}, + {"random-256", func() []byte { + data := make([]byte, 256) + _, _ = rand.Read(data) + return data + }()}, + } - // Get the wallet's private key and address - privateKey := wallet.PrivateKey() + for _, tc := range testCases { walletAddr := wallet.Address() + var receipt *types.Receipt + var signedTx *types.Transaction - logger := testlog.Logger(t, log.LevelDebug) - checkFjordConfig := &fjordChecks.CheckFjordConfig{ - Log: logger, - L2: l2LowLevelClient, - Key: privateKey, - Addr: walletAddr, - } + if 
len(tc.data) == 0 { + plannedTx := wallet.Transfer(walletAddr, eth.ZeroWei) + var err error + receipt, err = plannedTx.Included.Eval(ctx) + require.NoError(err) + require.NotNil(receipt) + + _, txs, err := l2Client.InfoAndTxsByHash(ctx, receipt.BlockHash) + require.NoError(err) - block, err := l2.Nodes()[0].BlockByNumber(t.Context(), nil) - require.NoError(t, err) - time := block.Time() - - isFjordActivated, err := validators.IsForkActivated(chainConfig, rollup.Fjord, time) - require.NoError(t, err) - - if !isFjordActivated { - err = fjordChecks.CheckRIP7212(t.Context(), checkFjordConfig) - require.Error(t, err, "expected error for CheckRIP7212") - err = fjordChecks.CheckGasPriceOracle(t.Context(), checkFjordConfig) - require.Error(t, err, "expected error for CheckGasPriceOracle") - err = fjordChecks.CheckTxEmpty(t.Context(), checkFjordConfig) - require.Error(t, err, "expected error for CheckTxEmpty") - err = fjordChecks.CheckTxAllZero(t.Context(), checkFjordConfig) - require.Error(t, err, "expected error for CheckTxAllZero") - err = fjordChecks.CheckTxAll42(t.Context(), checkFjordConfig) - require.Error(t, err, "expected error for CheckTxAll42") - err = fjordChecks.CheckTxRandom(t.Context(), checkFjordConfig) - require.Error(t, err, "expected error for CheckTxRandom") + for _, tx := range txs { + if tx.Hash() == receipt.TxHash { + signedTx = tx + break + } + } + require.NotNil(signedTx) } else { - err = fjordChecks.CheckAll(t.Context(), checkFjordConfig) - require.NoError(t, err, "should not error on CheckAll") + opt := txplan.Combine( + wallet.Plan(), + txplan.WithTo(&walletAddr), + txplan.WithValue(eth.ZeroWei), + txplan.WithData(tc.data), + ) + plannedTx := txplan.NewPlannedTx(opt) + var err error + receipt, err = plannedTx.Included.Eval(ctx) + require.NoError(err) + require.NotNil(receipt) + + signedTx, err = dsl.FindSignedTransactionFromReceipt(ctx, l2Client, receipt) + require.NoError(err) + require.NotNil(signedTx) } + + require.Equal(uint64(1), 
receipt.Status) + + unsignedTx, err := dsl.CreateUnsignedTransactionFromSigned(signedTx) + require.NoError(err) + + txUnsigned, err := unsignedTx.MarshalBinary() + require.NoError(err) + + gpoFee, err := dsl.ReadGasPriceOracleL1FeeAt(ctx, l2Client, gasPriceOracle, txUnsigned, receipt.BlockHash) + require.NoError(err) + + fastLzSize := uint64(types.FlzCompressLen(txUnsigned) + 68) + gethGPOFee, err := dsl.CalculateFjordL1Cost(ctx, l2Client, types.RollupCostData{FastLzSize: fastLzSize}, receipt.BlockHash) + require.NoError(err) + require.Equalf(gethGPOFee.Uint64(), gpoFee.Uint64(), "GPO L1 fee mismatch (expected=%d actual=%d)", gethGPOFee.Uint64(), gpoFee.Uint64()) + + expectedFee, err := dsl.CalculateFjordL1Cost(ctx, l2Client, signedTx.RollupCostData(), receipt.BlockHash) + require.NoError(err) + require.NotNil(receipt.L1Fee) + dsl.ValidateL1FeeMatches(t, expectedFee, receipt.L1Fee) + + upperBound, err := dsl.ReadGasPriceOracleL1FeeUpperBoundAt(ctx, l2Client, gasPriceOracle, len(txUnsigned), receipt.BlockHash) + require.NoError(err) + txLenGPO := len(txUnsigned) + 68 + flzUpperBound := uint64(txLenGPO + txLenGPO/255 + 16) + upperBoundCost, err := dsl.CalculateFjordL1Cost(ctx, l2Client, types.RollupCostData{FastLzSize: flzUpperBound}, receipt.BlockHash) + require.NoError(err) + require.Equalf(upperBoundCost.Uint64(), upperBound.Uint64(), "GPO L1 upper bound mismatch (expected=%d actual=%d)", upperBoundCost.Uint64(), upperBound.Uint64()) + + _, err = contractio.Read(gasPriceOracle.BaseFeeScalar(), ctx) + require.NoError(err) + _, err = contractio.Read(gasPriceOracle.BlobBaseFeeScalar(), ctx) + require.NoError(err) } } diff --git a/op-acceptance-tests/tests/fjord/fees_test.go b/op-acceptance-tests/tests/fjord/fees_test.go index ea7d5af867891..f661e17a6f2d0 100644 --- a/op-acceptance-tests/tests/fjord/fees_test.go +++ b/op-acceptance-tests/tests/fjord/fees_test.go @@ -1,296 +1,54 @@ package fjord import ( - "context" - "errors" - "math/big" "testing" - "time" - 
"github.com/ethereum-optimism/optimism/devnet-sdk/system" - "github.com/ethereum-optimism/optimism/devnet-sdk/testing/systest" - "github.com/ethereum-optimism/optimism/devnet-sdk/testing/testlib/validators" - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum-optimism/optimism/op-e2e/bindings" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + dsl "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/predeploys" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - gethTypes "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/params" - "github.com/stretchr/testify/require" + txib "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" ) -// TestFees verifies that L1/L2 fees are handled properly in different fork configurations -func TestFees(t *testing.T) { - // Define which L2 chain we'll test - chainIdx := uint64(0) - - // Get validators and getters for accessing the system and wallets - walletGetter, walletValidator := validators.AcquireL2WalletWithFunds(chainIdx, types.NewBalance(big.NewInt(params.Ether))) - - // Run fjord test - _, forkValidator := validators.AcquireL2WithFork(chainIdx, rollup.Fjord) - _, notForkValidator := validators.AcquireL2WithoutFork(chainIdx, rollup.Isthmus) - systest.SystemTest(t, - feesTestScenario(walletGetter, chainIdx), - walletValidator, - forkValidator, - notForkValidator, +func TestFees(gt *testing.T) { + t := devtest.SerialT(gt) + sys := presets.NewMinimal(t) + require := t.Require() + ctx := t.Ctx() + + err := dsl.RequiresL2Fork(ctx, sys, 0, rollup.Fjord) + require.NoError(err) + operatorFee := 
dsl.NewOperatorFee(t, sys.L2Chain, sys.L1EL) + operatorFee.SetOperatorFee(100000000, 500) + operatorFee.WaitForL2SyncWithCurrentL1State() + + alice := sys.FunderL2.NewFundedEOA(eth.OneTenthEther) + bob := sys.Wallet.NewEOA(sys.L2EL) + + fjordFees := dsl.NewFjordFees(t, sys.L2Chain) + result := fjordFees.ValidateTransaction(alice, bob, eth.OneHundredthEther.ToBig()) + + l2Client := sys.L2EL.Escape().EthClient() + gpo := txib.NewGasPriceOracle( + txib.WithClient(l2Client), + txib.WithTo(predeploys.GasPriceOracleAddr), + txib.WithTest(t), ) -} - -// stateGetterAdapter adapts the ethclient to implement the StateGetter interface -type stateGetterAdapter struct { - ctx context.Context - t systest.T - client *ethclient.Client -} - -// GetState implements the StateGetter interface -func (sga *stateGetterAdapter) GetState(addr common.Address, key common.Hash) common.Hash { - var result common.Hash - val, err := sga.client.StorageAt(sga.ctx, addr, key, nil) - require.NoError(sga.t, err) - copy(result[:], val) - return result -} - -// waitForTransaction polls for a transaction receipt until it is available or the context is canceled. -// It's a simpler version of the functionality in SimpleTxManager. 
-func waitForTransaction(ctx context.Context, client *ethclient.Client, hash common.Hash) (*gethTypes.Receipt, error) { - ticker := time.NewTicker(500 * time.Millisecond) // Poll every 500ms - defer ticker.Stop() - - for { - receipt, err := client.TransactionReceipt(ctx, hash) - if receipt != nil && err == nil { - return receipt, nil - } else if err != nil && !errors.Is(err, ethereum.NotFound) { - return nil, err - } - - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-ticker.C: - // Continue polling - } - } -} - -// feesTestScenario creates a test scenario for verifying fee calculations -func feesTestScenario( - walletGetter validators.WalletGetter, - chainIdx uint64, -) systest.SystemTestFunc { - return func(t systest.T, sys system.System) { - ctx := t.Context() - - // Get the low-level system and wallet - wallet := walletGetter(ctx) - - // Get the L2 client - l2Chain := sys.L2s()[chainIdx] - l2Client, err := l2Chain.Nodes()[0].GethClient() - require.NoError(t, err) - - // TODO: Wait for first block after genesis - // The genesis block has zero L1Block values and will throw off the GPO checks - header, err := l2Client.HeaderByNumber(ctx, big.NewInt(1)) - require.NoError(t, err) - - startBlockNumber := header.Number - - // Get the genesis config - chainConfig, err := l2Chain.Config() - require.NoError(t, err) - - // Create state getter adapter for L1 cost function - sga := &stateGetterAdapter{ - ctx: ctx, - t: t, - client: l2Client, - } - - // Create L1 cost function - l1CostFn := gethTypes.NewL1CostFunc(chainConfig, sga) - - // Create operator fee function - operatorFeeFn := gethTypes.NewOperatorCostFunc(chainConfig, sga) - - // Get wallet private key and address - fromAddr := wallet.Address() - privateKey := wallet.PrivateKey() - - // Find gaspriceoracle contract - gpoContract, err := bindings.NewGasPriceOracle(predeploys.GasPriceOracleAddr, l2Client) - require.NoError(t, err) - - // Get wallet balance before test - startBalance, err := 
l2Client.BalanceAt(ctx, fromAddr, startBlockNumber) - require.NoError(t, err) - require.Greater(t, startBalance.Uint64(), big.NewInt(0).Uint64()) - - // Get initial balances of fee recipients - baseFeeRecipientStartBalance, err := l2Client.BalanceAt(ctx, predeploys.BaseFeeVaultAddr, startBlockNumber) - require.NoError(t, err) - - l1FeeRecipientStartBalance, err := l2Client.BalanceAt(ctx, predeploys.L1FeeVaultAddr, startBlockNumber) - require.NoError(t, err) - - sequencerFeeVaultStartBalance, err := l2Client.BalanceAt(ctx, predeploys.SequencerFeeVaultAddr, startBlockNumber) - require.NoError(t, err) - - operatorFeeVaultStartBalance, err := l2Client.BalanceAt(ctx, predeploys.OperatorFeeVaultAddr, startBlockNumber) - require.NoError(t, err) - - genesisBlock, err := l2Client.BlockByNumber(ctx, startBlockNumber) - require.NoError(t, err) - - coinbaseStartBalance, err := l2Client.BalanceAt(ctx, genesisBlock.Coinbase(), startBlockNumber) - require.NoError(t, err) - - // Send a simple transfer from wallet to a test address - transferAmount := big.NewInt(params.Ether / 10) // 0.1 ETH - targetAddr := common.Address{0xff, 0xff} - - // Get suggested gas tip from the client instead of using a hardcoded value - gasTip, err := l2Client.SuggestGasTipCap(ctx) - require.NoError(t, err, "Failed to get suggested gas tip") - - // Estimate gas for the transaction instead of using a hardcoded value - msg := ethereum.CallMsg{ - From: fromAddr, - To: &targetAddr, - Value: transferAmount, - } - gasLimit, err := l2Client.EstimateGas(ctx, msg) - require.NoError(t, err, "Failed to estimate gas") - - // Create and sign transaction with the suggested values - nonce, err := l2Client.PendingNonceAt(ctx, fromAddr) - require.NoError(t, err) - - // Get latest header to get the base fee - header, err = l2Client.HeaderByNumber(ctx, nil) - require.NoError(t, err) - - // Calculate a reasonable gas fee cap based on the base fee - // A common approach is to set fee cap to 2x the base fee + tip - gasFeeCap 
:= new(big.Int).Add( - new(big.Int).Mul(header.BaseFee, big.NewInt(2)), - gasTip, - ) - - txData := &gethTypes.DynamicFeeTx{ - ChainID: l2Chain.ID(), - Nonce: nonce, - GasTipCap: gasTip, - GasFeeCap: gasFeeCap, - Gas: gasLimit, - To: &targetAddr, - Value: transferAmount, - Data: nil, - } - - // Sign transaction - tx := gethTypes.NewTx(txData) - signedTx, err := gethTypes.SignTx(tx, gethTypes.LatestSignerForChainID(l2Chain.ID()), privateKey) - require.NoError(t, err) - - // Send transaction - err = l2Client.SendTransaction(ctx, signedTx) - require.NoError(t, err) - - // Wait for transaction receipt with timeout - ctx, cancel := context.WithTimeout(ctx, time.Second*10) - defer cancel() - receipt, err := waitForTransaction(ctx, l2Client, signedTx.Hash()) - require.NoError(t, err, "Failed to wait for transaction receipt") - require.NotNil(t, receipt) - require.Equal(t, gethTypes.ReceiptStatusSuccessful, receipt.Status) - - // Get block header where transaction was included - header, err = l2Client.HeaderByNumber(ctx, receipt.BlockNumber) - require.NoError(t, err) - - // Get final balances after transaction - coinbaseEndBalance, err := l2Client.BalanceAt(ctx, header.Coinbase, header.Number) - require.NoError(t, err) - - endBalance, err := l2Client.BalanceAt(ctx, fromAddr, header.Number) - require.NoError(t, err) - - baseFeeRecipientEndBalance, err := l2Client.BalanceAt(ctx, predeploys.BaseFeeVaultAddr, header.Number) - require.NoError(t, err) - - operatorFeeVaultEndBalance, err := l2Client.BalanceAt(ctx, predeploys.OperatorFeeVaultAddr, header.Number) - require.NoError(t, err) - - l1FeeRecipientEndBalance, err := l2Client.BalanceAt(ctx, predeploys.L1FeeVaultAddr, header.Number) - require.NoError(t, err) - - sequencerFeeVaultEndBalance, err := l2Client.BalanceAt(ctx, predeploys.SequencerFeeVaultAddr, header.Number) - require.NoError(t, err) - - // Calculate differences in balances - baseFeeRecipientDiff := new(big.Int).Sub(baseFeeRecipientEndBalance, 
baseFeeRecipientStartBalance) - l1FeeRecipientDiff := new(big.Int).Sub(l1FeeRecipientEndBalance, l1FeeRecipientStartBalance) - sequencerFeeVaultDiff := new(big.Int).Sub(sequencerFeeVaultEndBalance, sequencerFeeVaultStartBalance) - coinbaseDiff := new(big.Int).Sub(coinbaseEndBalance, coinbaseStartBalance) - operatorFeeVaultDiff := new(big.Int).Sub(operatorFeeVaultEndBalance, operatorFeeVaultStartBalance) - - // Verify L2 fee - l2Fee := new(big.Int).Mul(gasTip, new(big.Int).SetUint64(receipt.GasUsed)) - require.Equal(t, sequencerFeeVaultDiff, coinbaseDiff, "coinbase is always sequencer fee vault") - require.Equal(t, l2Fee, coinbaseDiff, "l2 fee mismatch") - require.Equal(t, l2Fee, sequencerFeeVaultDiff) - - // Verify base fee - baseFee := new(big.Int).Mul(header.BaseFee, new(big.Int).SetUint64(receipt.GasUsed)) - require.Equal(t, baseFee, baseFeeRecipientDiff, "base fee mismatch") - - // Verify L1 fee - txBytes, err := tx.MarshalBinary() - require.NoError(t, err) - - // Calculate L1 fee based on transaction data and blocktime - l1Fee := l1CostFn(tx.RollupCostData(), header.Time) - require.Equal(t, l1Fee, l1FeeRecipientDiff, "L1 fee mismatch") - - // Calculate operator fee - expectedOperatorFee := operatorFeeFn(receipt.GasUsed, header.Time) - expectedOperatorFeeVaultEndBalance := new(big.Int).Sub(operatorFeeVaultStartBalance, expectedOperatorFee.ToBig()) - require.True(t, - operatorFeeVaultDiff.Cmp(expectedOperatorFee.ToBig()) == 0, - "operator fee mismatch: operator fee vault start balance %v, actual end balance %v, expected end balance %v", - operatorFeeVaultStartBalance, - operatorFeeVaultEndBalance, - expectedOperatorFeeVaultEndBalance, - ) - - gpoFjord, err := gpoContract.IsFjord(&bind.CallOpts{BlockNumber: header.Number}) - require.NoError(t, err) - require.True(t, gpoFjord, "GPO must report Fjord") - // Verify gas price oracle L1 fee calculation - gpoL1Fee, err := gpoContract.GetL1Fee(&bind.CallOpts{BlockNumber: header.Number}, txBytes) - require.NoError(t, 
err) - require.Equal(t, l1Fee, gpoL1Fee, "GPO reports L1 fee mismatch") + signedTx, err := dsl.FindSignedTransactionFromReceipt(ctx, l2Client, result.TransactionReceipt) + require.NoError(err) + require.NotNil(signedTx) - // Verify receipt L1 fee - require.Equal(t, receipt.L1Fee, l1Fee, "l1 fee in receipt is correct") + unsignedTx, err := dsl.CreateUnsignedTransactionFromSigned(signedTx) + require.NoError(err) - // Calculate total fee and verify wallet balance difference - totalFeeRecipient := new(big.Int).Add(baseFeeRecipientDiff, sequencerFeeVaultDiff) - totalFee := new(big.Int).Add(totalFeeRecipient, l1FeeRecipientDiff) - totalFee = new(big.Int).Add(totalFee, operatorFeeVaultDiff) + txUnsigned, err := unsignedTx.MarshalBinary() + require.NoError(err) - balanceDiff := new(big.Int).Sub(startBalance, endBalance) - balanceDiff.Sub(balanceDiff, transferAmount) - require.Equal(t, balanceDiff, totalFee, "balances should add up") - } + gpoL1Fee, err := dsl.ReadGasPriceOracleL1FeeAt(ctx, l2Client, gpo, txUnsigned, result.TransactionReceipt.BlockHash) + require.NoError(err) + dsl.ValidateL1FeeMatches(t, result.L1Fee, gpoL1Fee) } diff --git a/op-acceptance-tests/tests/fjord/init_test.go b/op-acceptance-tests/tests/fjord/init_test.go new file mode 100644 index 0000000000000..c66034f06f195 --- /dev/null +++ b/op-acceptance-tests/tests/fjord/init_test.go @@ -0,0 +1,13 @@ +package fjord + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/presets" +) + +func TestMain(m *testing.M) { + presets.DoMain(m, + presets.WithMinimal(), + ) +} diff --git a/op-acceptance-tests/tests/flashblocks/flashblocks_transfer_test.go b/op-acceptance-tests/tests/flashblocks/flashblocks_transfer_test.go index 86a8a070bcc13..0761c870ba9b9 100644 --- a/op-acceptance-tests/tests/flashblocks/flashblocks_transfer_test.go +++ b/op-acceptance-tests/tests/flashblocks/flashblocks_transfer_test.go @@ -94,7 +94,7 @@ func TestFlashblocksTransfer(gt *testing.T) { executedTransaction = 
alice.Transact( alice.Plan(), txplan.WithTo(&bobAddr), - txplan.WithValue(depositAmount.ToBig()), + txplan.WithValue(depositAmount), ) transactionApproxConfirmationTime = time.Now() newBobBalance := bobBalance.Add(depositAmount) diff --git a/op-acceptance-tests/tests/interop/contract/interop_contract_test.go b/op-acceptance-tests/tests/interop/contract/interop_contract_test.go index 17e60a04ec5c7..ae93f8ea2b118 100644 --- a/op-acceptance-tests/tests/interop/contract/interop_contract_test.go +++ b/op-acceptance-tests/tests/interop/contract/interop_contract_test.go @@ -16,6 +16,7 @@ import ( // TestRegularMessage checks that messages can be sent and relayed via L2ToL2CrossDomainMessenger func TestRegularMessage(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewSimpleInterop(t) require := sys.T.Require() diff --git a/op-acceptance-tests/tests/interop/interop_smoke_test.go b/op-acceptance-tests/tests/interop/interop_smoke_test.go deleted file mode 100644 index 2fa1be41b368e..0000000000000 --- a/op-acceptance-tests/tests/interop/interop_smoke_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package interop - -import ( - "context" - "math/big" - "testing" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" - "github.com/ethereum-optimism/optimism/devnet-sdk/system" - "github.com/ethereum-optimism/optimism/devnet-sdk/testing/systest" - "github.com/ethereum-optimism/optimism/devnet-sdk/testing/testlib/validators" - sdktypes "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum-optimism/optimism/op-service/testlog" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - "github.com/stretchr/testify/require" -) - -func smokeTestScenario(chainIdx uint64, walletGetter validators.WalletGetter) systest.SystemTestFunc { - return func(t systest.T, sys system.System) { - ctx := t.Context() - - logger := testlog.Logger(t, log.LevelInfo) - logger = 
logger.With("test", "TestMinimal", "devnet", sys.Identifier()) - - chain := sys.L2s()[chainIdx] - logger = logger.With("chain", chain.ID()) - logger.Info("starting test") - - funds := sdktypes.NewBalance(big.NewInt(1 * constants.ETH)) - user := walletGetter(ctx) - - wethAddr := constants.WETH - weth, err := chain.Nodes()[0].ContractsRegistry().WETH(wethAddr) - require.NoError(t, err) - initialBalance, err := weth.BalanceOf(user.Address()).Call(ctx) - require.NoError(t, err) - logger = logger.With("user", user.Address()) - logger.Info("initial balance retrieved", "balance", initialBalance) - - logger.Info("sending ETH to contract", "amount", funds) - require.NoError(t, user.SendETH(wethAddr, funds).Send(ctx).Wait()) - - balance, err := weth.BalanceOf(user.Address()).Call(ctx) - require.NoError(t, err) - logger.Info("final balance retrieved", "balance", balance) - - require.Equal(t, initialBalance.Add(funds), balance) - } -} - -func TestInteropSystemNoop(t *testing.T) { - systest.InteropSystemTest(t, func(t systest.T, sys system.InteropSystem) { - testlog.Logger(t, log.LevelInfo).Info("noop") - }) -} - -func TestSmokeTestFailure(t *testing.T) { - // Create mock failing system - mockAddr := common.HexToAddress("0x1234567890123456789012345678901234567890") - mockWallet := &mockFailingWallet{ - addr: mockAddr, - bal: sdktypes.NewBalance(big.NewInt(0.1 * constants.ETH)), - } - mockL1Chain := newMockFailingL1Chain( - sdktypes.ChainID(big.NewInt(1234)), - system.WalletMap{ - "user1": mockWallet, - }, - []system.Node{&mockFailingNode{ - reg: &mockContractsRegistry{}, - }}, - ) - mockL2Chain := newMockFailingL2Chain( - sdktypes.ChainID(big.NewInt(1234)), - system.WalletMap{"user1": mockWallet}, - []system.Node{&mockFailingNode{ - reg: &mockContractsRegistry{}, - }}, - ) - mockSys := &mockFailingSystem{l1Chain: mockL1Chain, l2Chain: mockL2Chain} - - // Run the smoke test logic and capture failures - getter := func(ctx context.Context) system.Wallet { - return mockWallet - } - 
rt := NewRecordingT(context.TODO()) - rt.TestScenario( - smokeTestScenario(0, getter), - mockSys, - ) - - // Verify that the test failed due to SendETH error - require.True(t, rt.Failed(), "test should have failed") - require.Contains(t, rt.Logs(), "transaction failure", "unexpected failure message") -} diff --git a/op-acceptance-tests/tests/interop/loadtest/invalid_msg_test.go b/op-acceptance-tests/tests/interop/loadtest/invalid_msg_test.go index 5899e4631b998..de70ec63dc5f5 100644 --- a/op-acceptance-tests/tests/interop/loadtest/invalid_msg_test.go +++ b/op-acceptance-tests/tests/interop/loadtest/invalid_msg_test.go @@ -73,7 +73,7 @@ func NewInvalidExecMsgSpammer(t devtest.T, l2 *L2, validInitMsg suptypes.Message // any wei, but we don't want to trigger mempool balance checks. eoa := l2.Wallet.NewEOA(l2.EL) address := eoa.Address() - _, err := l2.Include(t, txplan.WithValue(eth.OneHundredthEther.ToBig()), txplan.WithTo(&address)) + _, err := l2.Include(t, txplan.WithValue(eth.OneHundredthEther), txplan.WithTo(&address)) t.Require().NoError(err) // The InvalidExecutor uses a txinclude.Includer to manage nonces concurrently. It uses a @@ -119,6 +119,7 @@ func (ie *InvalidExecMsgSpammer) Spam(t devtest.T) error { // executing messages are also spammed. The number of invalid messages spammed per slot is // configurable via NAT_INVALID_MPS (default: 1_000). func TestRelayWithInvalidMessagesSteady(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t, l2A, l2B := setupLoadTest(gt) // Emit a valid initiating message. 
diff --git a/op-acceptance-tests/tests/interop/loadtest/max_execute_test.go b/op-acceptance-tests/tests/interop/loadtest/max_execute_test.go index 521413d09c93c..ffb845e62614b 100644 --- a/op-acceptance-tests/tests/interop/loadtest/max_execute_test.go +++ b/op-acceptance-tests/tests/interop/loadtest/max_execute_test.go @@ -95,6 +95,7 @@ func (e *ExecMsgSpammer) Spam(t devtest.T) error { // executing messages emitted by one spammer become initiating messages for the other spammer. The // test aims to maximize load on the supervisor (indexing and access list checks). func TestMaxExecutingMessagesBurst(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t, l2A, l2B := setupLoadTest(gt) // Initiate messages on both chains. diff --git a/op-acceptance-tests/tests/interop/loadtest/relay_test.go b/op-acceptance-tests/tests/interop/loadtest/relay_test.go index fc64ab0d45967..e7c2cbc072e97 100644 --- a/op-acceptance-tests/tests/interop/loadtest/relay_test.go +++ b/op-acceptance-tests/tests/interop/loadtest/relay_test.go @@ -55,6 +55,7 @@ func (r *RelaySpammer) Spam(t devtest.T) error { // spammer sends one initating message on the source chain and one corresponding executing message // on the destination chain. func TestRelaySteady(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t, l2A, l2B := setupLoadTest(gt) s := NewSteady(l2B.EL.Escape().EthClient(), l2B.Config.ElasticityMultiplier(), l2B.BlockTime, WithAIMDObserver(aimdObserver{})) s.Run(t, NewRelaySpammer(l2A, l2B)) @@ -63,6 +64,7 @@ func TestRelaySteady(gt *testing.T) { // TestRelayBurst runs the Relay spammer on a Burst schedule. See TestRelaySteady for more details // on the Relay spammer. 
func TestRelayBurst(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t, l2A, l2B := setupLoadTest(gt) burst := NewBurst(l2B.BlockTime, WithAIMDObserver(aimdObserver{})) burst.Run(t, NewRelaySpammer(l2A, l2B)) diff --git a/op-acceptance-tests/tests/interop/loadtest/schedule.go b/op-acceptance-tests/tests/interop/loadtest/schedule.go index 9420ba4119e6f..00aa016022938 100644 --- a/op-acceptance-tests/tests/interop/loadtest/schedule.go +++ b/op-acceptance-tests/tests/interop/loadtest/schedule.go @@ -171,6 +171,12 @@ type Spammer interface { Spam(devtest.T) error } +type SpammerFunc func(t devtest.T) error + +func (s SpammerFunc) Spam(t devtest.T) error { + return s(t) +} + // Schedule schedules a Spammer. It determines how often to spam and when to stop. type Schedule interface { Run(devtest.T, Spammer) @@ -326,12 +332,16 @@ func setupAIMD(t devtest.T, blockTime time.Duration, aimdOpts ...AIMDOption) *AI t.Require().NoError(err) } aimd := NewAIMD(targetMessagePassesPerBlock, blockTime, aimdOpts...) 
+ ctx, cancel := context.WithCancel(t.Ctx()) var wg sync.WaitGroup - t.Cleanup(wg.Wait) + t.Cleanup(func() { + cancel() + wg.Wait() + }) wg.Add(1) go func() { defer wg.Done() - aimd.Start(t.Ctx()) + aimd.Start(ctx) }() return aimd } diff --git a/op-acceptance-tests/tests/interop/message/interop_happy_tx_test.go b/op-acceptance-tests/tests/interop/message/interop_happy_tx_test.go index 71471fcd7bb73..98c1b505bc0a9 100644 --- a/op-acceptance-tests/tests/interop/message/interop_happy_tx_test.go +++ b/op-acceptance-tests/tests/interop/message/interop_happy_tx_test.go @@ -19,6 +19,7 @@ import ( // included in two L2 chains and that the cross-safe ref for both of them progresses as expected beyond // the block number where the messages were included func TestInteropHappyTx(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewSimpleInterop(t) diff --git a/op-acceptance-tests/tests/interop/message/interop_mon_test.go b/op-acceptance-tests/tests/interop/message/interop_mon_test.go index f4e93e5dd537b..877868f975ffd 100644 --- a/op-acceptance-tests/tests/interop/message/interop_mon_test.go +++ b/op-acceptance-tests/tests/interop/message/interop_mon_test.go @@ -17,6 +17,7 @@ import ( // TestInteropMon is testing that the op-interop-mon metrics are correctly collected func TestInteropMon(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewSimpleInterop(t) diff --git a/op-acceptance-tests/tests/interop/message/interop_msg_test.go b/op-acceptance-tests/tests/interop/message/interop_msg_test.go index 1d5beba28a6ff..488d2fbe0bccd 100644 --- a/op-acceptance-tests/tests/interop/message/interop_msg_test.go +++ b/op-acceptance-tests/tests/interop/message/interop_msg_test.go @@ -31,6 +31,7 @@ import ( // TestInitExecMsg tests basic interop messaging func TestInitExecMsg(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := 
presets.NewSimpleInterop(t) rng := rand.New(rand.NewSource(1234)) @@ -48,6 +49,7 @@ func TestInitExecMsg(gt *testing.T) { // TestInitExecMsgWithDSL tests basic interop messaging with contract DSL func TestInitExecMsgWithDSL(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewSimpleInterop(t) rng := rand.New(rand.NewSource(1234)) @@ -123,6 +125,7 @@ func TestInitExecMsgWithDSL(gt *testing.T) { // TestRandomDirectedGraph tests below scenario: // Construct random directed graph of messages. func TestRandomDirectedGraph(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewSimpleInterop(t) @@ -245,6 +248,7 @@ func TestRandomDirectedGraph(gt *testing.T) { // TestInitExecMultipleMsg tests below scenario: // Transaction initiates and executes multiple messages of self func TestInitExecMultipleMsg(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewSimpleInterop(t) require := sys.T.Require() @@ -290,6 +294,7 @@ func TestInitExecMultipleMsg(gt *testing.T) { // TestExecSameMsgTwice tests below scenario: // Transaction that executes the same message twice. 
func TestExecSameMsgTwice(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewSimpleInterop(t) require := sys.T.Require() @@ -334,6 +339,7 @@ func TestExecSameMsgTwice(gt *testing.T) { // TestExecDifferentTopicCount tests below scenario: // Execute message that links with initiating message with: 0, 1, 2, 3, or 4 topics in it func TestExecDifferentTopicCount(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewSimpleInterop(t) require := sys.T.Require() @@ -384,6 +390,7 @@ func TestExecDifferentTopicCount(gt *testing.T) { // TestExecMsgOpaqueData tests below scenario: // Execute message that links with initiating message with: 0, 10KB of opaque event data in it func TestExecMsgOpaqueData(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewSimpleInterop(t) require := sys.T.Require() @@ -434,6 +441,7 @@ func TestExecMsgOpaqueData(gt *testing.T) { // TestExecMsgDifferEventIndexInSingleTx tests below scenario: // Execute message that links with initiating message with: first, random or last event of a tx. 
func TestExecMsgDifferEventIndexInSingleTx(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewSimpleInterop(t) require := sys.T.Require() @@ -552,6 +560,7 @@ func executeIndexedFault( // TestExecMessageInvalidAttributes tests below scenario: // Execute message, but with one or more invalid attributes inside identifiers func TestExecMessageInvalidAttributes(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewSimpleInterop(t) require := sys.T.Require() diff --git a/op-acceptance-tests/tests/interop/message/supervisor_smoke_test.go b/op-acceptance-tests/tests/interop/message/supervisor_smoke_test.go index 59cc56d309245..773249bb8693e 100644 --- a/op-acceptance-tests/tests/interop/message/supervisor_smoke_test.go +++ b/op-acceptance-tests/tests/interop/message/supervisor_smoke_test.go @@ -9,16 +9,30 @@ import ( // TestInteropSystemSupervisor tests that the supervisor can provide finalized L1 block information func TestInteropSystemSupervisor(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.ParallelT(gt) sys := presets.NewSimpleInterop(t) - sys.L1Network.WaitForFinalization() + // First ensure L1 network is online and has blocks + t.Log("Waiting for L1 network to be online...") + sys.L1Network.WaitForOnline() + t.Log("L1 network is online") + + t.Log("Waiting for initial L1 block...") + initialBlock := sys.L1Network.WaitForBlock() + t.Log("Got initial L1 block", "block", initialBlock) + + // Wait for finalization (this may take some time) + t.Log("Waiting for L1 block finalization...") + finalizedBlock := sys.L1Network.WaitForFinalization() + t.Log("L1 block finalized", "block", finalizedBlock) // Get the finalized L1 block from the supervisor + t.Log("Querying supervisor for finalized L1 block...") block, err := sys.Supervisor.Escape().QueryAPI().FinalizedL1(t.Ctx()) - t.Require().NoError(err) + t.Require().NoError(err, "Failed to get 
finalized block from supervisor") // If we get here, the supervisor has finalized L1 block information - t.Require().NotNil(block) - t.Log("finalized l1 block", "block", block) + t.Require().NotNil(block, "Supervisor returned nil finalized block") + t.Log("Successfully got finalized L1 block from supervisor", "block", block) } diff --git a/op-acceptance-tests/tests/interop/mocks_test.go b/op-acceptance-tests/tests/interop/mocks_test.go deleted file mode 100644 index fe41f0d545af4..0000000000000 --- a/op-acceptance-tests/tests/interop/mocks_test.go +++ /dev/null @@ -1,396 +0,0 @@ -package interop - -import ( - "bytes" - "context" - "fmt" - "math/big" - "os" - "runtime" - "time" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/registry/empty" - "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" - "github.com/ethereum-optimism/optimism/devnet-sdk/system" - "github.com/ethereum-optimism/optimism/devnet-sdk/testing/systest" - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/sources" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/params" -) - -var ( - // Ensure mockFailingTx implements WriteInvocation - _ types.WriteInvocation[any] = (*mockFailingTx)(nil) - - // Ensure mockFailingTx implements Wallet - _ system.Wallet = (*mockFailingWallet)(nil) - - // Ensure mockFailingChain implements Chain - _ system.Chain = (*mockFailingChain)(nil) - _ system.L2Chain = (*mockFailingL2Chain)(nil) -) - -// mockFailingTx implements types.WriteInvocation[any] that always fails -type mockFailingTx struct{} - -func (m *mockFailingTx) Call(ctx context.Context) (any, error) { - return nil, fmt.Errorf("simulated transaction failure") -} - -func (m 
*mockFailingTx) Send(ctx context.Context) types.InvocationResult { - return m -} - -func (m *mockFailingTx) Error() error { - return fmt.Errorf("transaction failure") -} - -func (m *mockFailingTx) Wait() error { - return fmt.Errorf("transaction failure") -} - -func (m *mockFailingTx) Info() any { - return nil -} - -// mockFailingWallet implements types.Wallet that fails on SendETH -type mockFailingWallet struct { - addr types.Address - key types.Key - bal types.Balance -} - -func (m *mockFailingWallet) Client() *ethclient.Client { - return nil -} - -func (m *mockFailingWallet) Address() types.Address { - return m.addr -} - -func (m *mockFailingWallet) PrivateKey() types.Key { - return m.key -} - -func (m *mockFailingWallet) Balance() types.Balance { - return m.bal -} - -func (m *mockFailingWallet) SendETH(to types.Address, amount types.Balance) types.WriteInvocation[any] { - return &mockFailingTx{} -} - -func (m *mockFailingWallet) InitiateMessage(chainID types.ChainID, target common.Address, message []byte) types.WriteInvocation[any] { - return &mockFailingTx{} -} - -func (m *mockFailingWallet) ExecuteMessage(identifier bindings.Identifier, sentMessage []byte) types.WriteInvocation[any] { - return &mockFailingTx{} -} - -func (m *mockFailingWallet) Nonce() uint64 { - return 0 -} - -func (m *mockFailingWallet) Sign(tx system.Transaction) (system.Transaction, error) { - return tx, nil -} - -func (m *mockFailingWallet) Send(ctx context.Context, tx system.Transaction) error { - return nil -} - -func (m *mockFailingWallet) Transactor() *bind.TransactOpts { - return nil -} - -// mockContractsRegistry extends empty.EmptyRegistry to provide mock contract instances -type mockContractsRegistry struct { - empty.EmptyRegistry -} - -// mockWETH implements a minimal WETH interface for testing -type mockWETH struct { - addr types.Address -} - -func (m *mockWETH) BalanceOf(account types.Address) types.ReadInvocation[types.Balance] { - return &mockReadInvocation{balance: 
types.NewBalance(big.NewInt(0))} -} - -// mockReadInvocation implements a read invocation that returns a fixed balance -type mockReadInvocation struct { - balance types.Balance -} - -func (m *mockReadInvocation) Call(ctx context.Context) (types.Balance, error) { - return m.balance, nil -} - -func (r *mockContractsRegistry) WETH(address types.Address) (interfaces.WETH, error) { - return &mockWETH{addr: address}, nil -} - -// mockFailingChain implements system.Chain with a failing SendETH -type mockFailingChain struct { - id types.ChainID - wallets system.WalletMap - nodes []system.Node -} - -var _ system.Chain = (*mockFailingChain)(nil) - -func newMockFailingL1Chain(id types.ChainID, wallets system.WalletMap, nodes []system.Node) *mockFailingChain { - return &mockFailingChain{ - id: id, - wallets: wallets, - nodes: nodes, - } -} - -func (m *mockFailingChain) Nodes() []system.Node { return m.nodes } -func (m *mockFailingChain) ID() types.ChainID { return m.id } -func (m *mockFailingChain) Wallets() system.WalletMap { - return m.wallets -} -func (m *mockFailingChain) Config() (*params.ChainConfig, error) { - return nil, fmt.Errorf("not implemented") -} -func (m *mockFailingChain) Addresses() system.AddressMap { - return map[string]common.Address{} -} - -var _ system.Node = (*mockFailingNode)(nil) - -type mockFailingNode struct { - reg interfaces.ContractsRegistry -} - -func (m *mockFailingNode) Client() (*sources.EthClient, error) { - return nil, fmt.Errorf("not implemented") -} -func (m *mockFailingNode) GasPrice(ctx context.Context) (*big.Int, error) { - return big.NewInt(1), nil -} -func (m *mockFailingNode) GasLimit(ctx context.Context, tx system.TransactionData) (uint64, error) { - return 1000000, nil -} -func (m *mockFailingNode) PendingNonceAt(ctx context.Context, address common.Address) (uint64, error) { - return 0, nil -} -func (m *mockFailingNode) SupportsEIP(ctx context.Context, eip uint64) bool { - return true -} -func (m *mockFailingNode) RPCURL() string 
{ return "mock://failing" } -func (m *mockFailingNode) ContractsRegistry() interfaces.ContractsRegistry { return m.reg } -func (m *mockFailingNode) GethClient() (*ethclient.Client, error) { - return nil, fmt.Errorf("not implemented") -} -func (m *mockFailingNode) BlockByNumber(ctx context.Context, number *big.Int) (eth.BlockInfo, error) { - return nil, fmt.Errorf("not implemented") -} -func (m *mockFailingNode) Name() string { - return "mock" -} - -// mockFailingChain implements system.Chain with a failing SendETH -type mockFailingL2Chain struct { - mockFailingChain -} - -func newMockFailingL2Chain(id types.ChainID, wallets system.WalletMap, nodes []system.Node) *mockFailingL2Chain { - return &mockFailingL2Chain{ - mockFailingChain: mockFailingChain{ - id: id, - wallets: wallets, - nodes: nodes, - }, - } -} - -func (m *mockFailingL2Chain) L1Addresses() system.AddressMap { - return map[string]common.Address{} -} -func (m *mockFailingL2Chain) L1Wallets() system.WalletMap { - return map[string]system.Wallet{} -} - -// mockFailingSystem implements system.System -type mockFailingSystem struct { - l1Chain system.Chain - l2Chain system.L2Chain -} - -func (m *mockFailingSystem) Identifier() string { - return "mock-failing-system" -} - -func (m *mockFailingSystem) L1() system.Chain { - return m.l1Chain -} - -func (m *mockFailingSystem) L2s() []system.L2Chain { - return []system.L2Chain{m.l2Chain} -} - -func (m *mockFailingSystem) Close() error { - return nil -} - -// recordingT implements systest.T and records failures -type RecordingT struct { - failed bool - skipped bool - logs *bytes.Buffer - cleanup []func() - ctx context.Context -} - -func NewRecordingT(ctx context.Context) *RecordingT { - return &RecordingT{ - logs: bytes.NewBuffer(nil), - ctx: ctx, - } -} - -var _ systest.T = (*RecordingT)(nil) - -func (r *RecordingT) Context() context.Context { - return r.ctx -} - -func (r *RecordingT) WithContext(ctx context.Context) systest.T { - return &RecordingT{ - failed: 
r.failed, - skipped: r.skipped, - logs: r.logs, - cleanup: r.cleanup, - ctx: ctx, - } -} - -func (r *RecordingT) Deadline() (deadline time.Time, ok bool) { - // TODO - return time.Time{}, false -} - -func (r *RecordingT) Parallel() { - // TODO -} - -func (r *RecordingT) Run(name string, f func(systest.T)) { - // TODO -} - -func (r *RecordingT) Cleanup(f func()) { - r.cleanup = append(r.cleanup, f) -} - -func (r *RecordingT) Error(args ...interface{}) { - r.Log(args...) - r.Fail() -} - -func (r *RecordingT) Errorf(format string, args ...interface{}) { - r.Logf(format, args...) - r.Fail() -} - -func (r *RecordingT) Fatal(args ...interface{}) { - r.Log(args...) - r.FailNow() -} - -func (r *RecordingT) Fatalf(format string, args ...interface{}) { - r.Logf(format, args...) - r.FailNow() -} - -func (r *RecordingT) FailNow() { - r.Fail() - runtime.Goexit() -} - -func (r *RecordingT) Fail() { - r.failed = true -} - -func (r *RecordingT) Failed() bool { - return r.failed -} - -func (r *RecordingT) Helper() { - // TODO -} - -func (r *RecordingT) Log(args ...interface{}) { - fmt.Fprintln(r.logs, args...) -} - -func (r *RecordingT) Logf(format string, args ...interface{}) { - fmt.Fprintf(r.logs, format, args...) - fmt.Fprintln(r.logs) -} - -func (r *RecordingT) Name() string { - return "RecordingT" // TODO -} - -func (r *RecordingT) Setenv(key, value string) { - // Store original value - origValue, exists := os.LookupEnv(key) - - // Set new value - os.Setenv(key, value) - - // Register cleanup to restore original value - r.Cleanup(func() { - if exists { - os.Setenv(key, origValue) - } else { - os.Unsetenv(key) - } - }) - -} - -func (r *RecordingT) Skip(args ...interface{}) { - r.Log(args...) - r.SkipNow() -} - -func (r *RecordingT) SkipNow() { - r.skipped = true -} - -func (r *RecordingT) Skipf(format string, args ...interface{}) { - r.Logf(format, args...) 
- r.skipped = true -} - -func (r *RecordingT) Skipped() bool { - return r.skipped -} - -func (r *RecordingT) TempDir() string { - return "" // TODO -} - -func (r *RecordingT) Logs() string { - return r.logs.String() -} - -func (r *RecordingT) TestScenario(scenario systest.SystemTestFunc, sys system.System, values ...interface{}) { - // run in a separate goroutine so we can handle runtime.Goexit() - done := make(chan struct{}) - go func() { - defer close(done) - scenario(r, sys) - }() - <-done -} diff --git a/op-acceptance-tests/tests/interop/prep/prep_test.go b/op-acceptance-tests/tests/interop/prep/prep_test.go index 53965d5e508b9..7e1047f51b120 100644 --- a/op-acceptance-tests/tests/interop/prep/prep_test.go +++ b/op-acceptance-tests/tests/interop/prep/prep_test.go @@ -13,6 +13,7 @@ import ( // before interop is scheduled with an actual hardfork time. // And then confirms we can finalize the chains. func TestUnscheduledInterop(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewSimpleInterop(t) t.Logger().Info("Checking that chain A and B can sync, even though interop is not scheduled") diff --git a/op-acceptance-tests/tests/interop/proofs/challenger_test.go b/op-acceptance-tests/tests/interop/proofs/challenger_test.go index 1a146df2f59c5..61a0fca513d50 100644 --- a/op-acceptance-tests/tests/interop/proofs/challenger_test.go +++ b/op-acceptance-tests/tests/interop/proofs/challenger_test.go @@ -4,8 +4,10 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/dsl/proofs" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" @@ -13,7 +15,7 @@ import ( ) func TestChallengerPlaysGame(gt *testing.T) { - // Setup + 
gt.Skip("Skipping Interop Acceptance Test") t := devtest.ParallelT(gt) sys := presets.NewSimpleInterop(t) dsl.CheckAll(t, @@ -22,12 +24,67 @@ func TestChallengerPlaysGame(gt *testing.T) { ) badClaim := common.HexToHash("0xdeadbeef00000000000000000000000000000000000000000000000000000000") - attacker := sys.FunderL1.NewFundedEOA(eth.OneTenthEther) + attacker := sys.FunderL1.NewFundedEOA(eth.Ether(15)) dgf := sys.DisputeGameFactory() - game := dgf.StartSuperCannonGame(attacker, badClaim) + game := dgf.StartSuperCannonGame(attacker, proofs.WithRootClaim(badClaim)) - // Wait for the challenger to counter the bad root claim - claim := game.RootClaim() - claim.WaitForCounterClaim() + claim := game.RootClaim() // This is the bad claim from attacker + counterClaim := claim.WaitForCounterClaim() // This is the counter-claim from the challenger + for counterClaim.Depth() <= game.SplitDepth() { + claim = counterClaim.Attack(attacker, badClaim) + // Wait for the challenger to counter the attacker's claim, then attack again + counterClaim = claim.WaitForCounterClaim() + } +} + +func TestChallengerRespondsToMultipleInvalidClaims(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") + t := devtest.ParallelT(gt) + sys := presets.NewSimpleInterop(t) + dsl.CheckAll(t, + sys.L2CLA.AdvancedFn(types.CrossSafe, 1, 30), + sys.L2CLB.AdvancedFn(types.CrossSafe, 1, 30), + ) + + attacker := sys.FunderL1.NewFundedEOA(eth.TenEther) + dgf := sys.DisputeGameFactory() + + game := dgf.StartSuperCannonGame(attacker) + claims := game.PerformMoves(attacker, + proofs.Move(0, common.Hash{0x01}, true), + proofs.Move(1, common.Hash{0x03}, true), + proofs.Move(1, common.Hash{0x02}, false), // Defends invalid claim so won't be countered. + ) + + claims[0].WaitForCounterClaim(claims...) + claims[1].WaitForCounterClaim(claims...) 
+ claims[2].VerifyNoCounterClaim() +} + +func TestChallengerRespondsToMultipleInvalidClaimsEOA(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") + t := devtest.ParallelT(gt) + sys := presets.NewSimpleInterop(t) + dsl.CheckAll(t, + sys.L2CLA.AdvancedFn(types.CrossSafe, 1, 30), + sys.L2CLB.AdvancedFn(types.CrossSafe, 1, 30), + ) + + dgf := sys.DisputeGameFactory() + attacker := dgf.CreateHelperEOA(sys.FunderL1.NewFundedEOA(eth.TenEther)) + + game := dgf.StartSuperCannonGame(attacker.EOA) + claims := attacker.PerformMoves(game.FaultDisputeGame, + proofs.Move(0, common.Hash{0x01}, true), + proofs.Move(1, common.Hash{0x03}, true), + proofs.Move(1, common.Hash{0x02}, false), // Defends invalid claim so won't be countered. + ) + + claims[0].WaitForCounterClaim(claims...) + claims[1].WaitForCounterClaim(claims...) + claims[2].VerifyNoCounterClaim() + for _, claim := range claims { + require.Equal(t, attacker.Address(), claim.Claimant()) + } } diff --git a/op-acceptance-tests/tests/interop/proofs/proposer_test.go b/op-acceptance-tests/tests/interop/proofs/proposer_test.go index da20c56ad971e..f56ae663597fc 100644 --- a/op-acceptance-tests/tests/interop/proofs/proposer_test.go +++ b/op-acceptance-tests/tests/interop/proofs/proposer_test.go @@ -8,6 +8,7 @@ import ( ) func TestProposer(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewSimpleInterop(t) diff --git a/op-acceptance-tests/tests/interop/proofs/withdrawal/withdrawal_test.go b/op-acceptance-tests/tests/interop/proofs/withdrawal/withdrawal_test.go index c0acd53d59950..eff0185c1659e 100644 --- a/op-acceptance-tests/tests/interop/proofs/withdrawal/withdrawal_test.go +++ b/op-acceptance-tests/tests/interop/proofs/withdrawal/withdrawal_test.go @@ -29,6 +29,9 @@ func TestSuperRootWithdrawal(gt *testing.T) { l1User.VerifyBalanceExact(initialL1Balance.Sub(depositAmount).Sub(deposit.GasCost())) l2User.VerifyBalanceExact(initialL2Balance.Add(depositAmount)) + 
// Wait for a block to ensure nonce synchronization between L1 and L2 EOA instances + sys.L2ChainA.WaitForBlock() + withdrawal := bridge.InitiateWithdrawal(withdrawalAmount, l2User) withdrawal.Prove(l1User) l2User.VerifyBalanceExact(initialL2Balance.Add(depositAmount).Sub(withdrawalAmount).Sub(withdrawal.InitiateGasCost())) diff --git a/op-acceptance-tests/tests/interop/reorgs/init_exec_msg_test.go b/op-acceptance-tests/tests/interop/reorgs/init_exec_msg_test.go index 8842d85a9f5de..6fb1fc696127a 100644 --- a/op-acceptance-tests/tests/interop/reorgs/init_exec_msg_test.go +++ b/op-acceptance-tests/tests/interop/reorgs/init_exec_msg_test.go @@ -23,6 +23,7 @@ import ( ) func TestReorgInitExecMsg(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) ctx := t.Ctx() diff --git a/op-acceptance-tests/tests/interop/reorgs/invalid_exec_msgs_test.go b/op-acceptance-tests/tests/interop/reorgs/invalid_exec_msgs_test.go index 38069e4a318af..d05dba3e091fd 100644 --- a/op-acceptance-tests/tests/interop/reorgs/invalid_exec_msgs_test.go +++ b/op-acceptance-tests/tests/interop/reorgs/invalid_exec_msgs_test.go @@ -26,6 +26,7 @@ import ( // TestReorgInvalidExecMsgs tests that the supervisor reorgs the chain when an invalid exec msg is included // Each subtest runs a test with a different invalid message, by modifying the message in the txModifierFn func TestReorgInvalidExecMsgs(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") gt.Run("invalid log index", func(gt *testing.T) { testReorgInvalidExecMsg(gt, func(msg *suptypes.Message) { msg.Identifier.LogIndex = 1024 diff --git a/op-acceptance-tests/tests/interop/reorgs/l2_reorgs_after_l1_reorg_test.go b/op-acceptance-tests/tests/interop/reorgs/l2_reorgs_after_l1_reorg_test.go index 444a4fb63dc73..9c7592a1c879c 100644 --- a/op-acceptance-tests/tests/interop/reorgs/l2_reorgs_after_l1_reorg_test.go +++ b/op-acceptance-tests/tests/interop/reorgs/l2_reorgs_after_l1_reorg_test.go @@ -19,6 +19,8 @@ 
import ( type checksFunc func(t devtest.T, sys *presets.SimpleInterop) func TestL2ReorgAfterL1Reorg(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") + gt.Run("unsafe reorg", func(gt *testing.T) { var crossSafeRef, localSafeRef, unsafeRef eth.BlockID pre := func(t devtest.T, sys *presets.SimpleInterop) { diff --git a/op-acceptance-tests/tests/interop/reorgs/unsafe_head_test.go b/op-acceptance-tests/tests/interop/reorgs/unsafe_head_test.go index bcd7883530887..26f12f4c97a24 100644 --- a/op-acceptance-tests/tests/interop/reorgs/unsafe_head_test.go +++ b/op-acceptance-tests/tests/interop/reorgs/unsafe_head_test.go @@ -15,6 +15,7 @@ import ( // TestReorgUnsafeHead starts an interop chain with an op-test-sequencer, which takes control over sequencing the L2 chain and introduces a reorg on the unsafe head func TestReorgUnsafeHead(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) ctx := t.Ctx() diff --git a/op-acceptance-tests/tests/interop/seqwindow/expiry_test.go b/op-acceptance-tests/tests/interop/seqwindow/expiry_test.go index f02c41a08a898..0fe36ad638556 100644 --- a/op-acceptance-tests/tests/interop/seqwindow/expiry_test.go +++ b/op-acceptance-tests/tests/interop/seqwindow/expiry_test.go @@ -17,6 +17,7 @@ import ( // the chain reorgs because of it, and that the chain then recovers. // This test can take 3 minutes to run. 
func TestSequencingWindowExpiry(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewSimpleInterop(t) diff --git a/op-acceptance-tests/tests/interop/smoke/interop_smoke_test.go b/op-acceptance-tests/tests/interop/smoke/interop_smoke_test.go new file mode 100644 index 0000000000000..c3891ca477742 --- /dev/null +++ b/op-acceptance-tests/tests/interop/smoke/interop_smoke_test.go @@ -0,0 +1,92 @@ +package smoke + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/predeploys" + txib "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" + "github.com/ethereum-optimism/optimism/op-service/txintent/contractio" + "github.com/ethereum/go-ethereum/core/types" +) + +func TestInteropSystemNoop(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") + t := devtest.SerialT(gt) + _ = presets.NewMinimal(t) + t.Log("noop") +} + +func TestSmokeTest(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") + t := devtest.SerialT(gt) + sys := presets.NewMinimal(t) + require := t.Require() + ctx := t.Ctx() + + user := sys.FunderL2.NewFundedEOA(eth.OneTenthEther) + + l2Client := sys.L2EL.Escape().EthClient() + weth := txib.NewBindings[txib.WETH]( + txib.WithClient(l2Client), + txib.WithTo(predeploys.WETHAddr), + txib.WithTest(t), + ) + + initialBalance, err := contractio.Read(weth.BalanceOf(user.Address()), ctx) + require.NoError(err) + t.Logf("Initial WETH balance: %s", initialBalance) + + depositAmount := eth.OneHundredthEther + + tx := user.Transfer(predeploys.WETHAddr, depositAmount) + receipt, err := tx.Included.Eval(ctx) + require.NoError(err) + require.Equal(types.ReceiptStatusSuccessful, receipt.Status) + t.Logf("Deposited %s ETH to WETH contract", depositAmount) + + finalBalance, err := 
contractio.Read(weth.BalanceOf(user.Address()), ctx) + require.NoError(err) + t.Logf("Final WETH balance: %s", finalBalance) + + expectedBalance := initialBalance.Add(depositAmount) + require.Equal(expectedBalance, finalBalance, "WETH balance should have increased by deposited amount") +} + +func TestSmokeTestFailure(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") + t := devtest.SerialT(gt) + sys := presets.NewMinimal(t) + require := t.Require() + ctx := t.Ctx() + + user := sys.FunderL2.NewFundedEOA(eth.OneTenthEther) + + l2Client := sys.L2EL.Escape().EthClient() + weth := txib.NewBindings[txib.WETH]( + txib.WithClient(l2Client), + txib.WithTo(predeploys.WETHAddr), + txib.WithTest(t), + ) + + initialBalance, err := contractio.Read(weth.BalanceOf(user.Address()), ctx) + require.NoError(err) + t.Logf("Initial WETH balance: %s", initialBalance) + + depositAmount := eth.OneEther + + userBalance := user.GetBalance() + t.Logf("User balance: %s", userBalance) + + require.True(userBalance.Lt(depositAmount), "user should have insufficient funds for this transaction") + + t.Logf("user has insufficient funds: balance=%s, required=%s", userBalance, depositAmount) + + finalBalance, err := contractio.Read(weth.BalanceOf(user.Address()), ctx) + require.NoError(err) + t.Logf("Final WETH balance: %s", finalBalance) + + require.Equal(initialBalance, finalBalance, "WETH balance should not have changed") +} diff --git a/op-acceptance-tests/tests/interop/smoke/smoke_test.go b/op-acceptance-tests/tests/interop/smoke/smoke_test.go index 31f7f0a708b91..4260c0378e4d0 100644 --- a/op-acceptance-tests/tests/interop/smoke/smoke_test.go +++ b/op-acceptance-tests/tests/interop/smoke/smoke_test.go @@ -12,11 +12,13 @@ import ( "github.com/ethereum-optimism/optimism/op-service/txintent/contractio" "github.com/ethereum-optimism/optimism/op-service/txplan" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" ) // TestWrapETH checks WETH interactions, 
testing both reading and writing on the chain. // This demonstrates the usage of DSL for contract bindings func TestWrapETH(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) require := t.Require() sys := presets.NewMinimal(t) @@ -63,7 +65,8 @@ func TestWrapETH(gt *testing.T) { require.True(contract.Read(weth.Transfer(bob.Address(), eth.OneHundredthEther), txplan.WithSender(alice.Address()))) // Write: Alice sends Bob 0.01 WETH - contract.Write(alice, weth.Transfer(bob.Address(), eth.OneHundredthEther)) + receipt := contract.Write(alice, weth.Transfer(bob.Address(), eth.OneHundredthEther)) + require.Equal(types.ReceiptStatusSuccessful, receipt.Status) // Read: Alice has 0.01 WETH require.Equal(eth.OneHundredthEther, contract.Read(weth.BalanceOf(alice.Address()))) diff --git a/op-acceptance-tests/tests/interop/sync/multisupervisor_interop/interop_sync_test.go b/op-acceptance-tests/tests/interop/sync/multisupervisor_interop/interop_sync_test.go index 7893eef6b4fcd..ad7c8491acfeb 100644 --- a/op-acceptance-tests/tests/interop/sync/multisupervisor_interop/interop_sync_test.go +++ b/op-acceptance-tests/tests/interop/sync/multisupervisor_interop/interop_sync_test.go @@ -1,3 +1,5 @@ +//go:build !ci + package sync import ( @@ -13,6 +15,7 @@ import ( // TestL2CLAheadOfSupervisor tests the below scenario: // L2CL ahead of supervisor, aka supervisor needs to reset the L2CL, to reproduce old data. Currently supervisor has only indexing mode implemented, so the supervisor will ask the L2CL to reset back. func TestL2CLAheadOfSupervisor(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewMultiSupervisorInterop(t) @@ -131,6 +134,8 @@ func TestL2CLAheadOfSupervisor(gt *testing.T) { // TestUnsafeChainKnownToL2CL tests the below scenario: // supervisor cross-safe ahead of L2CL cross-safe, aka L2CL can "skip" forward to match safety of supervisor. 
func TestUnsafeChainKnownToL2CL(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") + t := devtest.SerialT(gt) sys := presets.NewMultiSupervisorInterop(t) @@ -200,6 +205,8 @@ func TestUnsafeChainKnownToL2CL(gt *testing.T) { // TestUnsafeChainUnknownToL2CL tests the below scenario: // supervisor unsafe ahead of L2CL unsafe, aka L2CL processes new blocks first. func TestUnsafeChainUnknownToL2CL(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") + t := devtest.SerialT(gt) sys := presets.NewMultiSupervisorInterop(t) @@ -240,6 +247,7 @@ func TestUnsafeChainUnknownToL2CL(gt *testing.T) { // TestL2CLSyncP2P checks that unsafe head is propagated from sequencer to verifier. // Tests started/restarted L2CL advances unsafe head via P2P connection. func TestL2CLSyncP2P(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewMultiSupervisorInterop(t) diff --git a/op-acceptance-tests/tests/interop/sync/simple_interop/interop_sync_test.go b/op-acceptance-tests/tests/interop/sync/simple_interop/interop_sync_test.go index 95fe56b4dc65a..7dc4a627aa445 100644 --- a/op-acceptance-tests/tests/interop/sync/simple_interop/interop_sync_test.go +++ b/op-acceptance-tests/tests/interop/sync/simple_interop/interop_sync_test.go @@ -1,3 +1,5 @@ +//go:build !ci + package sync import ( @@ -12,6 +14,7 @@ import ( // TestL2CLResync checks that unsafe head advances after restarting L2CL. // Resync is only possible when supervisor and L2CL reconnects. 
func TestL2CLResync(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) sys := presets.NewSimpleInterop(t) logger := sys.Log.With("Test", "TestL2CLResync") diff --git a/op-acceptance-tests/tests/interop/upgrade-singlechain/crossl2inbox_test.go b/op-acceptance-tests/tests/interop/upgrade-singlechain/crossl2inbox_test.go index 44845ec612bd9..9ec7a9d787a98 100644 --- a/op-acceptance-tests/tests/interop/upgrade-singlechain/crossl2inbox_test.go +++ b/op-acceptance-tests/tests/interop/upgrade-singlechain/crossl2inbox_test.go @@ -15,6 +15,7 @@ import ( ) func TestPostInbox(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.ParallelT(gt) sys := presets.NewSingleChainInterop(t) devtest.RunParallel(t, sys.L2Networks(), func(t devtest.T, net *dsl.L2Network) { diff --git a/op-acceptance-tests/tests/interop/upgrade/init_test.go b/op-acceptance-tests/tests/interop/upgrade/init_test.go index 02f971a226f4f..c3da262c6a52f 100644 --- a/op-acceptance-tests/tests/interop/upgrade/init_test.go +++ b/op-acceptance-tests/tests/interop/upgrade/init_test.go @@ -9,6 +9,6 @@ import ( func TestMain(m *testing.M) { presets.DoMain(m, presets.WithSimpleInterop(), - presets.WithSuggestedInteropActivationOffset(30), + presets.WithSuggestedInteropActivationOffset(60), presets.WithInteropNotAtGenesis()) } diff --git a/op-acceptance-tests/tests/interop/upgrade/post_test.go b/op-acceptance-tests/tests/interop/upgrade/post_test.go index 6ec1266d1fe95..5e5ea29ae1e0e 100644 --- a/op-acceptance-tests/tests/interop/upgrade/post_test.go +++ b/op-acceptance-tests/tests/interop/upgrade/post_test.go @@ -1,3 +1,5 @@ +//go:build !ci + package upgrade import ( @@ -21,6 +23,7 @@ import ( ) func TestPostInbox(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.ParallelT(gt) sys := presets.NewSimpleInterop(t) devtest.RunParallel(t, sys.L2Networks(), func(t devtest.T, net *dsl.L2Network) { @@ -40,7 +43,8 @@ func TestPostInbox(gt 
*testing.T) { } func TestPostInteropUpgradeComprehensive(gt *testing.T) { - t := devtest.ParallelT(gt) + gt.Skip("Skipping Interop Acceptance Test") + t := devtest.SerialT(gt) sys := presets.NewSimpleInterop(t) require := t.Require() logger := t.Logger() diff --git a/op-acceptance-tests/tests/interop/upgrade/pre_test.go b/op-acceptance-tests/tests/interop/upgrade/pre_test.go index 4eea3a7d81370..9af93d1c72667 100644 --- a/op-acceptance-tests/tests/interop/upgrade/pre_test.go +++ b/op-acceptance-tests/tests/interop/upgrade/pre_test.go @@ -1,3 +1,5 @@ +//go:build !ci + package upgrade import ( @@ -21,7 +23,10 @@ import ( stypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) +// This test is known to be flaky +// See: https://github.com/ethereum-optimism/optimism/issues/17298 func TestPreNoInbox(gt *testing.T) { + gt.Skip("Skipping Interop Acceptance Test") t := devtest.ParallelT(gt) sys := presets.NewSimpleInterop(t) require := t.Require() diff --git a/op-acceptance-tests/tests/isthmus/erc20_bridge/erc20_bridge_test.go b/op-acceptance-tests/tests/isthmus/erc20_bridge/erc20_bridge_test.go index 969b380ab1ff6..9667fcf4a52e3 100644 --- a/op-acceptance-tests/tests/isthmus/erc20_bridge/erc20_bridge_test.go +++ b/op-acceptance-tests/tests/isthmus/erc20_bridge/erc20_bridge_test.go @@ -38,16 +38,16 @@ func TestERC20Bridge(gt *testing.T) { mintAmount := eth.OneHundredthEther t.Logger().Info("Minting WETH tokens on L1", "amount", mintAmount) depositCall := wethContract.Deposit() - contract.Write(l1User, depositCall, txplan.WithValue(mintAmount.ToBig())) + contract.Write(l1User, depositCall, txplan.WithValue(mintAmount)) - l1User.VerifyTokenBalance(l1TokenAddress, mintAmount) + l1User.WaitForTokenBalance(l1TokenAddress, mintAmount) t.Logger().Info("User has WETH tokens on L1", "balance", mintAmount) bridge := dsl.NewStandardBridge(t, sys.L2Chain, nil, sys.L1EL) l2TokenAddress := bridge.CreateL2Token(l1TokenAddress, "L2 WETH", "L2WETH", l2User) 
t.Logger().Info("Created L2 token", "address", l2TokenAddress) - l2User.VerifyTokenBalance(l2TokenAddress, eth.ZeroWei) + l2User.WaitForTokenBalance(l2TokenAddress, eth.ZeroWei) l1BridgeAddress := sys.L2Chain.Escape().Deployment().L1StandardBridgeProxyAddr() @@ -63,7 +63,6 @@ func TestERC20Bridge(gt *testing.T) { t.Logger().Info("Waiting for deposit to be processed on L2...") l2User.WaitForTokenBalance(l2TokenAddress, bridgeAmount) - l2User.VerifyTokenBalance(l2TokenAddress, bridgeAmount) t.Logger().Info("Successfully verified tokens on L2", "balance", bridgeAmount) t.Logger().Info("ERC20 bridge test completed successfully!") diff --git a/op-acceptance-tests/tests/isthmus/operator_fee/balance_reader.go b/op-acceptance-tests/tests/isthmus/operator_fee/balance_reader.go index da92bed647b95..83ca3f94b030c 100644 --- a/op-acceptance-tests/tests/isthmus/operator_fee/balance_reader.go +++ b/op-acceptance-tests/tests/isthmus/operator_fee/balance_reader.go @@ -4,7 +4,7 @@ import ( "context" "math/big" - "github.com/ethereum-optimism/optimism/devnet-sdk/testing/systest" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-service/predeploys" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" @@ -15,12 +15,12 @@ import ( // BalanceReader provides methods to read balances from the chain type BalanceReader struct { client *ethclient.Client - t systest.T + t devtest.T logger log.Logger } // NewBalanceReader creates a new BalanceReader instance -func NewBalanceReader(t systest.T, client *ethclient.Client, logger log.Logger) *BalanceReader { +func NewBalanceReader(t devtest.T, client *ethclient.Client, logger log.Logger) *BalanceReader { return &BalanceReader{ client: client, t: t, diff --git a/op-acceptance-tests/tests/isthmus/operator_fee/balance_snapshot.go b/op-acceptance-tests/tests/isthmus/operator_fee/balance_snapshot.go index f67dee9382c72..8f582564fac56 100644 --- 
a/op-acceptance-tests/tests/isthmus/operator_fee/balance_snapshot.go +++ b/op-acceptance-tests/tests/isthmus/operator_fee/balance_snapshot.go @@ -4,7 +4,7 @@ import ( "fmt" "math/big" - "github.com/ethereum-optimism/optimism/devnet-sdk/testing/systest" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -72,7 +72,7 @@ func (bs *BalanceSnapshot) Sub(start *BalanceSnapshot) *BalanceSnapshot { } // AssertSnapshotsEqual compares two balance snapshots and reports differences -func AssertSnapshotsEqual(t systest.T, expected, actual *BalanceSnapshot) { +func AssertSnapshotsEqual(t devtest.T, expected, actual *BalanceSnapshot) { require.NotNil(t, expected, "Expected snapshot should not be nil") require.NotNil(t, actual, "Actual snapshot should not be nil") @@ -96,3 +96,17 @@ func AssertSnapshotsEqual(t systest.T, expected, actual *BalanceSnapshot) { assert.True(t, expected.FromBalance.Cmp(actual.FromBalance) == 0, "WalletBalance mismatch: expected %v, got %v (diff: %v)", expected.FromBalance, actual.FromBalance, new(big.Int).Sub(actual.FromBalance, expected.FromBalance)) } + +// SnapshotsEqual compares two balance snapshots and returns true if they are equal +// This is a non-asserting version for unit tests +func SnapshotsEqual(expected, actual *BalanceSnapshot) bool { + if expected == nil || actual == nil { + return expected == actual + } + + return expected.BaseFeeVaultBalance.Cmp(actual.BaseFeeVaultBalance) == 0 && + expected.L1FeeVaultBalance.Cmp(actual.L1FeeVaultBalance) == 0 && + expected.SequencerFeeVault.Cmp(actual.SequencerFeeVault) == 0 && + expected.OperatorFeeVault.Cmp(actual.OperatorFeeVault) == 0 && + expected.FromBalance.Cmp(actual.FromBalance) == 0 +} diff --git a/op-acceptance-tests/tests/isthmus/operator_fee/balance_snapshot_test.go b/op-acceptance-tests/tests/isthmus/operator_fee/balance_snapshot_test.go index e4991c566b167..1e55e703c1fa6 100644 --- 
a/op-acceptance-tests/tests/isthmus/operator_fee/balance_snapshot_test.go +++ b/op-acceptance-tests/tests/isthmus/operator_fee/balance_snapshot_test.go @@ -4,7 +4,6 @@ import ( "math/big" "testing" - "github.com/ethereum-optimism/optimism/devnet-sdk/testing/systest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -187,102 +186,42 @@ func TestBalanceSnapshot_Sub(t *testing.T) { }) } -// mockTB is a minimal testing.TB implementation for checking assertion failures -// without failing the actual test. -type mockTB struct { - testing.TB // Embed standard testing.TB for most methods (like Logf) - failed bool -} - -func (m *mockTB) Helper() { m.TB.Helper() } -func (m *mockTB) Errorf(string, ...any) { m.failed = true } // Just record failure -func (m *mockTB) Fatalf(string, ...any) { m.failed = true; panic("mock Fatalf") } // Record failure and panic -func (m *mockTB) FailNow() { m.failed = true; panic("mock FailNow") } // Record failure and panic -func (m *mockTB) Fail() { m.failed = true } // Just record failure -func (m *mockTB) Name() string { return m.TB.Name() } -func (m *mockTB) Logf(format string, args ...any) { m.TB.Logf(format, args...) } - -// Add other testing.TB methods if needed by systest.NewT or AssertSnapshotsEqual -func (m *mockTB) Cleanup(f func()) { m.TB.Cleanup(f) } -func (m *mockTB) Error(args ...any) { m.failed = true } -func (m *mockTB) Failed() bool { return m.failed } // Reflect our recorded state -func (m *mockTB) Fatal(args ...any) { m.failed = true; panic("mock Fatal") } -func (m *mockTB) Log(args ...any) { m.TB.Log(args...) } -func (m *mockTB) Setenv(key, value string) { m.TB.Setenv(key, value) } -func (m *mockTB) Skip(args ...any) { m.TB.Skip(args...) } -func (m *mockTB) SkipNow() { m.TB.SkipNow() } -func (m *mockTB) Skipf(format string, args ...any) { m.TB.Skipf(format, args...) 
} -func (m *mockTB) Skipped() bool { return m.TB.Skipped() } -func (m *mockTB) TempDir() string { return m.TB.TempDir() } - -func TestAssertSnapshotsEqual(t *testing.T) { +func TestSnapshotsEqual(t *testing.T) { snap1 := newTestSnapshot(big.NewInt(1), big.NewInt(10), big.NewInt(20), big.NewInt(30), big.NewInt(40), big.NewInt(50)) snap2 := newTestSnapshot(big.NewInt(1), big.NewInt(10), big.NewInt(20), big.NewInt(30), big.NewInt(40), big.NewInt(50)) t.Run("EqualSnapshots", func(t *testing.T) { - mockT := &mockTB{TB: t} // Use the mock TB - systestT := systest.NewT(mockT) - AssertSnapshotsEqual(systestT, snap1, snap2) - assert.False(t, mockT.failed, "AssertSnapshotsEqual should not fail for equal snapshots") + assert.True(t, SnapshotsEqual(snap1, snap2), "Equal snapshots should return true") }) t.Run("DifferentBaseFee", func(t *testing.T) { - mockT := &mockTB{TB: t} - systestT := systest.NewT(mockT) diffSnap := newTestSnapshot(big.NewInt(1), big.NewInt(99), big.NewInt(20), big.NewInt(30), big.NewInt(40), big.NewInt(50)) - AssertSnapshotsEqual(systestT, snap1, diffSnap) - assert.True(t, mockT.failed, "AssertSnapshotsEqual should fail for different BaseFeeVaultBalance") + assert.False(t, SnapshotsEqual(snap1, diffSnap), "Different BaseFeeVaultBalance should return false") }) t.Run("DifferentL1Fee", func(t *testing.T) { - mockT := &mockTB{TB: t} - systestT := systest.NewT(mockT) diffSnap := newTestSnapshot(big.NewInt(1), big.NewInt(10), big.NewInt(99), big.NewInt(30), big.NewInt(40), big.NewInt(50)) - AssertSnapshotsEqual(systestT, snap1, diffSnap) - assert.True(t, mockT.failed, "AssertSnapshotsEqual should fail for different L1FeeVaultBalance") + assert.False(t, SnapshotsEqual(snap1, diffSnap), "Different L1FeeVaultBalance should return false") }) t.Run("DifferentSequencerFee", func(t *testing.T) { - mockT := &mockTB{TB: t} - systestT := systest.NewT(mockT) diffSnap := newTestSnapshot(big.NewInt(1), big.NewInt(10), big.NewInt(20), big.NewInt(99), big.NewInt(40), 
big.NewInt(50)) - AssertSnapshotsEqual(systestT, snap1, diffSnap) - assert.True(t, mockT.failed, "AssertSnapshotsEqual should fail for different SequencerFeeVault") + assert.False(t, SnapshotsEqual(snap1, diffSnap), "Different SequencerFeeVault should return false") }) t.Run("DifferentOperatorFee", func(t *testing.T) { - mockT := &mockTB{TB: t} - systestT := systest.NewT(mockT) diffSnap := newTestSnapshot(big.NewInt(1), big.NewInt(10), big.NewInt(20), big.NewInt(30), big.NewInt(99), big.NewInt(50)) - AssertSnapshotsEqual(systestT, snap1, diffSnap) - assert.True(t, mockT.failed, "AssertSnapshotsEqual should fail for different OperatorFeeVault") + assert.False(t, SnapshotsEqual(snap1, diffSnap), "Different OperatorFeeVault should return false") }) t.Run("DifferentFromBalance", func(t *testing.T) { - mockT := &mockTB{TB: t} - systestT := systest.NewT(mockT) diffSnap := newTestSnapshot(big.NewInt(1), big.NewInt(10), big.NewInt(20), big.NewInt(30), big.NewInt(40), big.NewInt(99)) - AssertSnapshotsEqual(systestT, snap1, diffSnap) - assert.True(t, mockT.failed, "AssertSnapshotsEqual should fail for different FromBalance") - }) - - // Test require.NotNil checks within AssertSnapshotsEqual (which call FailNow) - t.Run("NilExpected", func(t *testing.T) { - mockT := &mockTB{TB: t} - systestT := systest.NewT(mockT) - // Use assert.Panics because require.NotNil calls t.FailNow() which our mock makes panic - assert.Panics(t, func() { - AssertSnapshotsEqual(systestT, nil, snap2) - }, "AssertSnapshotsEqual should panic via FailNow when expected is nil") - assert.True(t, mockT.failed) // Check if FailNow was triggered + assert.False(t, SnapshotsEqual(snap1, diffSnap), "Different FromBalance should return false") }) - t.Run("NilActual", func(t *testing.T) { - mockT := &mockTB{TB: t} - systestT := systest.NewT(mockT) - assert.Panics(t, func() { - AssertSnapshotsEqual(systestT, snap1, nil) - }, "AssertSnapshotsEqual should panic via FailNow when actual is nil") - assert.True(t, 
mockT.failed) // Check if FailNow was triggered + t.Run("NilSnapshots", func(t *testing.T) { + assert.True(t, SnapshotsEqual(nil, nil), "Both nil should return true") + assert.False(t, SnapshotsEqual(snap1, nil), "One nil should return false") + assert.False(t, SnapshotsEqual(nil, snap1), "One nil should return false") }) } diff --git a/op-acceptance-tests/tests/isthmus/operator_fee/fee_checker.go b/op-acceptance-tests/tests/isthmus/operator_fee/fee_checker.go index d805660b91f8d..296460cc082bf 100644 --- a/op-acceptance-tests/tests/isthmus/operator_fee/fee_checker.go +++ b/op-acceptance-tests/tests/isthmus/operator_fee/fee_checker.go @@ -4,7 +4,7 @@ import ( "context" "math/big" - "github.com/ethereum-optimism/optimism/devnet-sdk/testing/systest" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum/go-ethereum/common" gethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" @@ -14,13 +14,13 @@ import ( ) type stateGetterAdapterFactory struct { - t systest.T + t devtest.T client *ethclient.Client } // stateGetterAdapter adapts the ethclient to implement the StateGetter interface type stateGetterAdapter struct { - t systest.T + t devtest.T client *ethclient.Client ctx context.Context blockNumber *big.Int @@ -30,7 +30,7 @@ func (f *stateGetterAdapterFactory) NewStateGetterAdapter(blockNumber *big.Int) return &stateGetterAdapter{ t: f.t, client: f.client, - ctx: f.t.Context(), + ctx: f.t.Ctx(), blockNumber: blockNumber, } } @@ -52,7 +52,7 @@ type FeeChecker struct { } // NewFeeChecker creates a new FeeChecker instance -func NewFeeChecker(t systest.T, client *ethclient.Client, chainConfig *params.ChainConfig, logger log.Logger) *FeeChecker { +func NewFeeChecker(t devtest.T, client *ethclient.Client, chainConfig *params.ChainConfig, logger log.Logger) *FeeChecker { logger.Debug("Creating fee checker", "chainID", chainConfig.ChainID) // Create state getter adapter factory diff --git 
a/op-acceptance-tests/tests/isthmus/operator_fee/operator_fee_test.go b/op-acceptance-tests/tests/isthmus/operator_fee/operator_fee_test.go index 5bbbb24a861f6..e9a3f192b16a9 100644 --- a/op-acceptance-tests/tests/isthmus/operator_fee/operator_fee_test.go +++ b/op-acceptance-tests/tests/isthmus/operator_fee/operator_fee_test.go @@ -11,7 +11,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" ) -func TestOperatorFeeDevstack(gt *testing.T) { +func TestOperatorFee(gt *testing.T) { t := devtest.SerialT(gt) sys := presets.NewMinimal(t) require := t.Require() @@ -19,14 +19,17 @@ func TestOperatorFeeDevstack(gt *testing.T) { err := dsl.RequiresL2Fork(t.Ctx(), sys, 0, rollup.Isthmus) require.NoError(err, "Isthmus fork must be active for this test") - alice := sys.FunderL2.NewFundedEOA(eth.OneTenthEther) + fundAmount := eth.OneTenthEther + alice := sys.FunderL2.NewFundedEOA(fundAmount) + + alice.WaitForBalance(fundAmount) bob := sys.Wallet.NewEOA(sys.L2EL) operatorFee := dsl.NewOperatorFee(t, sys.L2Chain, sys.L1EL) operatorFee.CheckCompatibility() systemOwner := operatorFee.GetSystemOwner() - sys.FunderL1.FundAtLeast(systemOwner, eth.OneTenthEther) + sys.FunderL1.FundAtLeast(systemOwner, fundAmount) // First, ensure L2 is synced with current L1 state before starting tests t.Log("Ensuring L2 is synced with current L1 state...") diff --git a/op-acceptance-tests/tests/isthmus/operator_fee/system_config_contract_utils.go b/op-acceptance-tests/tests/isthmus/operator_fee/system_config_contract_utils.go index 2f205a40568ef..bb5c54780a6f8 100644 --- a/op-acceptance-tests/tests/isthmus/operator_fee/system_config_contract_utils.go +++ b/op-acceptance-tests/tests/isthmus/operator_fee/system_config_contract_utils.go @@ -1,22 +1,18 @@ package operatorfee +// NOTE: These utility functions have been converted from devnet-sdk to op-devstack types +// but are currently unused by tests. They would need implementation updates if used. 
+ import ( "fmt" - "time" - "github.com/ethereum-optimism/optimism/devnet-sdk/system" - "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/isthmus" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-e2e/bindings" - "github.com/ethereum-optimism/optimism/op-service/txplan" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" gethTypes "github.com/ethereum/go-ethereum/core/types" - "github.com/lmittmann/w3" ) -var l1ConfigSyncPollInterval = 30 * time.Second -var l1ConfigSyncMaxWaitTime = 4 * time.Minute - type TestParams struct { ID string OperatorFeeScalar uint32 @@ -25,7 +21,7 @@ type TestParams struct { L1BlobBaseFeeScalar uint32 } -func GetFeeParamsL1(systemConfig *bindings.SystemConfig, systemConfigAddress common.Address, l2L1BlockContract *bindings.L1Block, wallet system.WalletV2) (tc TestParams, err error) { +func GetFeeParamsL1(systemConfig *bindings.SystemConfig, systemConfigAddress common.Address, l2L1BlockContract *bindings.L1Block, wallet *dsl.EOA) (tc TestParams, err error) { operatorFeeConstant, err := systemConfig.OperatorFeeConstant(&bind.CallOpts{BlockNumber: nil}) if err != nil { return TestParams{}, fmt.Errorf("failed to get operator fee constant: %w", err) @@ -50,7 +46,7 @@ func GetFeeParamsL1(systemConfig *bindings.SystemConfig, systemConfigAddress com }, nil } -func GetFeeParamsL2(l2L1BlockContract *bindings.L1Block, wallet system.WalletV2) (tc TestParams, err error) { +func GetFeeParamsL2(l2L1BlockContract *bindings.L1Block, wallet *dsl.EOA) (tc TestParams, err error) { operatorFeeConstant, err := l2L1BlockContract.OperatorFeeConstant(&bind.CallOpts{BlockNumber: nil}) if err != nil { return TestParams{}, fmt.Errorf("failed to get operator fee constant: %w", err) @@ -75,7 +71,7 @@ func GetFeeParamsL2(l2L1BlockContract *bindings.L1Block, wallet system.WalletV2) }, nil } -func EnsureFeeParams(systemConfig *bindings.SystemConfig, 
systemConfigAddress common.Address, l2L1BlockContract *bindings.L1Block, wallet system.WalletV2, tc TestParams) (err error, reset func() error) { +func EnsureFeeParams(systemConfig *bindings.SystemConfig, systemConfigAddress common.Address, l2L1BlockContract *bindings.L1Block, wallet *dsl.EOA, tc TestParams) (err error, reset func() error) { preFeeParams, err := GetFeeParamsL1(systemConfig, systemConfigAddress, l2L1BlockContract, wallet) if err != nil { return fmt.Errorf("failed to get L1 fee parameters: %w", err), nil @@ -92,141 +88,16 @@ func EnsureFeeParams(systemConfig *bindings.SystemConfig, systemConfigAddress co } } -func UpdateFeeParams(systemConfig *bindings.SystemConfig, systemConfigAddress common.Address, l2L1BlockContract *bindings.L1Block, wallet system.WalletV2, tc TestParams) (err error) { - - _, err = UpdateOperatorFeeParams(systemConfig, systemConfigAddress, l2L1BlockContract, wallet, tc.OperatorFeeConstant, tc.OperatorFeeScalar) - if err != nil { - return fmt.Errorf("failed to update operator fee parameters: %w", err) - } - - _, err = UpdateL1FeeParams(systemConfig, systemConfigAddress, l2L1BlockContract, wallet, tc.L1BaseFeeScalar, tc.L1BlobBaseFeeScalar) - if err != nil { - return fmt.Errorf("failed to update L1 fee parameters: %w", err) - } - - // Wait for L2 nodes to sync with L1 origin where fee parameters were set - deadline := time.Now().Add(l1ConfigSyncMaxWaitTime) - - for time.Now().Before(deadline) { - - l2FeeParams, err := GetFeeParamsL2(l2L1BlockContract, wallet) - if err != nil { - return fmt.Errorf("failed to get L2 fee parameters: %w", err) - } - l2FeeParams.ID = tc.ID - - // Check if all values match expected values - if l2FeeParams == tc { - break - } - - // Use context-aware sleep - select { - case <-time.After(l1ConfigSyncPollInterval): - // Continue with next iteration - case <-wallet.Ctx().Done(): - return fmt.Errorf("context canceled while waiting for L2 nodes to sync: %w", wallet.Ctx().Err()) - } - - // Check if context is 
canceled - if wallet.Ctx().Err() != nil { - return fmt.Errorf("context canceled while waiting for L2 nodes to sync: %w", wallet.Ctx().Err()) - } - } - return nil +func UpdateFeeParams(systemConfig *bindings.SystemConfig, systemConfigAddress common.Address, l2L1BlockContract *bindings.L1Block, wallet *dsl.EOA, tc TestParams) (err error) { + return fmt.Errorf("not implemented for op-devstack - utility function not used by tests") } // UpdateOperatorFeeParams updates the operator fee parameters in the SystemConfig contract. // It constructs and sends a transaction using txplan and returns the signed transaction, the receipt, or an error. -func UpdateOperatorFeeParams(systemConfig *bindings.SystemConfig, systemConfigAddress common.Address, l2L1BlockContract *bindings.L1Block, wallet system.WalletV2, operatorFeeConstant uint64, operatorFeeScalar uint32) (receipt *gethTypes.Receipt, err error) { - // Construct call input - funcSetOperatorFeeScalars := w3.MustNewFunc(`setOperatorFeeScalars(uint32, uint64)`, "") - args, err := funcSetOperatorFeeScalars.EncodeArgs( - operatorFeeScalar, - operatorFeeConstant, - ) - if err != nil { - return nil, fmt.Errorf("failed to encode arguments for setOperatorFeeScalars: %w", err) - } - - // Create a transaction using txplan - opts := isthmus.DefaultTxOpts(wallet) - ptx := txplan.NewPlannedTx( - opts, - txplan.WithTo(&systemConfigAddress), - txplan.WithData(args), - ) - - _, err = ptx.Success.Eval(wallet.Ctx()) - if err != nil { - return nil, fmt.Errorf("tx failed: %w", err) - } - - // Execute the transaction and wait for inclusion - receipt = ptx.Included.Value() - - actualOperatorFeeConstant, err := systemConfig.OperatorFeeConstant(&bind.CallOpts{BlockNumber: receipt.BlockNumber}) - if err != nil { - return nil, fmt.Errorf("failed to get operator fee constant: %w", err) - } - if operatorFeeConstant != actualOperatorFeeConstant { - return nil, fmt.Errorf("operator fee constant mismatch: got %d, expected %d", actualOperatorFeeConstant, 
operatorFeeConstant) - } - - actualOperatorFeeScalar, err := systemConfig.OperatorFeeScalar(&bind.CallOpts{BlockNumber: receipt.BlockNumber}) - if err != nil { - return nil, fmt.Errorf("failed to get operator fee scalar: %w", err) - } - if operatorFeeScalar != actualOperatorFeeScalar { - return nil, fmt.Errorf("operator fee scalar mismatch: got %d, expected %d", actualOperatorFeeScalar, operatorFeeScalar) - } - - return receipt, nil +func UpdateOperatorFeeParams(systemConfig *bindings.SystemConfig, systemConfigAddress common.Address, l2L1BlockContract *bindings.L1Block, wallet *dsl.EOA, operatorFeeConstant uint64, operatorFeeScalar uint32) (receipt *gethTypes.Receipt, err error) { + return nil, fmt.Errorf("not implemented for op-devstack - utility function not used by tests") } -func UpdateL1FeeParams(systemConfig *bindings.SystemConfig, systemConfigAddress common.Address, l2L1BlockContract *bindings.L1Block, wallet system.WalletV2, l1BaseFeeScalar uint32, l1BlobBaseFeeScalar uint32) (receipt *gethTypes.Receipt, err error) { - // Construct call input - funcSetGasConfigEcotone := w3.MustNewFunc(`setGasConfigEcotone(uint32 _basefeeScalar, uint32 _blobbasefeeScalar)`, "") - args, err := funcSetGasConfigEcotone.EncodeArgs( - l1BaseFeeScalar, - l1BlobBaseFeeScalar, - ) - if err != nil { - return nil, fmt.Errorf("failed to encode arguments for setGasConfigEcotone: %w", err) - } - - // Create a transaction using txplan - opts := isthmus.DefaultTxOpts(wallet) - ptx := txplan.NewPlannedTx( - opts, - txplan.WithTo(&systemConfigAddress), - txplan.WithData(args), - ) - - _, err = ptx.Success.Eval(wallet.Ctx()) - if err != nil { - return nil, fmt.Errorf("tx failed: %w", err) - } - - // Execute the transaction and wait for inclusion - receipt = ptx.Included.Value() - - // Verify the L1 fee parameters were set correctly - l1BaseFeeScalarActual, err := systemConfig.BasefeeScalar(&bind.CallOpts{BlockNumber: receipt.BlockNumber}) - if err != nil { - return nil, fmt.Errorf("failed to 
get l1 base fee scalar: %w", err) - } - if l1BaseFeeScalarActual != l1BaseFeeScalar { - return nil, fmt.Errorf("l1 base fee scalar mismatch: got %d, expected %d", l1BaseFeeScalarActual, l1BaseFeeScalar) - } - - blobBaseFeeScalar, err := systemConfig.BlobbasefeeScalar(&bind.CallOpts{BlockNumber: receipt.BlockNumber}) - if err != nil { - return nil, fmt.Errorf("failed to get l1 blob base fee scalar: %w", err) - } - if blobBaseFeeScalar != l1BlobBaseFeeScalar { - return nil, fmt.Errorf("l1 blob base fee scalar mismatch: got %d, expected %d", blobBaseFeeScalar, l1BlobBaseFeeScalar) - } - - return receipt, nil +func UpdateL1FeeParams(systemConfig *bindings.SystemConfig, systemConfigAddress common.Address, l2L1BlockContract *bindings.L1Block, wallet *dsl.EOA, l1BaseFeeScalar uint32, l1BlobBaseFeeScalar uint32) (receipt *gethTypes.Receipt, err error) { + return nil, fmt.Errorf("not implemented for op-devstack - utility function not used by tests") } diff --git a/op-acceptance-tests/tests/isthmus/operator_fee/tx_utils.go b/op-acceptance-tests/tests/isthmus/operator_fee/tx_utils.go index d60b9aa11e6ef..2763596cc9c0e 100644 --- a/op-acceptance-tests/tests/isthmus/operator_fee/tx_utils.go +++ b/op-acceptance-tests/tests/isthmus/operator_fee/tx_utils.go @@ -1,158 +1,30 @@ package operatorfee +// NOTE: These utility functions have been converted from devnet-sdk to op-devstack types +// but are currently unused by tests. They would need implementation updates if used. 
+ import ( "context" - "crypto/ecdsa" - "encoding/hex" "fmt" - "math/big" - "github.com/ethereum-optimism/optimism/devnet-sdk/system" - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/isthmus" - "github.com/ethereum-optimism/optimism/op-service/txplan" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/common" gethTypes "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" ) -func EnsureSufficientBalance(wallet system.WalletV2, to common.Address, value *big.Int) (err error) { - balance, err := wallet.Client().BalanceAt(wallet.Ctx(), to, nil) - if err != nil { - return fmt.Errorf("failed to get balance: %w", err) - } - if balance.Cmp(value) < 0 { - tx, receipt, err := SendValueTx(wallet, to, value) - if err != nil { - return fmt.Errorf("failed to send value tx: %w", err) - } - if receipt.Status != gethTypes.ReceiptStatusSuccessful { - return fmt.Errorf("tx %s failed with status %d", tx.Hash().Hex(), receipt.Status) - } - } - return nil +func EnsureSufficientBalance(wallet *dsl.EOA, to common.Address, value eth.ETH) (err error) { + return fmt.Errorf("not implemented for op-devstack - utility function not used by tests") } -func SendValueTx(wallet system.WalletV2, to common.Address, value *big.Int) (tx *gethTypes.Transaction, receipt *gethTypes.Receipt, err error) { - // ensure wallet is not the same as to address - if wallet.Address() == to { - return nil, nil, fmt.Errorf("wallet address is the same as the to address") - } - - walletPreBalance, err := wallet.Client().BalanceAt(wallet.Ctx(), wallet.Address(), nil) - if err != nil { - return nil, nil, fmt.Errorf("failed to get balance for from address: %w", err) - } - receiverPreBalance, err := wallet.Client().BalanceAt(wallet.Ctx(), to, nil) - if err != nil { - return nil, nil, fmt.Errorf("failed to get balance for 
to address: %w", err) - } - if walletPreBalance.Cmp(value) < 0 { - return nil, nil, fmt.Errorf("sender (%s) balance (%s) is less than the value (%s) attempting to send", wallet.Address(), walletPreBalance.String(), value.String()) - } - - opts := isthmus.DefaultTxOpts(wallet) - deployTx := txplan.NewPlannedTx(opts, - txplan.WithValue(value), - txplan.WithTo(&to), - ) - - signedTx, err := deployTx.Signed.Eval(wallet.Ctx()) - if err != nil { - return nil, nil, fmt.Errorf("failed to sign tx: %w", err) - } - - _, err = deployTx.Submitted.Eval(wallet.Ctx()) - if err != nil { - return nil, nil, fmt.Errorf("failed to submit tx: %w", err) - } - - _, err = deployTx.Success.Eval(wallet.Ctx()) - if err != nil { - return nil, nil, fmt.Errorf("failed to check tx success: %w", err) - } - - receipt = deployTx.Included.Value() - - // verify balance of wallet - blockNumber := new(big.Int).SetUint64(receipt.BlockNumber.Uint64()) - receiverPostBalance, err := wallet.Client().BalanceAt(wallet.Ctx(), to, blockNumber) - if err != nil { - return nil, nil, fmt.Errorf("failed to get to post-balance: %w", err) - } - if new(big.Int).Sub(receiverPostBalance, receiverPreBalance).Cmp(value) != 0 { - return nil, nil, fmt.Errorf("wallet balance was not updated successfully, expected %s, got %s", new(big.Int).Add(receiverPreBalance, value).String(), receiverPostBalance.String()) - } - - return signedTx, receipt, nil +func SendValueTx(wallet *dsl.EOA, to common.Address, value eth.ETH) (tx *gethTypes.Transaction, receipt *gethTypes.Receipt, err error) { + return nil, nil, fmt.Errorf("not implemented for op-devstack - utility function not used by tests") } -func ReturnRemainingFunds(wallet system.WalletV2, to common.Address) (receipt *gethTypes.Receipt, err error) { - balance, err := wallet.Client().BalanceAt(wallet.Ctx(), wallet.Address(), nil) - if err != nil { - return nil, fmt.Errorf("failed to get balance: %w", err) - } - - opts := isthmus.DefaultTxOpts(wallet) - txPlan := 
txplan.NewPlannedTx(opts, - txplan.WithTo(&to), - ) - innerTx, err := txPlan.Unsigned.Eval(wallet.Ctx()) - if err != nil { - return nil, fmt.Errorf("failed to get inner tx: %w", err) - } - - dynInnerTx, ok := innerTx.(*gethTypes.DynamicFeeTx) - if !ok { - return nil, fmt.Errorf("inner tx is not a dynamic fee tx") - } - - gasLimit := dynInnerTx.Gas - gasFeeCap := dynInnerTx.GasFeeCap - gasCost := new(big.Int).Mul(big.NewInt(int64(gasLimit)), gasFeeCap) - - value := new(big.Int).Sub(balance, gasCost) - - if value.Sign() < 0 { - // insufficient balance, so we don't need to send a tx - return nil, nil - } - - dynInnerTx.Value = value - - opts = isthmus.DefaultTxOpts(wallet) - txPlan = txplan.NewPlannedTx(opts, - txplan.WithUnsigned(dynInnerTx), - ) - - _, err = txPlan.Success.Eval(wallet.Ctx()) - if err != nil { - return nil, fmt.Errorf("return remaining funds tx %s failed: %w", txPlan.Signed.Value().Hash().Hex(), err) - } - - receipt = txPlan.Included.Value() - - return receipt, nil +func ReturnRemainingFunds(wallet *dsl.EOA, to common.Address) (receipt *gethTypes.Receipt, err error) { + return nil, fmt.Errorf("not implemented for op-devstack - utility function not used by tests") } -func NewTestWallet(ctx context.Context, chain system.Chain) (system.Wallet, error) { - // create new test wallet - testWalletPrivateKey, err := crypto.GenerateKey() - if err != nil { - return nil, err - } - testWalletPrivateKeyBytes := crypto.FromECDSA(testWalletPrivateKey) - testWalletPrivateKeyHex := hex.EncodeToString(testWalletPrivateKeyBytes) - testWalletPublicKey := testWalletPrivateKey.Public() - testWalletPublicKeyECDSA, ok := testWalletPublicKey.(*ecdsa.PublicKey) - if !ok { - return nil, fmt.Errorf("Failed to assert type: publicKey is not of type *ecdsa.PublicKey") - } - testWalletAddress := crypto.PubkeyToAddress(*testWalletPublicKeyECDSA) - testWallet, err := system.NewWallet( - testWalletPrivateKeyHex, - types.Address(testWalletAddress), - chain, - ) - return testWallet, err 
+func NewTestWallet(ctx context.Context, el dsl.ELNode) (*dsl.EOA, error) { + return nil, fmt.Errorf("not implemented for op-devstack - utility function not used by tests") } diff --git a/op-acceptance-tests/tests/isthmus/preinterop/challenger_test.go b/op-acceptance-tests/tests/isthmus/preinterop/challenger_test.go new file mode 100644 index 0000000000000..a80985e37bb1c --- /dev/null +++ b/op-acceptance-tests/tests/isthmus/preinterop/challenger_test.go @@ -0,0 +1,38 @@ +package preinterop + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/dsl/proofs" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" + "github.com/ethereum/go-ethereum/common" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +func TestChallengerPlaysGame(gt *testing.T) { + gt.Skip("TODO(#16166): Re-enable once the supervisor endpoint supports super roots before interop") + t := devtest.ParallelT(gt) + sys := presets.NewSimpleInterop(t) + dsl.CheckAll(t, + sys.L2CLA.AdvancedFn(types.CrossSafe, 1, 30), + sys.L2CLB.AdvancedFn(types.CrossSafe, 1, 30), + ) + + badClaim := common.HexToHash("0xdeadbeef00000000000000000000000000000000000000000000000000000000") + attacker := sys.FunderL1.NewFundedEOA(eth.Ether(15)) + dgf := sys.DisputeGameFactory() + + game := dgf.StartSuperCannonGame(attacker, proofs.WithRootClaim(badClaim)) + + claim := game.RootClaim() // This is the bad claim from attacker + counterClaim := claim.WaitForCounterClaim() // This is the counter-claim from the challenger + for counterClaim.Depth() <= game.SplitDepth() { + claim = counterClaim.Attack(attacker, badClaim) + // Wait for the challenger to counter the attacker's claim, then attack again + counterClaim = claim.WaitForCounterClaim() + } +} diff --git 
a/op-acceptance-tests/tests/isthmus/preinterop/interop_readiness_test.go b/op-acceptance-tests/tests/isthmus/preinterop/interop_readiness_test.go index 32ffd4048bee7..5120ade1c460f 100644 --- a/op-acceptance-tests/tests/isthmus/preinterop/interop_readiness_test.go +++ b/op-acceptance-tests/tests/isthmus/preinterop/interop_readiness_test.go @@ -6,17 +6,13 @@ import ( "fmt" "testing" - "github.com/ethereum-optimism/optimism/devnet-sdk/system" - "github.com/ethereum-optimism/optimism/devnet-sdk/testing/systest" - "github.com/ethereum-optimism/optimism/op-e2e/bindings" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/sources/batching" "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" - "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum/go-ethereum/accounts/abi" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/log" - "github.com/stretchr/testify/require" ) var portalABIString = ` @@ -50,7 +46,33 @@ var portalABIString = ` ] ` +var disputeGameFactoryABIString = ` +[ + { + "inputs": [{"name": "gameType", "type": "uint32"}], + "name": "gameImpls", + "outputs": [{"name": "", "type": "address"}], + "stateMutability": "view", + "type": "function" + } +] +` + +var faultDisputeGameABIString = ` +[ + { + "inputs": [], + "name": "absolutePrestate", + "outputs": [{"name": "", "type": "bytes32"}], + "stateMutability": "view", + "type": "function" + } +] +` + var portalABI *abi.ABI +var disputeGameFactoryABI *abi.ABI +var faultDisputeGameABI *abi.ABI func init() { if parsed, err := abi.JSON(bytes.NewReader([]byte(portalABIString))); err != nil { @@ -58,113 +80,119 @@ func init() { } else { portalABI = &parsed } -} -func TestInteropReadiness(t *testing.T) { - 
systest.SystemTest(t, interopReadinessTestScenario()) + if parsed, err := abi.JSON(bytes.NewReader([]byte(disputeGameFactoryABIString))); err != nil { + panic(fmt.Sprintf("failed to parse dispute game factory abi: %s", err)) + } else { + disputeGameFactoryABI = &parsed + } + + if parsed, err := abi.JSON(bytes.NewReader([]byte(faultDisputeGameABIString))); err != nil { + panic(fmt.Sprintf("failed to parse fault dispute game abi: %s", err)) + } else { + faultDisputeGameABI = &parsed + } } -func interopReadinessTestScenario() systest.SystemTestFunc { - return func(t systest.T, sys system.System) { - logger := testlog.Logger(t, log.LevelInfo) - logger.Info("Started test") +func TestInteropReadiness(gt *testing.T) { + t := devtest.ParallelT(gt) + sys := presets.NewSimpleInterop(t) - l1Client, err := sys.L1().Nodes()[0].GethClient() - require.NoError(t, err) - l1Caller := batching.NewMultiCaller(l1Client.Client(), batching.DefaultBatchSize) + t.Logger().Info("Started test") - checkAbsolutePrestate(t, sys, l1Client) - checkL1PAO(t, sys, l1Caller) - checkSuperchainConfig(t, sys, l1Caller) - checkPermissionless(t, sys, l1Caller) - } + l1EL := sys.L1EL + l1Client := l1EL.EthClient() + l1Caller := l1Client.NewMultiCaller(batching.DefaultBatchSize) + + checkAbsolutePrestate(t, sys, l1Caller) + checkL1PAO(t, sys, l1Caller) + checkSuperchainConfig(t, sys, l1Caller) + checkPermissionless(t, sys, l1Caller) } -func checkAbsolutePrestate(t systest.T, sys system.System, l1Client *ethclient.Client) { +func checkAbsolutePrestate(t devtest.T, sys *presets.SimpleInterop, l1Caller *batching.MultiCaller) { var prestate *[32]byte - for _, chain := range sys.L2s() { - p := getPrestate(t, l1Client, chain) + chains := []*dsl.L2Network{sys.L2ChainA, sys.L2ChainB} + for _, chain := range chains { + p := getPrestate(t, l1Caller, chain) if prestate == nil { prestate = &p } else { - require.Equal(t, *prestate, p) + t.Require().Equal(*prestate, p) } } - require.NotNil(t, prestate) + 
t.Require().NotNil(prestate) } -func checkL1PAO(t systest.T, sys system.System, l1Caller *batching.MultiCaller) { +func checkL1PAO(t devtest.T, sys *presets.SimpleInterop, l1Caller *batching.MultiCaller) { var l1PAO common.Address - for _, chain := range sys.L2s() { + chains := []*dsl.L2Network{sys.L2ChainA, sys.L2ChainB} + for _, chain := range chains { owner := getL1PAO(t, l1Caller, chain) if l1PAO == (common.Address{}) { l1PAO = owner } else { - require.Equal(t, l1PAO, owner) + t.Require().Equal(l1PAO, owner) } } - require.NotNil(t, l1PAO) + t.Require().NotEqual(common.Address{}, l1PAO) } -func checkSuperchainConfig(t systest.T, sys system.System, l1Caller *batching.MultiCaller) { +func checkSuperchainConfig(t devtest.T, sys *presets.SimpleInterop, l1Caller *batching.MultiCaller) { var superchainConfig common.Address - for _, chain := range sys.L2s() { + chains := []*dsl.L2Network{sys.L2ChainA, sys.L2ChainB} + for _, chain := range chains { address := getSuperchainConfigFromPortal(t, l1Caller, chain) if superchainConfig == (common.Address{}) { superchainConfig = address } else { - require.Equal(t, superchainConfig, address) + t.Require().Equal(superchainConfig, address) } } - require.NotNil(t, superchainConfig) + t.Require().NotEqual(common.Address{}, superchainConfig) } -func checkPermissionless(t systest.T, sys system.System, l1Caller *batching.MultiCaller) { - for _, chain := range sys.L2s() { +func checkPermissionless(t devtest.T, sys *presets.SimpleInterop, l1Caller *batching.MultiCaller) { + chains := []*dsl.L2Network{sys.L2ChainA, sys.L2ChainB} + for _, chain := range chains { gameType := getRespectedGameType(t, l1Caller, chain) - require.Equal(t, uint32(0), gameType, "chain is not permissionless") + t.Require().Equal(uint32(0), gameType, "chain is not permissionless") } } -func getL1PAO(t systest.T, l1Caller *batching.MultiCaller, l2Chain system.L2Chain) common.Address { - portalAddress, ok := l2Chain.L1Addresses()["OptimismPortalProxy"] - 
require.True(t, ok, "OptimismPortalProxy not found") +func getL1PAO(t devtest.T, l1Caller *batching.MultiCaller, l2Chain *dsl.L2Network) common.Address { + portalAddress := l2Chain.DepositContractAddr() contract := batching.NewBoundContract(portalABI, portalAddress) results, err := l1Caller.SingleCall(context.Background(), rpcblock.Latest, contract.Call("proxyAdminOwner")) - require.NoError(t, err) + t.Require().NoError(err) return results.GetAddress(0) } -func getSuperchainConfigFromPortal(t systest.T, l1Caller *batching.MultiCaller, l2Chain system.L2Chain) common.Address { - portalAddress, ok := l2Chain.L1Addresses()["OptimismPortalProxy"] - require.True(t, ok, "OptimismPortalProxy not found") +func getSuperchainConfigFromPortal(t devtest.T, l1Caller *batching.MultiCaller, l2Chain *dsl.L2Network) common.Address { + portalAddress := l2Chain.DepositContractAddr() contract := batching.NewBoundContract(portalABI, portalAddress) results, err := l1Caller.SingleCall(context.Background(), rpcblock.Latest, contract.Call("superchainConfig")) - require.NoError(t, err) + t.Require().NoError(err) return results.GetAddress(0) } -func getPrestate(t systest.T, l1Client *ethclient.Client, l2Chain system.L2Chain) [32]byte { - dgf, ok := l2Chain.L1Addresses()["DisputeGameFactoryProxy"] - require.True(t, ok, "DisputeGameFactoryProxy not found") - dgfContract, err := bindings.NewDisputeGameFactory(dgf, l1Client) - require.NoError(t, err) - - gameImpl, err := dgfContract.GameImpls(nil, 0) - require.NoError(t, err) - fdgContract, err := bindings.NewFaultDisputeGame(gameImpl, l1Client) - require.NoError(t, err) - - prestate, err := fdgContract.AbsolutePrestate(nil) - require.NoError(t, err) - return prestate +func getPrestate(t devtest.T, l1Caller *batching.MultiCaller, l2Chain *dsl.L2Network) [32]byte { + dgf := l2Chain.DisputeGameFactoryProxyAddr() + dgfContract := batching.NewBoundContract(disputeGameFactoryABI, dgf) + results, err := l1Caller.SingleCall(context.Background(), 
rpcblock.Latest, dgfContract.Call("gameImpls", uint32(0))) + t.Require().NoError(err) + gameImpl := results.GetAddress(0) + + fdgContract := batching.NewBoundContract(faultDisputeGameABI, gameImpl) + prestateResults, err := l1Caller.SingleCall(context.Background(), rpcblock.Latest, fdgContract.Call("absolutePrestate")) + t.Require().NoError(err) + return prestateResults.GetHash(0) } -func getRespectedGameType(t systest.T, l1Caller *batching.MultiCaller, l2Chain system.L2Chain) uint32 { - portalAddress, ok := l2Chain.L1Addresses()["OptimismPortalProxy"] - require.True(t, ok, "OptimismPortalProxy not found") +func getRespectedGameType(t devtest.T, l1Caller *batching.MultiCaller, l2Chain *dsl.L2Network) uint32 { + portalAddress := l2Chain.DepositContractAddr() contract := batching.NewBoundContract(portalABI, portalAddress) results, err := l1Caller.SingleCall(context.Background(), rpcblock.Latest, contract.Call("respectedGameType")) - require.NoError(t, err) + t.Require().NoError(err) return results.GetUint32(0) } diff --git a/op-acceptance-tests/tests/jovian/init_test.go b/op-acceptance-tests/tests/jovian/init_test.go new file mode 100644 index 0000000000000..6ff65ffe07f71 --- /dev/null +++ b/op-acceptance-tests/tests/jovian/init_test.go @@ -0,0 +1,11 @@ +package jovian + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/presets" +) + +func TestMain(m *testing.M) { + presets.DoMain(m, presets.WithMinimal(), presets.WithJovianAtGenesis()) +} diff --git a/op-acceptance-tests/tests/jovian/min_base_fee_test.go b/op-acceptance-tests/tests/jovian/min_base_fee_test.go new file mode 100644 index 0000000000000..28080e893bfee --- /dev/null +++ b/op-acceptance-tests/tests/jovian/min_base_fee_test.go @@ -0,0 +1,158 @@ +package jovian + +import ( + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + 
"github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/eth" + + "encoding/binary" + "time" + + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" + "github.com/ethereum-optimism/optimism/op-service/txintent/contractio" + "github.com/ethereum/go-ethereum/consensus/misc/eip1559" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" +) + +type minBaseFeeEnv struct { + l1Client *dsl.L1ELNode + l2Network *dsl.L2Network + l2EL *dsl.L2ELNode + systemConfig minBaseFeeSystemConfig +} + +type minBaseFeeSystemConfig struct { + SetMinBaseFee func(minBaseFee uint64) bindings.TypedCall[any] `sol:"setMinBaseFee"` + MinBaseFee func() bindings.TypedCall[uint64] `sol:"minBaseFee"` +} + +func newMinBaseFee(t devtest.T, l2Network *dsl.L2Network, l1EL *dsl.L1ELNode, l2EL *dsl.L2ELNode) *minBaseFeeEnv { + systemConfig := bindings.NewBindings[minBaseFeeSystemConfig]( + bindings.WithClient(l1EL.EthClient()), + bindings.WithTo(l2Network.Escape().Deployment().SystemConfigProxyAddr()), + bindings.WithTest(t)) + + return &minBaseFeeEnv{ + l1Client: l1EL, + l2Network: l2Network, + l2EL: l2EL, + systemConfig: systemConfig, + } +} + +func (mbf *minBaseFeeEnv) checkCompatibility(t devtest.T) { + _, err := contractio.Read(mbf.systemConfig.MinBaseFee(), t.Ctx()) + if err != nil { + t.Fail() + } +} + +func (mbf *minBaseFeeEnv) getSystemConfigOwner(t devtest.T) *dsl.EOA { + priv := mbf.l2Network.Escape().Keys().Secret(devkeys.SystemConfigOwner.Key(mbf.l2Network.ChainID().ToBig())) + return dsl.NewKey(t, priv).User(mbf.l1Client) +} + +func (mbf *minBaseFeeEnv) setMinBaseFeeViaSytemConfigOnL1(t devtest.T, minBaseFee uint64) { + owner := mbf.getSystemConfigOwner(t) + + _, err := contractio.Write(mbf.systemConfig.SetMinBaseFee(minBaseFee), t.Ctx(), owner.Plan()) + t.Require().NoError(err, "SetMinBaseFee transaction failed") + + t.Logf("Set 
min base fee on L1: minBaseFee=%d", minBaseFee) +} + +func (mbf *minBaseFeeEnv) verifyMinBaseFee(t devtest.T, minBase *big.Int) { + // Wait for the next block + _ = mbf.l2EL.WaitForBlock() + el := mbf.l2EL.Escape().EthClient() + info, err := el.InfoByLabel(t.Ctx(), "latest") + t.Require().NoError(err) + + // Verify base fee is clamped + t.Require().True(info.BaseFee().Cmp(minBase) >= 0, "expected base fee to be >= minBaseFee") + t.Logf("base fee %s, minBase %s", info.BaseFee(), minBase) +} + +// waitForMinBaseFeeConfigChangeOnL2 waits until the L2 latest payload extra-data encodes the expected min base fee. +func (mbf *minBaseFeeEnv) waitForMinBaseFeeConfigChangeOnL2(t devtest.T, expected uint64) { + client := mbf.l2EL.Escape().L2EthClient() + expectedExtraData := eth.BytesMax32(eip1559.EncodeMinBaseFeeExtraData(250, 6, expected)) + + // Check extradata in block header (for all clients) + var actualBlockExtraData []byte + t.Require().Eventually(func() bool { + info, err := client.InfoByLabel(t.Ctx(), "latest") + if err != nil { + return false + } + + // Get header RLP and decode to access Extra field + headerRLP, err := info.HeaderRLP() + if err != nil { + return false + } + + var header types.Header + if err := rlp.DecodeBytes(headerRLP, &header); err != nil { + return false + } + + if len(header.Extra) != 17 { + return false + } + + got := binary.BigEndian.Uint64(header.Extra[9:]) + actualBlockExtraData = header.Extra + return got == expected + }, 2*time.Minute, 5*time.Second, "L2 min base fee in block header did not sync within timeout") + + t.Require().Equal(expectedExtraData, eth.BytesMax32(actualBlockExtraData), "block header extradata doesnt match") +} + +// TestMinBaseFee verifies configurable minimum base fee using devstack presets. 
+func TestMinBaseFee(gt *testing.T) { + t := devtest.SerialT(gt) + sys := presets.NewMinimal(t) + require := t.Require() + + err := dsl.RequiresL2Fork(t.Ctx(), sys, 0, rollup.Jovian) + require.NoError(err, "Jovian fork must be active for this test") + + minBaseFee := newMinBaseFee(t, sys.L2Chain, sys.L1EL, sys.L2EL) + minBaseFee.checkCompatibility(t) + + systemOwner := minBaseFee.getSystemConfigOwner(t) + sys.FunderL1.FundAtLeast(systemOwner, eth.OneTenthEther) + + testCases := []struct { + name string + minBaseFee uint64 + }{ + // High minimum base fee + {"MinBaseFeeHigh", 2_000_000_000}, + // Medium minimum base fee + {"MinBaseFeeMedium", 1_000_000_000}, + // Zero minimum base fee (not enforced) + {"MinBaseFeeZero", 0}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t devtest.T) { + minBaseFee.setMinBaseFeeViaSytemConfigOnL1(t, tc.minBaseFee) + minBaseFee.waitForMinBaseFeeConfigChangeOnL2(t, tc.minBaseFee) + + minBaseFee.verifyMinBaseFee(t, big.NewInt(int64(tc.minBaseFee))) + + t.Log("Test completed successfully:", + "testCase", tc.name, + "minBaseFee", tc.minBaseFee) + }) + } +} diff --git a/op-acceptance-tests/tests/osaka/osaka_test.go b/op-acceptance-tests/tests/osaka/osaka_test.go new file mode 100644 index 0000000000000..eabd454749b8c --- /dev/null +++ b/op-acceptance-tests/tests/osaka/osaka_test.go @@ -0,0 +1,214 @@ +package osaka + +import ( + "bytes" + "context" + "crypto/rand" + "fmt" + "math/big" + "os" + "os/exec" + "strings" + "sync" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop/loadtest" + "github.com/ethereum-optimism/optimism/op-batcher/batcher" + "github.com/ethereum-optimism/optimism/op-batcher/flags" + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + 
"github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/intentbuilder" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/txinclude" + "github.com/ethereum-optimism/optimism/op-service/txplan" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" +) + +// configureDevstackEnvVars sets the appropriate env vars to use a mise-installed geth binary for +// the L1 EL. This is useful in Osaka acceptance tests since op-geth does not include full Osaka +// support. This is meant to run before presets.DoMain in a TestMain function. It will log to +// stdout. ResetDevstackEnvVars should be used to reset the environment variables when TestMain +// exits. +// +// Note that this is a no-op if either [sysgo.DevstackL1ELKindVar] or [sysgo.GethExecPathEnvVar] +// are set. +// +// The returned callback resets any modified environment variables. 
+func configureDevstackEnvVars() func() { + if _, ok := os.LookupEnv(sysgo.DevstackL1ELKindEnvVar); ok { + return func() {} + } + if _, ok := os.LookupEnv(sysgo.GethExecPathEnvVar); ok { + return func() {} + } + + cmd := exec.Command("mise", "which", "geth") + buf := bytes.NewBuffer([]byte{}) + cmd.Stdout = buf + if err := cmd.Run(); err != nil { + fmt.Printf("Failed to find mise-installed geth: %v\n", err) + return func() {} + } + execPath := strings.TrimSpace(buf.String()) + fmt.Println("Found mise-installed geth:", execPath) + _ = os.Setenv(sysgo.GethExecPathEnvVar, execPath) + _ = os.Setenv(sysgo.DevstackL1ELKindEnvVar, "geth") + return func() { + _ = os.Unsetenv(sysgo.GethExecPathEnvVar) + _ = os.Unsetenv(sysgo.DevstackL1ELKindEnvVar) + } +} + +func TestMain(m *testing.M) { + resetEnvVars := configureDevstackEnvVars() + defer resetEnvVars() + + presets.DoMain(m, stack.MakeCommon(stack.Combine[*sysgo.Orchestrator]( + sysgo.DefaultMinimalSystem(&sysgo.DefaultMinimalSystemIDs{}), + sysgo.WithDeployerOptions(func(_ devtest.P, _ devkeys.Keys, builder intentbuilder.Builder) { + _, l1Config := builder.WithL1(sysgo.DefaultL1ID) + l1Config.WithOsakaOffset(0) + l1Config.WithBPO1Offset(0) + l1Config.WithL1BlobSchedule(¶ms.BlobScheduleConfig{ + Cancun: params.DefaultCancunBlobConfig, + Osaka: params.DefaultOsakaBlobConfig, + Prague: params.DefaultPragueBlobConfig, + BPO1: params.DefaultBPO1BlobConfig, + }) + }), + sysgo.WithBatcherOption(func(_ stack.L2BatcherID, cfg *batcher.CLIConfig) { + cfg.DataAvailabilityType = flags.BlobsType + }), + ))) +} + +func TestBatcherUsesNewSidecarFormatAfterOsaka(gt *testing.T) { + t := devtest.SerialT(gt) + sys := presets.NewMinimal(t) + t.Log("Waiting for Osaka to activate") + t.Require().NotNil(sys.L1Network.Escape().ChainConfig().OsakaTime) + sys.L1EL.WaitForTime(*sys.L1Network.Escape().ChainConfig().OsakaTime) + t.Log("Osaka activated") + + // 1. Wait for the sequencer to build a block after Osaka is activated. 
This avoids a race + // condition where the unsafe head has been posted as part of a blob, but has not been + // marked as "safe" yet. + sys.L2EL.WaitForBlock() + + // 2. Wait for the batcher to include target in a batch and post it to L1. Because the batch is + // posted after Osaka has activated, it means the batcher must have successfully used the + // new format. + target := sys.L2EL.BlockRefByLabel(eth.Unsafe) + blockTime := time.Duration(sys.L2Chain.Escape().RollupConfig().BlockTime) * time.Second + for range time.Tick(blockTime) { + if sys.L2EL.BlockRefByLabel(eth.Safe).Number >= target.Number { + // If the safe head is ahead of the target height and the target block is part of the + // canonical chain, then the target block is safe. + _, err := sys.L2EL.Escape().EthClient().BlockRefByHash(t.Ctx(), target.Hash) + t.Require().NoError(err) + return + } + } +} + +func TestBlobBaseFeeIsCorrectAfterBPOFork(gt *testing.T) { + t := devtest.SerialT(gt) + sys := presets.NewMinimal(t) + t.Log("Waiting for BPO1 to activate") + t.Require().NotNil(sys.L1Network.Escape().ChainConfig().BPO1Time) + sys.L1EL.WaitForTime(*sys.L1Network.Escape().ChainConfig().BPO1Time) + t.Log("BPO1 activated") + + sys.L1EL.WaitForBlock() + l1BlockTime := sys.L1EL.EstimateBlockTime() + l1ChainConfig := sys.L1Network.Escape().ChainConfig() + + spamBlobs(t, sys) // Raise the blob base fee to make blob parameter changes visible. + + // Wait for the blob base fee to rise above 1 so the blob parameter changes will be visible. + for range time.Tick(l1BlockTime) { + info, _, err := sys.L1EL.EthClient().InfoAndTxsByLabel(t.Ctx(), eth.Unsafe) + t.Require().NoError(err) + if calcBlobBaseFee(l1ChainConfig, info).Cmp(big.NewInt(1)) > 0 { + break + } + t.Logf("Waiting for blob base fee to rise above 1") + } + + l2UnsafeRef := sys.L2CL.SyncStatus().UnsafeL2 + + // Get the L1 blob base fee. 
+ l1OriginInfo, err := sys.L1EL.EthClient().InfoByHash(t.Ctx(), l2UnsafeRef.L1Origin.Hash) + t.Require().NoError(err) + l1BlobBaseFee := calcBlobBaseFee(l1ChainConfig, l1OriginInfo) + + // Get the L2 blob base fee from the system deposit tx. + info, txs, err := sys.L2EL.Escape().EthClient().InfoAndTxsByHash(t.Ctx(), l2UnsafeRef.Hash) + t.Require().NoError(err) + blockInfo, err := derive.L1BlockInfoFromBytes(sys.L2Chain.Escape().RollupConfig(), info.Time(), txs[0].Data()) + t.Require().NoError(err) + l2BlobBaseFee := blockInfo.BlobBaseFee + + t.Require().Equal(l1BlobBaseFee, l2BlobBaseFee) +} + +func spamBlobs(t devtest.T, sys *presets.Minimal) { + l1BlockTime := sys.L1EL.EstimateBlockTime() + l1ChainConfig := sys.L1Network.Escape().ChainConfig() + + eoa := sys.FunderL1.NewFundedEOA(eth.OneEther.Mul(5)) + signer := txinclude.NewPkSigner(eoa.Key().Priv(), sys.L1Network.ChainID().ToBig()) + l1ETHClient := sys.L1EL.EthClient() + syncEOA := loadtest.NewSyncEOA(txinclude.NewPersistent(signer, struct { + *txinclude.Monitor + *txinclude.Resubmitter + }{ + txinclude.NewMonitor(l1ETHClient, l1BlockTime), + txinclude.NewResubmitter(l1ETHClient, l1BlockTime), + }), eoa.Plan()) + + var blob eth.Blob + _, err := rand.Read(blob[:]) + t.Require().NoError(err) + // get the field-elements into a valid range + for i := range 4096 { + blob[32*i] &= 0b0011_1111 + } + + const maxBlobTxsPerAccountInMempool = 16 // Private policy param in geth. 
+ spammer := loadtest.SpammerFunc(func(t devtest.T) error { + _, err := syncEOA.Include(t, txplan.WithBlobs([]*eth.Blob{&blob}, l1ChainConfig), txplan.WithTo(&common.Address{})) + return err + }) + txsPerSlot := min(l1ChainConfig.BlobScheduleConfig.BPO1.Max*3/4, maxBlobTxsPerAccountInMempool) + schedule := loadtest.NewConstant(l1BlockTime, loadtest.WithBaseRPS(uint64(txsPerSlot))) + + ctx, cancel := context.WithCancel(t.Ctx()) + var wg sync.WaitGroup + t.Cleanup(func() { + cancel() + wg.Wait() + }) + wg.Add(1) + go func() { + defer wg.Done() + schedule.Run(t.WithCtx(ctx), spammer) + }() +} + +func calcBlobBaseFee(cfg *params.ChainConfig, info eth.BlockInfo) *big.Int { + return eip4844.CalcBlobFee(cfg, &types.Header{ + // It's unfortunate that we can't build a proper header from a BlockInfo. + // We do our best to work around deficiencies in the BlockInfo implementation here. + Time: info.Time(), + ExcessBlobGas: info.ExcessBlobGas(), + }) +} diff --git a/op-acceptance-tests/tests/safeheaddb_elsync/safeheaddb_test.go b/op-acceptance-tests/tests/safeheaddb_elsync/safeheaddb_test.go index e112246395f4f..5bb992e531951 100644 --- a/op-acceptance-tests/tests/safeheaddb_elsync/safeheaddb_test.go +++ b/op-acceptance-tests/tests/safeheaddb_elsync/safeheaddb_test.go @@ -42,14 +42,13 @@ func TestNotTruncateDatabaseOnRestartWithExistingDatabase(gt *testing.T) { t := devtest.SerialT(gt) sys := presets.NewSingleChainMultiNode(t) - startSafeBlock := sys.L2CLB.SafeL2BlockRef().Number - dsl.CheckAll(t, sys.L2CL.AdvancedFn(types.LocalSafe, 1, 30), sys.L2CLB.AdvancedFn(types.LocalSafe, 1, 30)) - sys.L2CLB.Matched(sys.L2CL, types.LocalSafe, 30) - sys.L2CLB.VerifySafeHeadDatabaseMatches(sys.L2CL) + + preRestartSafeBlock := sys.L2CLB.SafeL2BlockRef().Number + sys.L2CLB.VerifySafeHeadDatabaseMatches(sys.L2CL, dsl.WithMinRequiredL2Block(preRestartSafeBlock)) // Restart the verifier op-node, but not the EL so the existing chain data is not deleted. 
sys.L2CLB.Stop() @@ -61,5 +60,5 @@ func TestNotTruncateDatabaseOnRestartWithExistingDatabase(gt *testing.T) { sys.L2CLB.Matched(sys.L2CL, types.LocalSafe, 30) sys.L2CLB.Advanced(types.LocalSafe, 1, 30) // At least one safe head db update after resync - sys.L2CLB.VerifySafeHeadDatabaseMatches(sys.L2CL, dsl.WithMinRequiredL2Block(startSafeBlock)) + sys.L2CLB.VerifySafeHeadDatabaseMatches(sys.L2CL, dsl.WithMinRequiredL2Block(preRestartSafeBlock)) } diff --git a/op-acceptance-tests/tests/sync/elsync/gap_clp2p/init_test.go b/op-acceptance-tests/tests/sync/elsync/gap_clp2p/init_test.go new file mode 100644 index 0000000000000..75dffe931e0b2 --- /dev/null +++ b/op-acceptance-tests/tests/sync/elsync/gap_clp2p/init_test.go @@ -0,0 +1,23 @@ +package gap_clp2p + +import ( + "testing" + + bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" + "github.com/ethereum-optimism/optimism/op-devstack/compat" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +func TestMain(m *testing.M) { + // No ELP2P, CLP2P to control the supply of unsafe payload to the CL + presets.DoMain(m, presets.WithSingleChainMultiNodeWithoutP2P(), + presets.WithExecutionLayerSyncOnVerifiers(), + presets.WithCompatibleTypes(compat.SysGo), + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + // For stopping derivation, not to advance safe heads + cfg.Stopped = true + })), + ) +} diff --git a/op-acceptance-tests/tests/sync/elsync/gap_clp2p/sync_test.go b/op-acceptance-tests/tests/sync/elsync/gap_clp2p/sync_test.go new file mode 100644 index 0000000000000..21b4a3ed2cba7 --- /dev/null +++ b/op-acceptance-tests/tests/sync/elsync/gap_clp2p/sync_test.go @@ -0,0 +1,51 @@ +package gap_clp2p + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + 
"github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +func TestSyncAfterInitialELSync(gt *testing.T) { + t := devtest.SerialT(gt) + sys := presets.NewSingleChainMultiNodeWithoutCheck(t) + require := t.Require() + + sys.L2CL.Advanced(types.LocalUnsafe, 7, 30) + + // batcher down so safe not advanced + require.Equal(uint64(0), sys.L2CL.HeadBlockRef(types.LocalSafe).Number) + require.Equal(uint64(0), sys.L2CLB.HeadBlockRef(types.LocalSafe).Number) + // verifier not advanced unsafe head + require.Equal(uint64(0), sys.L2CLB.HeadBlockRef(types.LocalUnsafe).Number) + + // Finish EL sync by supplying the first block + // EL Sync finished because underlying EL has states to validate the payload for block 1 + sys.L2CLB.SignalTarget(sys.L2EL, 1) + + // Send payloads for block 3, 4, 5, 7 which will fill in unsafe payload queue, block 2 missed + // Non-canonical payloads will be not sent to L2EL + // Order does not matter + for _, target := range []uint64{5, 3, 4, 7} { + sys.L2CLB.SignalTarget(sys.L2EL, target) + // Canonical unsafe head never advances because of the gap + require.Equal(uint64(1), sys.L2ELB.BlockRefByLabel(eth.Unsafe).Number) + } + + // Send missing gap, payload 2, still not sending FCU since unsafe gap exists + sys.L2CLB.SignalTarget(sys.L2EL, 2) + + retries := 2 + // Gap filled and payload 2, 3, 4, 5 became canonical by relaying to ELB. + // Payload 7 is still in the unsafe payload queue because of unsafe gap + sys.L2ELB.Reached(eth.Unsafe, 5, retries) + + // Send missing gap, payload 6 + sys.L2CLB.SignalTarget(sys.L2EL, 6) + + // Gap filled and block 6, 7 became canonical by relaying to ELB. 
+ sys.L2ELB.Reached(eth.Unsafe, 7, retries) +} diff --git a/op-acceptance-tests/tests/sync/elsync/gap_elp2p/init_test.go b/op-acceptance-tests/tests/sync/elsync/gap_elp2p/init_test.go new file mode 100644 index 0000000000000..280fdd55ad511 --- /dev/null +++ b/op-acceptance-tests/tests/sync/elsync/gap_elp2p/init_test.go @@ -0,0 +1,22 @@ +package gap_elp2p + +import ( + "testing" + + bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" + "github.com/ethereum-optimism/optimism/op-devstack/compat" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +func TestMain(m *testing.M) { + // No ELP2P, CLP2P to control the supply of unsafe payload to the CL + presets.DoMain(m, presets.WithSingleChainMultiNodeWithoutP2P(), + presets.WithCompatibleTypes(compat.SysGo), + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + // For stopping derivation, not to advance safe heads + cfg.Stopped = true + })), + ) +} diff --git a/op-acceptance-tests/tests/sync/elsync/gap_elp2p/sync_test.go b/op-acceptance-tests/tests/sync/elsync/gap_elp2p/sync_test.go new file mode 100644 index 0000000000000..f3961fa94cc3e --- /dev/null +++ b/op-acceptance-tests/tests/sync/elsync/gap_elp2p/sync_test.go @@ -0,0 +1,232 @@ +package gap_elp2p + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" + "github.com/ethereum/go-ethereum" +) + +// TestL2ELP2PCanonicalChainAdvancedByFCU verifies the interaction between NewPayload, +// ForkchoiceUpdate (FCU), and ELP2P/EL sync in a multi-node L2 test network. +// +// Scenario +// - Start a single-chain, multi-node system without ELP2P connectivity for L2ELB. 
+// - Advance the reference node (L2EL) so it is ahead of L2ELB. +// +// Expectations covered by this test +// +// NewPayload without parents present: +// - Does NOT trigger EL sync. +// - Returns SYNCING for future blocks (startNum+3/5/4/6). +// +// NewPayload on a non canonical chain with available state: +// - Can extend a non canonical chain (startNum+1 then +2) and returns VALID. +// - These non canonical chain blocks are retrievable by hash but remain non-canonical +// (BlockRefByNumber returns NotFound) until FCU marks them valid. +// +// FCU promoting non canonical chain to canonical: +// - FCU to startNum+2 marks the previously imported non canonical blocks valid +// and advances L2ELB canonical head to startNum+2. +// +// FCU targeting a block that cannot yet be validated (missing ancestors): +// - Triggers EL sync on L2EL (skeleton/backfill logs), returns SYNCING, +// and does not advance the head while ELP2P is still unavailable. +// +// Enabling ELP2P and eventual validation: +// - After peering L2ELB with L2EL, FCU to startNum+4 eventually becomes VALID +// once EL sync completes; the test waits for canonicalization and confirms head advances. +// - Subsequent gaps (to startNum+6, then +8) are resolved by FCU with +// WaitUntilValid, advancing the canonical head each time. +// +// NewPayload still does not initiate EL sync: +// - A NewPayload to startNum+10 returns SYNCING and the block remains unknown by number +// until an FCU is issued, which initially returns SYNCING. +// +// Insights +// - NewPayload alone never initiates EL sync, but can build a non canonical chain if state exists. +// - FCU is the mechanism that (a) promotes non canonical chain blocks to canonical when they are +// already fully validated, and (b) triggers EL sync when ancestors are missing. +// - Previously submitted NewPayloads that returned SYNCING are not retained to automatically +// assemble a non canonical chain later. 
+// - With ELP2P enabled, repeated FCU attempts eventually validate and advance the canonical chain. +func TestL2ELP2PCanonicalChainAdvancedByFCU(gt *testing.T) { + t := devtest.SerialT(gt) + sys := presets.NewSingleChainMultiNodeWithoutCheck(t) + require := t.Require() + logger := t.Logger() + + // Advance few blocks to make sure reference node advanced + sys.L2CL.Advanced(types.LocalUnsafe, 10, 30) + + sys.L2CLB.Stop() + + // At this point, L2ELB has no ELP2P, and L2CL connection + startNum := sys.L2ELB.BlockRefByLabel(eth.Unsafe).Number + + // NewPayload does not trigger the EL Sync + // Example logs from L2EL(geth) + // New skeleton head announced + // Ignoring payload with missing parent + targetNum := startNum + 3 + sys.L2ELB.NewPayload(sys.L2EL, targetNum).IsSyncing() + + // NewPayload does not trigger the EL Sync + // Example logs from L2EL(geth) + // New skeleton head announced + // Ignoring payload with missing parent + targetNum = startNum + 5 + sys.L2ELB.NewPayload(sys.L2EL, targetNum).IsSyncing() + + // NewPayload does not trigger the EL Sync + // Example logs from L2EL(geth) + // New skeleton head announced + // Ignoring payload with missing parent + targetNum = startNum + 4 + sys.L2ELB.NewPayload(sys.L2EL, targetNum).IsSyncing() + + // NewPayload can extend non canonical chain because L2EL has state for startNum and can validate payload + // Example logs from L2EL(geth) + // Inserting block without sethead + // Persisted trie from memory database + // Imported new potential chain segment + targetNum = startNum + 1 + sys.L2ELB.NewPayload(sys.L2EL, targetNum).IsValid() + logger.Info("Non canonical chain advanced", "number", targetNum) + + // NewPayload can extend non canonical chain because L2EL has state for startNum+1 and can validate payload + // Example logs from L2EL(geth) + // Inserting block without sethead + // Persisted trie from memory database + // Imported new potential chain segment + targetNum = startNum + 2 + 
sys.L2ELB.NewPayload(sys.L2EL, targetNum).IsValid() + logger.Info("Non canonical chain advanced", "number", targetNum) + + // Non canonical chain can be fetched via blockhash + blockRef := sys.L2EL.BlockRefByNumber(targetNum) + nonCan := sys.L2ELB.BlockRefByHash(blockRef.Hash) + require.Equal(uint64(targetNum), nonCan.Number) + require.Equal(blockRef.Hash, nonCan.Hash) + // Still targetNum block is non canonicalized + _, err := sys.L2ELB.Escape().L2EthClient().BlockRefByNumber(t.Ctx(), targetNum) + require.ErrorIs(err, ethereum.NotFound) + + // Previously inserted payloads are not used to make non-canonical chain automatically + blockRef = sys.L2EL.BlockRefByNumber(startNum + 3) + _, err = sys.L2ELB.Escape().EthClient().BlockRefByHash(t.Ctx(), blockRef.Hash) + require.ErrorIs(err, ethereum.NotFound) + blockRef = sys.L2EL.BlockRefByNumber(startNum + 5) + _, err = sys.L2ELB.Escape().EthClient().BlockRefByHash(t.Ctx(), blockRef.Hash) + require.ErrorIs(err, ethereum.NotFound) + + // No FCU yet so head not advanced yet + require.Equal(startNum, sys.L2ELB.BlockRefByLabel(eth.Unsafe).Number) + + // NewPayload does not trigger the EL Sync + // Example logs from L2EL(geth) + // New skeleton head announced + // Ignoring payload with missing parent + targetNum = startNum + 6 + sys.L2ELB.NewPayload(sys.L2EL, targetNum).IsSyncing() + + // FCU marks startNum + 2 as valid, promoting non canonical blocks to canonical blocks + // Example logs from L2EL(geth) + // Extend chain + // Chain head was updated + targetNum = startNum + 2 + sys.L2ELB.ForkchoiceUpdate(sys.L2EL, targetNum, 0, 0, nil).IsValid() + logger.Info("Canonical chain advanced", "number", targetNum) + + // Head advanced, canonical head bumped + require.Equal(uint64(targetNum), sys.L2ELB.BlockRefByLabel(eth.Unsafe).Number) + + // FCU to target block which cannot be validated, triggers EL Sync but ELP2P not yet available + // Example logs from L2EL(geth) + // New skeleton head announced + // created initial skeleton 
subchain
+	// Starting reverse header sync cycle
+	// Block synchronisation started
+	// Backfilling with the network
+	targetNum = startNum + 3
+	sys.L2ELB.ForkchoiceUpdate(sys.L2EL, targetNum, 0, 0, nil).IsSyncing()
+
+	// head not advanced
+	require.Equal(uint64(startNum+2), sys.L2ELB.BlockRefByLabel(eth.Unsafe).Number)
+
+	// FCU to target block which cannot be validated
+	// Example logs from L2EL(geth)
+	// New skeleton head announced
+	sys.L2ELB.ForkchoiceUpdate(sys.L2EL, targetNum, 0, 0, nil).IsSyncing()
+
+	// head not advanced
+	require.Equal(uint64(startNum+2), sys.L2ELB.BlockRefByLabel(eth.Unsafe).Number)
+
+	// FCU to target block which cannot be validated
+	// Example logs from L2EL(geth)
+	// New skeleton head announced
+	targetNum = startNum + 4
+	sys.L2ELB.ForkchoiceUpdate(sys.L2EL, targetNum, 0, 0, nil).IsSyncing()
+
+	// head not advanced
+	require.Equal(uint64(startNum+2), sys.L2ELB.BlockRefByLabel(eth.Unsafe).Number)
+
+	// Finally peer for enabling ELP2P
+	sys.L2ELB.PeerWith(sys.L2EL)
+
+	// We allow three attempts. Most of the time, two attempts are enough
+	// At first attempt, L2EL starts EL Sync, returning SYNCING.
+ // Before second attempt, L2EL finishes EL Sync, and updates targetNum as canonical + // At second attempt, L2EL returns VALID since targetNum is already canonical + attempts := 3 + + // FCU to target block which can be eventually validated, because ELP2P enabled + // Example logs from L2EL(geth) + // New skeleton head announced + // Backfilling with the network + sys.L2ELB.ForkchoiceUpdate(sys.L2EL, targetNum, 0, 0, nil).IsSyncing() + + // Wait until L2EL finishes EL Sync and canonicalizes until targetNum + sys.L2ELB.Reached(eth.Unsafe, targetNum, 3) + + sys.L2ELB.ForkchoiceUpdate(sys.L2EL, targetNum, 0, 0, nil).WaitUntilValid(attempts) + logger.Info("Canonical chain advanced", "number", targetNum) + + // head advanced + require.Equal(uint64(targetNum), sys.L2ELB.BlockRefByLabel(eth.Unsafe).Number) + + // FCU to target block which can be eventually validated, because ELP2P enabled + // Example logs from L2EL(geth) + // "Restarting sync cycle" reason="chain gapped, head: 4, newHead: 6" + targetNum = startNum + 6 + sys.L2ELB.ForkchoiceUpdate(sys.L2EL, targetNum, 0, 0, nil).WaitUntilValid(attempts) + logger.Info("Canonical chain advanced", "number", targetNum) + + // head advanced + require.Equal(uint64(targetNum), sys.L2ELB.BlockRefByLabel(eth.Unsafe).Number) + + // FCU to target block which can be eventually validated, because ELP2P enabled + // Example logs from L2EL(geth) + // "Restarting sync cycle" reason="chain gapped, head: 6, newHead: 8" + targetNum = startNum + 8 + sys.L2ELB.ForkchoiceUpdate(sys.L2EL, targetNum, 0, 0, nil).WaitUntilValid(attempts) + logger.Info("Canonical chain advanced", "number", targetNum) + + // head advanced + require.Equal(uint64(targetNum), sys.L2ELB.BlockRefByLabel(eth.Unsafe).Number) + + // NewPayload does not trigger EL Sync + targetNum = startNum + 10 + sys.L2ELB.NewPayload(sys.L2EL, targetNum).IsSyncing() + _, err = sys.L2ELB.Escape().L2EthClient().BlockRefByNumber(t.Ctx(), targetNum) + require.ErrorIs(err, ethereum.NotFound) 
+ sys.L2ELB.ForkchoiceUpdate(sys.L2EL, targetNum, 0, 0, nil).IsSyncing() + + t.Cleanup(func() { + sys.L2CLB.Start() + sys.L2ELB.DisconnectPeerWith(sys.L2EL) + }) +} diff --git a/op-acceptance-tests/tests/sync/manual/init_test.go b/op-acceptance-tests/tests/sync/manual/init_test.go new file mode 100644 index 0000000000000..ac448c396b413 --- /dev/null +++ b/op-acceptance-tests/tests/sync/manual/init_test.go @@ -0,0 +1,22 @@ +package manual + +import ( + "testing" + + bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" + "github.com/ethereum-optimism/optimism/op-devstack/compat" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +func TestMain(m *testing.M) { + // No ELP2P, CLP2P to control the supply of unsafe payload to the CL + presets.DoMain(m, presets.WithSingleChainMultiNodeWithoutP2P(), + presets.WithCompatibleTypes(compat.SysGo), + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + // For stopping derivation, not to advance safe heads + cfg.Stopped = true + })), + ) +} diff --git a/op-acceptance-tests/tests/sync/manual/sync_test.go b/op-acceptance-tests/tests/sync/manual/sync_test.go new file mode 100644 index 0000000000000..65317f3c8a92e --- /dev/null +++ b/op-acceptance-tests/tests/sync/manual/sync_test.go @@ -0,0 +1,60 @@ +package manual + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" + "github.com/ethereum/go-ethereum" +) + +func TestVerifierManualSync(gt *testing.T) { + t := devtest.SerialT(gt) + + // Disable ELP2P and Batcher + sys := presets.NewSingleChainMultiNodeWithoutCheck(t) + require := t.Require() + logger := t.Logger() + + delta := 
uint64(7) + sys.L2CL.Advanced(types.LocalUnsafe, delta, 30) + + // Disable Derivation + sys.L2CLB.Stop() + + startBlockNum := sys.L2ELB.BlockRefByLabel(eth.Unsafe).Number + + // Manual Block insertion using engine APIs + for i := uint64(1); i <= delta; i++ { + blockNum := startBlockNum + i + block := sys.L2EL.BlockRefByNumber(blockNum) + // Validator does not have canonical nor noncanonical block for blockNum + _, err := sys.L2ELB.Escape().EthClient().BlockRefByNumber(t.Ctx(), blockNum) + require.Error(err, ethereum.NotFound) + _, err = sys.L2ELB.Escape().EthClient().BlockRefByHash(t.Ctx(), block.Hash) + require.Error(err, ethereum.NotFound) + + // Insert payload + logger.Info("NewPayload", "target", blockNum) + sys.L2ELB.NewPayload(sys.L2EL, blockNum).IsValid() + // Payload valid but not canonicalized. Cannot fetch block by number + _, err = sys.L2ELB.Escape().EthClient().BlockRefByNumber(t.Ctx(), blockNum) + require.Error(err, ethereum.NotFound) + // Now fetchable by hash + require.Equal(blockNum, sys.L2ELB.BlockRefByHash(block.Hash).Number) + + // FCU + logger.Info("ForkchoiceUpdate", "target", blockNum) + sys.L2ELB.ForkchoiceUpdate(sys.L2EL, blockNum, 0, 0, nil).IsValid() + // Payload valid and canonicalized + require.Equal(block.Hash, sys.L2ELB.BlockRefByNumber(blockNum).Hash) + require.Equal(blockNum, sys.L2ELB.BlockRefByHash(block.Hash).Number) + } + + // Check correctly synced by comparing with sequencer EL + res := sys.L2ELB.BlockRefByLabel(eth.Unsafe) + require.Equal(startBlockNum+delta, res.Number) + require.Equal(sys.L2EL.BlockRefByNumber(startBlockNum+delta).Hash, res.Hash) +} diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_e2e/init_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_e2e/init_test.go new file mode 100644 index 0000000000000..171ff454e4c2c --- /dev/null +++ b/op-acceptance-tests/tests/sync_tester/sync_tester_e2e/init_test.go @@ -0,0 +1,14 @@ +package sync_tester_e2e + +import ( + "testing" + + 
"github.com/ethereum-optimism/optimism/op-devstack/compat" + "github.com/ethereum-optimism/optimism/op-devstack/presets" +) + +func TestMain(m *testing.M) { + presets.DoMain(m, presets.WithSimpleWithSyncTester(), + presets.WithCompatibleTypes(compat.SysGo), + ) +} diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_e2e/sync_tester_e2e_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_e2e/sync_tester_e2e_test.go new file mode 100644 index 0000000000000..f3e493e77e71a --- /dev/null +++ b/op-acceptance-tests/tests/sync_tester/sync_tester_e2e/sync_tester_e2e_test.go @@ -0,0 +1,90 @@ +package sync_tester_e2e + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +func TestSyncTesterE2E(gt *testing.T) { + t := devtest.SerialT(gt) + // This test uses DefaultSimpleSystemWithSyncTester which includes: + // - Minimal setup with L1EL, L1CL, L2EL, L2CL (sequencer) + // - Additional L2CL2 (verifier) that connects to SyncTester instead of L2EL + sys := presets.NewSimpleWithSyncTester(t) + require := t.Require() + logger := t.Logger() + ctx := t.Ctx() + + // Test that we can get chain IDs from both L2CL nodes + l2CLChainID := sys.L2CL.ID().ChainID() + require.Equal(eth.ChainIDFromUInt64(901), l2CLChainID, "first L2CL should be on chain 901") + + l2CL2ChainID := sys.L2CL2.ID().ChainID() + require.Equal(eth.ChainIDFromUInt64(901), l2CL2ChainID, "second L2CL should be on chain 901") + + // Test that the network started successfully + require.NotNil(sys.L1EL, "L1 EL node should be available") + require.NotNil(sys.L2EL, "L2 EL node should be available") + require.NotNil(sys.L2CL, "L2 CL node should be available") + require.NotNil(sys.SyncTester, "SyncTester should be available") + 
require.NotNil(sys.L2CL2, "Second L2 CL node should be available") + require.NotNil(sys.SyncTesterL2EL, "SyncTester L2 EL node should be available") + + sessionIDs := sys.SyncTester.ListSessions() + require.GreaterOrEqual(len(sessionIDs), 1, "at least one session") + + sessionID := sessionIDs[0] + logger.Info("SyncTester EL", "sessionID", sessionID) + + session := sys.SyncTester.GetSession(sessionID) + + require.Equal(eth.FCUState{Latest: 0, Safe: 0, Finalized: 0}, session.InitialState) + + target := uint64(5) + dsl.CheckAll(t, + sys.L2CL.AdvancedFn(types.LocalUnsafe, target, 30), + sys.L2CL2.AdvancedFn(types.LocalUnsafe, target, 30), + ) + + // Test that we can get chain ID from SyncTester + syncTesterChainID := sys.SyncTester.ChainID(sessionID) + require.Equal(eth.ChainIDFromUInt64(901), syncTesterChainID, "SyncTester should be on chain 901") + + // Test that both L2CL nodes and SyncTester are on the same chain + require.Equal(l2CLChainID, l2CL2ChainID, "both L2CL nodes should be on the same chain") + require.Equal(l2CLChainID, syncTesterChainID, "L2CL nodes and SyncTester should be on the same chain") + + // Test that we can get sync status from L2CL nodes + l2CLSyncStatus := sys.L2CL.SyncStatus() + require.NotNil(l2CLSyncStatus, "first L2CL should have sync status") + + l2CL2SyncStatus := sys.L2CL2.SyncStatus() + require.NotNil(l2CL2SyncStatus, "second L2CL should have sync status") + + t.Logger().Info("SyncTester E2E test completed successfully", + "l2cl_chain_id", l2CLChainID, + "l2cl2_chain_id", l2CL2ChainID, + "sync_tester_chain_id", syncTesterChainID, + "l2cl_sync_status", l2CLSyncStatus, + "l2cl2_sync_status", l2CL2SyncStatus) + + unsafeNum := sys.SyncTesterL2EL.BlockRefByLabel(eth.Unsafe).Number + require.True(unsafeNum >= target, unsafeNum) + + session = sys.SyncTester.GetSession(sessionID) + require.GreaterOrEqual(session.CurrentState.Latest, target) + + sys.SyncTester.DeleteSession(sessionID) + + syncTesterClient := 
sys.SyncTester.Escape().APIWithSession(sessionID) + + require.ErrorContains(syncTesterClient.DeleteSession(ctx), "already deleted") + + _, err := syncTesterClient.GetSession(ctx) + require.ErrorContains(err, "already deleted") +} diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_elsync/elsync_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_elsync/elsync_test.go new file mode 100644 index 0000000000000..4de948305c5a2 --- /dev/null +++ b/op-acceptance-tests/tests/sync_tester/sync_tester_elsync/elsync_test.go @@ -0,0 +1,62 @@ +package sync_tester_elsync + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +func TestSyncTesterELSync(gt *testing.T) { + t := devtest.SerialT(gt) + sys := presets.NewSimpleWithSyncTester(t) + require := t.Require() + logger := t.Logger() + ctx := t.Ctx() + + target := uint64(5) + dsl.CheckAll(t, + sys.L2CL.AdvancedFn(types.LocalUnsafe, target, 30), + sys.L2CL2.AdvancedFn(types.LocalUnsafe, target, 30), + ) + + // Stop L2CL2 attached to Sync Tester EL Endpoint + sys.L2CL2.Stop() + + // Reset Sync Tester EL + sessionIDs := sys.SyncTester.ListSessions() + require.GreaterOrEqual(len(sessionIDs), 1, "at least one session") + sessionID := sessionIDs[0] + logger.Info("SyncTester EL", "sessionID", sessionID) + syncTesterClient := sys.SyncTester.Escape().APIWithSession(sessionID) + require.NoError(syncTesterClient.ResetSession(ctx)) + + // Wait for L2CL to advance more unsafe blocks + sys.L2CL.Advanced(types.LocalUnsafe, target+5, 30) + + // EL Sync not done yet + session, err := syncTesterClient.GetSession(ctx) + require.NoError(err) + require.True(session.ELSyncActive) + + // Restarting will trigger EL sync since unsafe head payload will arrive to L2CL2 via P2P + sys.L2CL2.Start() + + // Wait 
until P2P is connected + sys.L2CL2.IsP2PConnected(sys.L2CL) + + // Reaches EL Sync Target and advances + target = uint64(40) + sys.L2CL2.Reached(types.LocalUnsafe, target, 30) + + session, err = syncTesterClient.GetSession(ctx) + require.NoError(err) + require.False(session.ELSyncActive) + + // Check CL2 view is consistent with read only EL + unsafeHead := sys.L2CL2.SyncStatus().UnsafeL2 + require.GreaterOrEqual(unsafeHead.Number, target) + require.Equal(sys.L2EL.BlockRefByNumber(unsafeHead.Number).Hash, unsafeHead.Hash) +} diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_elsync/init_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_elsync/init_test.go new file mode 100644 index 0000000000000..3b90eb66724f0 --- /dev/null +++ b/op-acceptance-tests/tests/sync_tester/sync_tester_elsync/init_test.go @@ -0,0 +1,17 @@ +package sync_tester_elsync + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/compat" + "github.com/ethereum-optimism/optimism/op-devstack/presets" +) + +func TestMain(m *testing.M) { + presets.DoMain(m, + presets.WithExecutionLayerSyncOnVerifiers(), + presets.WithSimpleWithSyncTester(), + presets.WithELSyncTarget(35), + presets.WithCompatibleTypes(compat.SysGo), + ) +} diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_ext_el/sync_tester_ext_el_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_ext_el/sync_tester_ext_el_test.go new file mode 100644 index 0000000000000..28bdae5449247 --- /dev/null +++ b/op-acceptance-tests/tests/sync_tester/sync_tester_ext_el/sync_tester_ext_el_test.go @@ -0,0 +1,252 @@ +package sync_tester_ext_el + +import ( + "fmt" + "os" + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + 
"github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-node/chaincfg" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" + + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" +) + +// Configuration defaults for op-sepolia +const ( + DefaultNetworkPreset = "op-sepolia" + + // Tailscale networking endpoints + DefaultL2ELEndpointTailscale = "https://proxyd-l2-sepolia.primary.client.dev.oplabs.cloud" + DefaultL1CLBeaconEndpointTailscale = "https://beacon-api-proxy-sepolia.primary.client.dev.oplabs.cloud" + DefaultL1ELEndpointTailscale = "https://proxyd-l1-sepolia.primary.client.dev.oplabs.cloud" +) + +var ( + // Network presets for different networks against which we test op-node syncing + networkPresets = map[string]stack.ExtNetworkConfig{ + "op-sepolia": { + L2NetworkName: "op-sepolia", + L1ChainID: eth.ChainIDFromUInt64(11155111), + L2ELEndpoint: "https://ci-sepolia-l2.optimism.io", + L1CLBeaconEndpoint: "https://ci-sepolia-beacon.optimism.io", + L1ELEndpoint: "https://ci-sepolia-l1.optimism.io", + }, + "base-sepolia": { + L2NetworkName: "base-sepolia", + L1ChainID: eth.ChainIDFromUInt64(11155111), + L2ELEndpoint: "https://base-sepolia-rpc.optimism.io", + L1CLBeaconEndpoint: "https://ci-sepolia-beacon.optimism.io", + L1ELEndpoint: "https://ci-sepolia-l1.optimism.io", + }, + "unichain-sepolia": { + L2NetworkName: "unichain-sepolia", + L1ChainID: eth.ChainIDFromUInt64(11155111), + L2ELEndpoint: "https://unichain-sepolia-rpc.optimism.io", + L1CLBeaconEndpoint: "https://ci-sepolia-beacon.optimism.io", + L1ELEndpoint: "https://ci-sepolia-l1.optimism.io", + }, + "op-mainnet": { + L2NetworkName: "op-mainnet", + L1ChainID: 
eth.ChainIDFromUInt64(1), + L2ELEndpoint: "https://op-mainnet-rpc.optimism.io", + L1CLBeaconEndpoint: "https://ci-mainnet-beacon.optimism.io", + L1ELEndpoint: "https://ci-mainnet-l1.optimism.io", + }, + "base-mainnet": { + L2NetworkName: "base-mainnet", + L1ChainID: eth.ChainIDFromUInt64(1), + L2ELEndpoint: "https://base-mainnet-rpc.optimism.io", + L1CLBeaconEndpoint: "https://ci-mainnet-beacon.optimism.io", + L1ELEndpoint: "https://ci-mainnet-l1.optimism.io", + }, + } + L2CLSyncMode = getSyncMode("L2_CL_SYNCMODE") +) + +func getSyncMode(envVar string) sync.Mode { + if value := os.Getenv(envVar); value == sync.ELSyncString { + return sync.ELSync + } + return sync.CLSync +} + +func TestSyncTesterExtEL(gt *testing.T) { + t := devtest.SerialT(gt) + + if os.Getenv("CIRCLECI_PIPELINE_SCHEDULE_NAME") != "build_daily" && os.Getenv("CIRCLECI_PARAMETERS_SYNC_TEST_OP_NODE_DISPATCH") != "true" { + t.Skipf("TestSyncTesterExtEL only runs on daily scheduled pipeline jobs: schedule=%s dispatch=%s", os.Getenv("CIRCLECI_PIPELINE_SCHEDULE_NAME"), os.Getenv("CIRCLECI_PARAMETERS_SYNC_TEST_OP_NODE_DISPATCH")) + } + + l := t.Logger() + require := t.Require() + blocksToSync := uint64(20) + sys, target := setupSystem(gt, t, blocksToSync) + + attempts := 500 + if L2CLSyncMode == sync.ELSync { + // After EL Sync is finished, the FCU state will advance to target immediately so less attempts + attempts = 5 + // Signal L2CL for triggering EL Sync + sys.L2CL.SignalTarget(sys.L2ELReadOnly, target) + } + + // Test that we can get sync status from L2CL node + l2CLSyncStatus := sys.L2CL.SyncStatus() + require.NotNil(l2CLSyncStatus, "L2CL should have sync status") + + sys.L2CL.Reached(types.LocalUnsafe, target, attempts) + + l2CLSyncStatus = sys.L2CL.SyncStatus() + require.NotNil(l2CLSyncStatus, "L2CL should have sync status") + + unsafeL2Ref := l2CLSyncStatus.UnsafeL2 + blk := sys.L2EL.BlockRefByNumber(unsafeL2Ref.Number) + require.Equal(unsafeL2Ref.Hash, blk.Hash, "L2EL should be on the same block 
as L2CL") + + stSessions := sys.SyncTester.ListSessions() + require.Equal(len(stSessions), 1, "expect exactly one session") + + stSession := sys.SyncTester.GetSession(stSessions[0]) + require.GreaterOrEqual(stSession.CurrentState.Latest, stSession.InitialState.Latest+blocksToSync, "SyncTester session Latest should be on the same block as L2CL") + require.GreaterOrEqual(stSession.CurrentState.Safe, stSession.InitialState.Safe+blocksToSync, "SyncTester session Safe should be on the same block as L2CL") + + l.Info("SyncTester ExtEL test completed successfully", "l2cl_chain_id", sys.L2CL.ID().ChainID(), "l2cl_sync_status", l2CLSyncStatus) +} + +// setupSystem initializes the system for the test and returns the system and the target block number of the session +func setupSystem(gt *testing.T, t devtest.T, blocksToSync uint64) (*presets.MinimalExternalEL, uint64) { + // Initialize orchestrator + orch, target := setupOrchestrator(gt, t, blocksToSync) + system := shim.NewSystem(t) + orch.Hydrate(system) + + // Extract the system components + l2 := system.L2Network(match.L2ChainA) + verifierCL := l2.L2CLNode(match.FirstL2CL) + syncTester := l2.SyncTester(match.FirstSyncTester) + + sys := &presets.MinimalExternalEL{ + Log: t.Logger(), + T: t, + ControlPlane: orch.ControlPlane(), + L1Network: dsl.NewL1Network(system.L1Network(match.FirstL1Network)), + L1EL: dsl.NewL1ELNode(system.L1Network(match.FirstL1Network).L1ELNode(match.FirstL1EL)), + L2Chain: dsl.NewL2Network(l2, orch.ControlPlane()), + L2CL: dsl.NewL2CLNode(verifierCL, orch.ControlPlane()), + L2ELReadOnly: dsl.NewL2ELNode(l2.L2ELNode(match.FirstL2EL), orch.ControlPlane()), + L2EL: dsl.NewL2ELNode(l2.L2ELNode(match.SecondL2EL), orch.ControlPlane()), + SyncTester: dsl.NewSyncTester(syncTester), + } + + return sys, target +} + +// setupOrchestrator initializes and configures the orchestrator for the test and returns the orchestrator and the target block number of the session +func setupOrchestrator(gt *testing.T, t 
devtest.T, blocksToSync uint64) (*sysgo.Orchestrator, uint64) { + l := t.Logger() + ctx := t.Ctx() + require := t.Require() + + config := networkPresets[DefaultNetworkPreset] + + // Override configuration with Tailscale endpoints if Tailscale networking is enabled + if os.Getenv("TAILSCALE_NETWORKING") == "true" { + config.L2ELEndpoint = getEnvOrDefault("L2_EL_ENDPOINT_TAILSCALE", DefaultL2ELEndpointTailscale) + config.L1CLBeaconEndpoint = getEnvOrDefault("L1_CL_BEACON_ENDPOINT_TAILSCALE", DefaultL1CLBeaconEndpointTailscale) + config.L1ELEndpoint = getEnvOrDefault("L1_EL_ENDPOINT_TAILSCALE", DefaultL1ELEndpointTailscale) + } + + if os.Getenv("NETWORK_PRESET") != "" { + var ok bool + config, ok = networkPresets[os.Getenv("NETWORK_PRESET")] + if !ok { + gt.Errorf("NETWORK_PRESET %s not found", os.Getenv("NETWORK_PRESET")) + } + } + + // Runtime configuration values + l.Info("Runtime configuration values for TestSyncTesterExtEL") + l.Info("NETWORK_PRESET", "value", os.Getenv("NETWORK_PRESET")) + l.Info("L2_NETWORK_NAME", "value", config.L2NetworkName) + l.Info("L1_CHAIN_ID", "value", config.L1ChainID) + l.Info("L2_EL_ENDPOINT", "value", config.L2ELEndpoint) + l.Info("L1_CL_BEACON_ENDPOINT", "value", config.L1CLBeaconEndpoint) + l.Info("L1_EL_ENDPOINT", "value", config.L1ELEndpoint) + l.Info("TAILSCALE_NETWORKING", "value", os.Getenv("TAILSCALE_NETWORKING")) + l.Info("L2_CL_SYNCMODE", "value", L2CLSyncMode) + + // Setup orchestrator + logger := testlog.Logger(gt, log.LevelInfo) + onFail := func(now bool) { + if now { + gt.FailNow() + } else { + gt.Fail() + } + } + onSkipNow := func() { + gt.SkipNow() + } + p := devtest.NewP(ctx, logger, onFail, onSkipNow) + gt.Cleanup(p.Close) + + // Fetch the latest block number from the remote L2EL node + cl, err := ethclient.DialContext(ctx, config.L2ELEndpoint) + require.NoError(err) + latestBlock, err := cl.BlockByNumber(ctx, nil) + require.NoError(err) + + initial := latestBlock.NumberU64() - 1000 + target := initial + 
blocksToSync + l.Info("LATEST_BLOCK", "latest_block", latestBlock.NumberU64(), "session_initial_block", initial, "target_block", target) + + opt := presets.WithExternalELWithSuperchainRegistry(config) + if L2CLSyncMode == sync.ELSync { + chainCfg := chaincfg.ChainByName(config.L2NetworkName) + if chainCfg == nil { + panic(fmt.Sprintf("network %s not found in superchain registry", config.L2NetworkName)) + } + opt = stack.Combine(opt, + presets.WithExecutionLayerSyncOnVerifiers(), + presets.WithELSyncTarget(target), + presets.WithSyncTesterELInitialState(eth.FCUState{ + Latest: initial, + Safe: 0, + // Need to set finalized to genesis to unskip EL Sync + Finalized: chainCfg.Genesis.L2.Number, + }), + ) + } else { + opt = stack.Combine(opt, + presets.WithSyncTesterELInitialState(eth.FCUState{ + Latest: initial, + Safe: initial, + Finalized: initial, + }), + ) + } + + var orch stack.Orchestrator = sysgo.NewOrchestrator(p, stack.SystemHook(opt)) + stack.ApplyOptionLifecycle(opt, orch) + + return orch.(*sysgo.Orchestrator), target +} + +// getEnvOrDefault returns the environment variable value or the default if not set +func getEnvOrDefault(envVar, defaultValue string) string { + if value := os.Getenv(envVar); value != "" { + return value + } + return defaultValue +} diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/init_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/init_test.go new file mode 100644 index 0000000000000..b209502c609f9 --- /dev/null +++ b/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/init_test.go @@ -0,0 +1,25 @@ +package sync_tester_hfs + +import ( + "testing" + + bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" + "github.com/ethereum-optimism/optimism/op-devstack/compat" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + 
"github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" +) + +func TestMain(m *testing.M) { + presets.DoMain(m, presets.WithSimpleWithSyncTester(), + presets.WithCompatibleTypes(compat.SysGo), + presets.WithHardforkSequentialActivation(rollup.Bedrock, rollup.Jovian, 15), + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + // For supporting pre-delta batches + cfg.BatchType = derive.SingularBatchType + // For supporting pre-Fjord batches + cfg.CompressionAlgo = derive.Zlib + }))) +} diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/sync_tester_hfs_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/sync_tester_hfs_test.go new file mode 100644 index 0000000000000..ebd67346a6286 --- /dev/null +++ b/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/sync_tester_hfs_test.go @@ -0,0 +1,34 @@ +package sync_tester_hfs + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +func TestSyncTesterHardforks(gt *testing.T) { + t := devtest.SerialT(gt) + + sys := presets.NewSimpleWithSyncTester(t) + require := t.Require() + + // Hardforks will be activated from Bedrock to Isthmus, 9 hardforks with 15 second time delta between. + // 15 * 9 = 135s, so we need at least 69 (135 / 2 + 1) L2 blocks with block time 2 to make the CL experience scheduled hardforks. 
+ targetNum := 70 + dsl.CheckAll(t, + sys.L2CL.AdvancedFn(types.LocalUnsafe, uint64(targetNum), targetNum*2+10), + sys.L2CL2.AdvancedFn(types.LocalUnsafe, uint64(targetNum), targetNum*2+10), + ) + + current := sys.L2CL2.HeadBlockRef(types.LocalUnsafe) + + // Check the L2CL passed configured hardforks + isthmusTime := sys.L2Chain.Escape().ChainConfig().IsthmusTime + require.NotNil(isthmusTime, "isthmus must be activated") + require.Greater(current.Time, *isthmusTime, "must pass isthmus block") + // Check block hash state from L2CL2 which was synced using the sync tester + require.Equal(sys.L2EL.BlockRefByNumber(current.Number).Hash, current.Hash, "hash mismatch") +} diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_hfs_ext/sync_tester_hfs_ext_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_hfs_ext/sync_tester_hfs_ext_test.go new file mode 100644 index 0000000000000..1cb2a8dadf515 --- /dev/null +++ b/op-acceptance-tests/tests/sync_tester/sync_tester_hfs_ext/sync_tester_hfs_ext_test.go @@ -0,0 +1,286 @@ +package sync_tester_hfs_ext + +import ( + "context" + "fmt" + "os" + "strconv" + "testing" + + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-node/chaincfg" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +// Configuration 
defaults for op-sepolia +const ( + DefaultL2NetworkName = "op-sepolia" + DefaultL1ChainID = 11155111 + DefaultL2ELEndpoint = "https://ci-sepolia-l2.optimism.io" + DefaultL1CLBeaconEndpoint = "https://ci-sepolia-beacon.optimism.io" + DefaultL1ELEndpoint = "https://ci-sepolia-l1.optimism.io" + + // Tailscale networking endpoints + DefaultL2ELEndpointTailscale = "https://proxyd-l2-sepolia.primary.client.dev.oplabs.cloud" + DefaultL1CLBeaconEndpointTailscale = "https://beacon-api-proxy-sepolia.primary.client.dev.oplabs.cloud" + DefaultL1ELEndpointTailscale = "https://proxyd-l1-sepolia.primary.client.dev.oplabs.cloud" +) + +var ( + // Network upgrade block numbers for op-sepolia + networkUpgradeBlocks = map[rollup.ForkName]uint64{ + rollup.Canyon: 4089330, + rollup.Delta: 5700330, + rollup.Ecotone: 8366130, + rollup.Fjord: 12597930, + rollup.Granite: 15837930, + rollup.Holocene: 20415330, + rollup.Isthmus: 26551530, + } + + // Load configuration from environment variables with defaults + L2NetworkName = getEnvOrDefault("L2_NETWORK_NAME", DefaultL2NetworkName) + L1ChainID = eth.ChainIDFromUInt64(getEnvUint64OrDefault("L1_CHAIN_ID", DefaultL1ChainID)) + + // Default endpoints + L2ELEndpoint = getEnvOrDefault("L2_EL_ENDPOINT", DefaultL2ELEndpoint) + L1CLBeaconEndpoint = getEnvOrDefault("L1_CL_BEACON_ENDPOINT", DefaultL1CLBeaconEndpoint) + L1ELEndpoint = getEnvOrDefault("L1_EL_ENDPOINT", DefaultL1ELEndpoint) +) + +func TestSyncTesterHFS_Canyon_CLSync(gt *testing.T) { + hfsExt(gt, rollup.Canyon, sync.CLSync) +} + +func TestSyncTesterHFS_Canyon_ELSync(gt *testing.T) { + hfsExt(gt, rollup.Canyon, sync.ELSync) +} + +func TestSyncTesterHFS_Delta_CLSync(gt *testing.T) { + hfsExt(gt, rollup.Delta, sync.CLSync) +} + +func TestSyncTesterHFS_Delta_ELSync(gt *testing.T) { + hfsExt(gt, rollup.Delta, sync.ELSync) +} + +func TestSyncTesterHFS_Ecotone_CLSync(gt *testing.T) { + hfsExt(gt, rollup.Ecotone, sync.CLSync) +} + +func TestSyncTesterHFS_Ecotone_ELSync(gt *testing.T) { + hfsExt(gt, 
rollup.Ecotone, sync.ELSync) +} + +func TestSyncTesterHFS_Fjord_CLSync(gt *testing.T) { + hfsExt(gt, rollup.Fjord, sync.CLSync) +} + +func TestSyncTesterHFS_Fjord_ELSync(gt *testing.T) { + hfsExt(gt, rollup.Fjord, sync.ELSync) +} + +func TestSyncTesterHFS_Granite_CLSync(gt *testing.T) { + hfsExt(gt, rollup.Granite, sync.CLSync) +} + +func TestSyncTesterHFS_Granite_ELSync(gt *testing.T) { + hfsExt(gt, rollup.Granite, sync.ELSync) +} + +func TestSyncTesterHFS_Holocene_CLSync(gt *testing.T) { + hfsExt(gt, rollup.Holocene, sync.CLSync) +} + +func TestSyncTesterHFS_Holocene_ELSync(gt *testing.T) { + hfsExt(gt, rollup.Holocene, sync.ELSync) +} + +func TestSyncTesterHFS_Isthmus_CLSync(gt *testing.T) { + hfsExt(gt, rollup.Isthmus, sync.CLSync) +} + +func TestSyncTesterHFS_Isthmus_ELSync(gt *testing.T) { + hfsExt(gt, rollup.Isthmus, sync.ELSync) +} + +// getEnvOrDefault returns the environment variable value or the default if not set +func getEnvOrDefault(envVar, defaultValue string) string { + if value := os.Getenv(envVar); value != "" { + return value + } + return defaultValue +} + +// getEnvUint64OrDefault returns the environment variable value as uint64 or the default if not set +func getEnvUint64OrDefault(envVar string, defaultValue uint64) uint64 { + if value := os.Getenv(envVar); value != "" { + if parsed, err := strconv.ParseUint(value, 10, 64); err == nil { + return parsed + } + } + return defaultValue +} + +// setupOrchestrator initializes and configures the orchestrator for the test +func setupOrchestrator(gt *testing.T, t devtest.T, blk, targetBlock uint64, l2CLSyncMode sync.Mode) *sysgo.Orchestrator { + l := t.Logger() + + // Override configuration with Tailscale endpoints if Tailscale networking is enabled + l2ELEndpoint := L2ELEndpoint + l1CLBeaconEndpoint := L1CLBeaconEndpoint + l1ELEndpoint := L1ELEndpoint + + if os.Getenv("TAILSCALE_NETWORKING") == "true" { + l2ELEndpoint = getEnvOrDefault("L2_EL_ENDPOINT_TAILSCALE", DefaultL2ELEndpointTailscale) + 
l1CLBeaconEndpoint = getEnvOrDefault("L1_CL_BEACON_ENDPOINT_TAILSCALE", DefaultL1CLBeaconEndpointTailscale) + l1ELEndpoint = getEnvOrDefault("L1_EL_ENDPOINT_TAILSCALE", DefaultL1ELEndpointTailscale) + } + + // Setup orchestrator directly without TestMain + logger := testlog.Logger(gt, log.LevelInfo) + onFail := func(now bool) { + if now { + gt.FailNow() + } else { + gt.Fail() + } + } + onSkipNow := func() { + gt.SkipNow() + } + p := devtest.NewP(context.Background(), logger, onFail, onSkipNow) + gt.Cleanup(p.Close) + + // Runtime configuration values + l.Info("Runtime configuration values for TestSyncTesterExtEL") + l.Info("L2_NETWORK_NAME", "value", L2NetworkName) + l.Info("L1_CHAIN_ID", "value", L1ChainID) + l.Info("L2_EL_ENDPOINT", "value", l2ELEndpoint) + l.Info("L1_CL_BEACON_ENDPOINT", "value", l1CLBeaconEndpoint) + l.Info("L1_EL_ENDPOINT", "value", l1ELEndpoint) + l.Info("TAILSCALE_NETWORKING", "value", os.Getenv("TAILSCALE_NETWORKING")) + l.Info("L2_CL_SYNCMODE", "value", l2CLSyncMode) + + config := stack.ExtNetworkConfig{ + L2NetworkName: L2NetworkName, + L1ChainID: L1ChainID, + L2ELEndpoint: L2ELEndpoint, + L1CLBeaconEndpoint: L1CLBeaconEndpoint, + L1ELEndpoint: L1ELEndpoint, + } + + // Create orchestrator with the same configuration that was in TestMain + opt := presets.WithExternalELWithSuperchainRegistry(config) + if l2CLSyncMode == sync.ELSync { + chainCfg := chaincfg.ChainByName(config.L2NetworkName) + if chainCfg == nil { + panic(fmt.Sprintf("network %s not found in superchain registry", config.L2NetworkName)) + } + opt = stack.Combine(opt, + presets.WithExecutionLayerSyncOnVerifiers(), + presets.WithELSyncTarget(targetBlock), + presets.WithSyncTesterELInitialState(eth.FCUState{ + Latest: blk, + Safe: 0, + // Need to set finalized to genesis to unskip EL Sync + Finalized: chainCfg.Genesis.L2.Number, + }), + ) + } else { + opt = stack.Combine(opt, + presets.WithSyncTesterELInitialState(eth.FCUState{ + Latest: blk, + Safe: blk, + Finalized: blk, + }), 
+ ) + } + + var orch stack.Orchestrator = sysgo.NewOrchestrator(p, stack.SystemHook(opt)) + stack.ApplyOptionLifecycle(opt, orch) + + return orch.(*sysgo.Orchestrator) +} + +func hfsExt(gt *testing.T, upgradeName rollup.ForkName, l2CLSyncMode sync.Mode) { + t := devtest.ParallelT(gt) + l := t.Logger() + + // Initial block number to sync from before the upgrade + blk := networkUpgradeBlocks[upgradeName] - 5 + + blocksToSync := uint64(10) + targetBlock := blk + blocksToSync + // Initialize orchestrator + + orch := setupOrchestrator(gt, t, blk, targetBlock, l2CLSyncMode) + system := shim.NewSystem(t) + orch.Hydrate(system) + + l2 := system.L2Network(match.L2ChainA) + verifierCL := l2.L2CLNode(match.FirstL2CL) + syncTester := l2.SyncTester(match.FirstSyncTester) + + sys := &struct { + L2CL *dsl.L2CLNode + L2ELReadOnly *dsl.L2ELNode + L2EL *dsl.L2ELNode + SyncTester *dsl.SyncTester + L2 *dsl.L2Network + }{ + L2CL: dsl.NewL2CLNode(verifierCL, orch.ControlPlane()), + L2ELReadOnly: dsl.NewL2ELNode(l2.L2ELNode(match.FirstL2EL), orch.ControlPlane()), + L2EL: dsl.NewL2ELNode(l2.L2ELNode(match.SecondL2EL), orch.ControlPlane()), + SyncTester: dsl.NewSyncTester(syncTester), + L2: dsl.NewL2Network(l2, orch.ControlPlane()), + } + require := t.Require() + + ft := sys.L2.Escape().RollupConfig().ActivationTimeFor(upgradeName) + var l2CLSyncStatus *eth.SyncStatus + attempts := 1000 + if l2CLSyncMode == sync.ELSync { + // After EL Sync is finished, the FCU state will advance to target immediately so less attempts + attempts = 5 + // Signal L2CL for finishing EL Sync + sys.L2CL.SignalTarget(sys.L2ELReadOnly, targetBlock) + } else { + l2CLSyncStatus := sys.L2CL.WaitForNonZeroUnsafeTime(t.Ctx()) + require.Less(l2CLSyncStatus.UnsafeL2.Time, *ft, "L2CL unsafe time should be less than fork timestamp before upgrade") + } + + sys.L2CL.Reached(types.LocalUnsafe, targetBlock, attempts) + l.Info("L2CL unsafe reached", "targetBlock", targetBlock, "upgrade_name", upgradeName) + 
sys.L2CL.Reached(types.LocalSafe, targetBlock, attempts) + l.Info("L2CL safe reached", "targetBlock", targetBlock, "upgrade_name", upgradeName) + + l2CLSyncStatus = sys.L2CL.SyncStatus() + require.NotNil(l2CLSyncStatus, "L2CL should have sync status") + require.Greater(l2CLSyncStatus.UnsafeL2.Time, *ft, "L2CL unsafe time should be greater than fork timestamp after upgrade") + + unsafeL2Ref := l2CLSyncStatus.UnsafeL2 + ref := sys.L2EL.BlockRefByNumber(unsafeL2Ref.Number) + require.Equal(unsafeL2Ref.Hash, ref.Hash, "L2EL should be on the same block as L2CL") + + stSessions := sys.SyncTester.ListSessions() + require.Equal(len(stSessions), 1, "expect exactly one session") + + stSession := sys.SyncTester.GetSession(stSessions[0]) + require.GreaterOrEqualf(stSession.CurrentState.Latest, stSession.InitialState.Latest+blocksToSync, "SyncTester session CurrentState.Latest only advanced %d", stSession.CurrentState.Latest-stSession.InitialState.Latest) + require.GreaterOrEqualf(stSession.CurrentState.Safe, stSession.InitialState.Safe+blocksToSync, "SyncTester session CurrentState.Safe only advanced %d", stSession.CurrentState.Safe-stSession.InitialState.Safe) + + l.Info("SyncTester HFS Ext test completed successfully", "l2cl_chain_id", sys.L2CL.ID().ChainID(), "l2cl_sync_status", l2CLSyncStatus, "upgrade_name", upgradeName) +} diff --git a/op-batcher/batcher/channel.go b/op-batcher/batcher/channel.go index 6b936c112d346..8270977a68c57 100644 --- a/op-batcher/batcher/channel.go +++ b/op-batcher/batcher/channel.go @@ -7,7 +7,6 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" ) @@ -166,7 +165,7 @@ func (c *channel) CheckTimeout(l1BlockNum uint64) { c.channelBuilder.CheckTimeout(l1BlockNum) } -func (c *channel) AddBlock(block *types.Block) (*derive.L1BlockInfo, 
error) { +func (c *channel) AddBlock(block SizedBlock) (*derive.L1BlockInfo, error) { return c.channelBuilder.AddBlock(block) } diff --git a/op-batcher/batcher/channel_builder.go b/op-batcher/batcher/channel_builder.go index 56069e5bf1603..c9f29283cd94d 100644 --- a/op-batcher/batcher/channel_builder.go +++ b/op-batcher/batcher/channel_builder.go @@ -11,7 +11,6 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/queue" - "github.com/ethereum/go-ethereum/core/types" ) var ( @@ -66,7 +65,7 @@ type ChannelBuilder struct { // current channel co derive.ChannelOut // list of blocks in the channel. Saved in case the channel must be rebuilt - blocks queue.Queue[*types.Block] + blocks queue.Queue[SizedBlock] // latestL1Origin is the latest L1 origin of all the L2 blocks that have been added to the channel latestL1Origin eth.BlockID // oldestL1Origin is the oldest L1 origin of all the L2 blocks that have been added to the channel @@ -136,7 +135,7 @@ func (c *ChannelBuilder) OutputBytes() int { // Blocks returns a backup list of all blocks that were added to the channel. It // can be used in case the channel needs to be rebuilt. -func (c *ChannelBuilder) Blocks() []*types.Block { +func (c *ChannelBuilder) Blocks() []SizedBlock { return c.blocks } @@ -171,12 +170,12 @@ func (c *ChannelBuilder) OldestL2() eth.BlockID { // first transaction for subsequent use by the caller. // // Call OutputFrames() afterwards to create frames. 
-func (c *ChannelBuilder) AddBlock(block *types.Block) (*derive.L1BlockInfo, error) { +func (c *ChannelBuilder) AddBlock(block SizedBlock) (*derive.L1BlockInfo, error) { if c.IsFull() { return nil, c.FullErr() } - l1info, err := c.co.AddBlock(c.rollupCfg, block) + l1info, err := c.co.AddBlock(c.rollupCfg, block.Block) if errors.Is(err, derive.ErrTooManyRLPBytes) || errors.Is(err, derive.ErrCompressorFull) { c.setFullErr(err) return l1info, c.FullErr() diff --git a/op-batcher/batcher/channel_builder_test.go b/op-batcher/batcher/channel_builder_test.go index 70cae73e1335e..becb123c6671a 100644 --- a/op-batcher/batcher/channel_builder_test.go +++ b/op-batcher/batcher/channel_builder_test.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" "github.com/stretchr/testify/require" @@ -44,7 +45,7 @@ func newChannelBuilder(cfg ChannelConfig, rollupCfg *rollup.Config, latestL1Orig // ChannelBuilder.AddBlock method. 
func addMiniBlock(cb *ChannelBuilder) error { a := newMiniL2Block(0) - _, err := cb.AddBlock(a) + _, err := cb.AddBlock(SizedBlock{Block: a}) return err } @@ -109,7 +110,7 @@ func newMiniL2BlockWithChainIDNumberParentAndL1Information(numTx int, chainID *b Number: big.NewInt(l1Number), Time: blockTime, }, nil, nil, trie.NewStackTrie(nil), types.DefaultBlockConfig) - l1InfoTx, err := derive.L1InfoDeposit(rollupConfig, eth.SystemConfig{}, 0, eth.BlockToInfo(l1Block), blockTime) + l1InfoTx, err := derive.L1InfoDeposit(rollupConfig, params.MergedTestChainConfig, eth.SystemConfig{}, 0, eth.BlockToInfo(l1Block), blockTime) if err != nil { panic(err) } @@ -156,7 +157,7 @@ func addTooManyBlocks(cb *ChannelBuilder, blockCount int) (int, error) { for i := 0; i < blockCount; i++ { block := dtest.RandomL2BlockWithChainIdAndTime(rng, 1000, defaultTestRollupConfig.L2ChainID, t.Add(time.Duration(i)*time.Second)) - _, err := cb.AddBlock(block) + _, err := cb.AddBlock(SizedBlock{Block: block}) if err != nil { return i + 1, err } @@ -651,7 +652,7 @@ func ChannelBuilder_OutputFramesMaxFrameIndex(t *testing.T, batchType uint) { ti := time.Now() for i := 0; ; i++ { a := dtest.RandomL2BlockWithChainIdAndTime(rng, 1000, defaultTestRollupConfig.L2ChainID, ti.Add(time.Duration(i)*time.Second)) - _, err = cb.AddBlock(a) + _, err = cb.AddBlock(SizedBlock{Block: a}) if cb.IsFull() { fullErr := cb.FullErr() require.ErrorIs(t, fullErr, derive.ErrCompressorFull) @@ -685,9 +686,9 @@ func TestChannelBuilder_FullShadowCompressor(t *testing.T) { rng := rand.New(rand.NewSource(420)) a := dtest.RandomL2BlockWithChainId(rng, 1, defaultTestRollupConfig.L2ChainID) - _, err = cb.AddBlock(a) + _, err = cb.AddBlock(SizedBlock{Block: a}) require.NoError(err) - _, err = cb.AddBlock(a) + _, err = cb.AddBlock(SizedBlock{Block: a}) require.ErrorIs(err, derive.ErrCompressorFull) // without fix, adding the second block would succeed and then adding a // third block would fail with full error and the compressor 
would be full. @@ -807,19 +808,19 @@ func TestChannelBuilder_LatestL1Origin(t *testing.T) { require.NoError(t, err) require.Equal(t, eth.BlockID{}, cb.LatestL1Origin()) - _, err = cb.AddBlock(newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(1), common.Hash{}, 1, 100)) + _, err = cb.AddBlock(SizedBlock{Block: newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(1), common.Hash{}, 1, 100)}) require.NoError(t, err) require.Equal(t, uint64(1), cb.LatestL1Origin().Number) - _, err = cb.AddBlock(newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(2), common.Hash{}, 1, 100)) + _, err = cb.AddBlock(SizedBlock{Block: newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(2), common.Hash{}, 1, 100)}) require.NoError(t, err) require.Equal(t, uint64(1), cb.LatestL1Origin().Number) - _, err = cb.AddBlock(newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(3), common.Hash{}, 2, 110)) + _, err = cb.AddBlock(SizedBlock{Block: newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(3), common.Hash{}, 2, 110)}) require.NoError(t, err) require.Equal(t, uint64(2), cb.LatestL1Origin().Number) - _, err = cb.AddBlock(newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(3), common.Hash{}, 1, 110)) + _, err = cb.AddBlock(SizedBlock{Block: newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(3), common.Hash{}, 1, 110)}) require.NoError(t, err) require.Equal(t, uint64(2), cb.LatestL1Origin().Number) } @@ -829,19 +830,19 @@ func TestChannelBuilder_OldestL1Origin(t *testing.T) { require.NoError(t, err) require.Equal(t, eth.BlockID{}, cb.OldestL1Origin()) - _, err = cb.AddBlock(newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(1), common.Hash{}, 1, 100)) + _, err = cb.AddBlock(SizedBlock{Block: newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(1), common.Hash{}, 1, 100)}) require.NoError(t, err) require.Equal(t, uint64(1), cb.OldestL1Origin().Number) - _, err = 
cb.AddBlock(newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(2), common.Hash{}, 1, 100)) + _, err = cb.AddBlock(SizedBlock{Block: newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(2), common.Hash{}, 1, 100)}) require.NoError(t, err) require.Equal(t, uint64(1), cb.OldestL1Origin().Number) - _, err = cb.AddBlock(newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(3), common.Hash{}, 2, 110)) + _, err = cb.AddBlock(SizedBlock{Block: newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(3), common.Hash{}, 2, 110)}) require.NoError(t, err) require.Equal(t, uint64(1), cb.OldestL1Origin().Number) - _, err = cb.AddBlock(newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(3), common.Hash{}, 1, 110)) + _, err = cb.AddBlock(SizedBlock{Block: newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(3), common.Hash{}, 1, 110)}) require.NoError(t, err) require.Equal(t, uint64(1), cb.OldestL1Origin().Number) } @@ -851,19 +852,19 @@ func TestChannelBuilder_LatestL2(t *testing.T) { require.NoError(t, err) require.Equal(t, eth.BlockID{}, cb.LatestL2()) - _, err = cb.AddBlock(newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(1), common.Hash{}, 1, 100)) + _, err = cb.AddBlock(SizedBlock{Block: newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(1), common.Hash{}, 1, 100)}) require.NoError(t, err) require.Equal(t, uint64(1), cb.LatestL2().Number) - _, err = cb.AddBlock(newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(2), common.Hash{}, 1, 100)) + _, err = cb.AddBlock(SizedBlock{Block: newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(2), common.Hash{}, 1, 100)}) require.NoError(t, err) require.Equal(t, uint64(2), cb.LatestL2().Number) - _, err = cb.AddBlock(newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(3), common.Hash{}, 2, 110)) + _, err = cb.AddBlock(SizedBlock{Block: newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(3), common.Hash{}, 2, 110)}) require.NoError(t, 
err) require.Equal(t, uint64(3), cb.LatestL2().Number) - _, err = cb.AddBlock(newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(3), common.Hash{}, 1, 110)) + _, err = cb.AddBlock(SizedBlock{Block: newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(3), common.Hash{}, 1, 110)}) require.NoError(t, err) require.Equal(t, uint64(3), cb.LatestL2().Number) } @@ -873,19 +874,19 @@ func TestChannelBuilder_OldestL2(t *testing.T) { require.NoError(t, err) require.Equal(t, eth.BlockID{}, cb.OldestL2()) - _, err = cb.AddBlock(newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(1), common.Hash{}, 1, 100)) + _, err = cb.AddBlock(SizedBlock{Block: newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(1), common.Hash{}, 1, 100)}) require.NoError(t, err) require.Equal(t, uint64(1), cb.OldestL2().Number) - _, err = cb.AddBlock(newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(2), common.Hash{}, 1, 100)) + _, err = cb.AddBlock(SizedBlock{Block: newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(2), common.Hash{}, 1, 100)}) require.NoError(t, err) require.Equal(t, uint64(1), cb.OldestL2().Number) - _, err = cb.AddBlock(newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(3), common.Hash{}, 2, 110)) + _, err = cb.AddBlock(SizedBlock{Block: newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(3), common.Hash{}, 2, 110)}) require.NoError(t, err) require.Equal(t, uint64(1), cb.OldestL2().Number) - _, err = cb.AddBlock(newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(3), common.Hash{}, 1, 110)) + _, err = cb.AddBlock(SizedBlock{Block: newMiniL2BlockWithNumberParentAndL1Information(0, big.NewInt(3), common.Hash{}, 1, 110)}) require.NoError(t, err) require.Equal(t, uint64(1), cb.OldestL2().Number) } @@ -910,7 +911,7 @@ func ChannelBuilder_PendingFrames_TotalFrames(t *testing.T, batchType uint) { // fill up for i := 0; ; i++ { block := dtest.RandomL2BlockWithChainIdAndTime(rng, 4, 
defaultTestRollupConfig.L2ChainID, ti.Add(time.Duration(i)*time.Second)) - _, err := cb.AddBlock(block) + _, err := cb.AddBlock(SizedBlock{Block: block}) if cb.IsFull() { break } @@ -967,7 +968,7 @@ func ChannelBuilder_InputBytes(t *testing.T, batchType uint) { require.NoError(batch.EncodeRLP(&buf)) l = buf.Len() } - _, err := cb.AddBlock(block) + _, err := cb.AddBlock(SizedBlock{Block: block}) require.NoError(err) require.Equal(cb.InputBytes(), l) } @@ -989,7 +990,7 @@ func ChannelBuilder_OutputBytes(t *testing.T, batchType uint) { ti := time.Now() for i := 0; ; i++ { block := dtest.RandomL2BlockWithChainIdAndTime(rng, rng.Intn(32), defaultTestRollupConfig.L2ChainID, ti.Add(time.Duration(i)*time.Second)) - _, err := cb.AddBlock(block) + _, err := cb.AddBlock(SizedBlock{Block: block}) if errors.Is(err, derive.ErrCompressorFull) { break } diff --git a/op-batcher/batcher/channel_manager.go b/op-batcher/batcher/channel_manager.go index 62c8e8b89f666..b933de454f2f9 100644 --- a/op-batcher/batcher/channel_manager.go +++ b/op-batcher/batcher/channel_manager.go @@ -36,7 +36,7 @@ type channelManager struct { outFactory ChannelOutFactory // All blocks which are not yet safe - blocks queue.Queue[*types.Block] + blocks queue.Queue[SizedBlock] // blockCursor is an index into blocks queue. It points at the next block // to build a channel with. blockCursor = len(blocks) is reserved for when // there are no blocks ready to build with. @@ -143,7 +143,7 @@ func (s *channelManager) rewindToBlock(block eth.BlockID) { if !ok { panic("rewindToBlock: block not found at index " + fmt.Sprint(i)) } - s.metr.RecordL2BlockInPendingQueue(block) + s.metr.RecordL2BlockInPendingQueue(block.RawSize(), block.EstimatedDABytes()) } } @@ -225,8 +225,8 @@ func (s *channelManager) nextTxData(channel *channel) (txData, error) { // It will decide whether to switch DA type automatically. // When switching DA type, the channelManager state will be rebuilt // with a new ChannelConfig. 
-func (s *channelManager) TxData(l1Head eth.BlockID, isPectra, isThrottling bool) (txData, error) { - channel, err := s.getReadyChannel(l1Head) +func (s *channelManager) TxData(l1Head eth.BlockID, isPectra, isThrottling, forcePublish bool) (txData, error) { + channel, err := s.getReadyChannel(l1Head, forcePublish) if err != nil { return emptyTxData, err } @@ -260,7 +260,7 @@ func (s *channelManager) TxData(l1Head eth.BlockID, isPectra, isThrottling bool) s.defaultCfg = newCfg // Try again to get data to send on chain. - channel, err = s.getReadyChannel(l1Head) + channel, err = s.getReadyChannel(l1Head, forcePublish) if err != nil { return emptyTxData, err } @@ -273,7 +273,18 @@ func (s *channelManager) TxData(l1Head eth.BlockID, isPectra, isThrottling bool) // to the current channel and generates frames for it. // Always returns nil and the io.EOF sentinel error when // there is no channel with txData -func (s *channelManager) getReadyChannel(l1Head eth.BlockID) (*channel, error) { +// If forcePublish is true, it will force close channels and +// generate frames for them. 
+func (s *channelManager) getReadyChannel(l1Head eth.BlockID, forcePublish bool) (*channel, error) { + + if forcePublish && s.currentChannel.TotalFrames() == 0 { + s.log.Info("Force-closing channel and creating frames", "channel_id", s.currentChannel.ID()) + s.currentChannel.Close() + if err := s.currentChannel.OutputFrames(); err != nil { + return nil, err + } + } + var firstWithTxData *channel for _, ch := range s.channelQueue { if ch.HasTxData() { @@ -413,8 +424,8 @@ func (s *channelManager) processBlocks() error { s.log.Debug("Added block to channel", "id", s.currentChannel.ID(), "block", eth.ToBlockID(block)) blocksAdded += 1 - latestL2ref = l2BlockRefFromBlockAndL1Info(block, l1info) - s.metr.RecordL2BlockInChannel(block) + latestL2ref = l2BlockRefFromBlockAndL1Info(block.Block, l1info) + s.metr.RecordL2BlockInChannel(block.RawSize(), block.EstimatedDABytes()) // current block got added but channel is now full if s.currentChannel.IsFull() { break @@ -474,8 +485,9 @@ func (s *channelManager) AddL2Block(block *types.Block) error { return ErrReorg } - s.metr.RecordL2BlockInPendingQueue(block) - s.blocks.Enqueue(block) + b := ToSizedBlock(block) + s.metr.RecordL2BlockInPendingQueue(b.RawSize(), b.EstimatedDABytes()) + s.blocks.Enqueue(b) s.tip = block.Hash() return nil @@ -496,12 +508,21 @@ var ErrPendingAfterClose = errors.New("pending channels remain after closing cha // PruneSafeBlocks dequeues the provided number of blocks from the internal blocks queue func (s *channelManager) PruneSafeBlocks(num int) { - _, ok := s.blocks.DequeueN(int(num)) + discardedBlocks, ok := s.blocks.DequeueN(int(num)) if !ok { panic("tried to prune more blocks than available") } s.blockCursor -= int(num) if s.blockCursor < 0 { + // This is a rare edge case where a block is loaded and pruned before it gets into a channel. 
+ // This may happen if a previous batcher instance build a channel with that block + // which was confirmed _after_ the current batcher pulled it from the sequencer. + numDiscardedPendingBlocks := -1 * s.blockCursor + for i := 0; i < numDiscardedPendingBlocks; i++ { + s.metr.RecordPendingBlockPruned( + discardedBlocks[i].RawSize(), + discardedBlocks[i].EstimatedDABytes()) + } s.blockCursor = 0 } } @@ -541,3 +562,44 @@ func (m *channelManager) LastStoredBlock() eth.BlockID { } return eth.ToBlockID(m.blocks[m.blocks.Len()-1]) } + +func (s *channelManager) UnsafeDABytes() int64 { + return s.unsafeBytesInPendingBlocks() + s.unsafeBytesInOpenChannels() + s.unsafeBytesInClosedChannels() +} + +func (s *channelManager) unsafeBytesInPendingBlocks() int64 { + var bytesNotYetInChannels int64 + for _, block := range s.blocks[s.blockCursor:] { + bytesNotYetInChannels += int64(block.EstimatedDABytes()) + } + return bytesNotYetInChannels +} + +func (s *channelManager) unsafeBytesInOpenChannels() int64 { + // In theory, an open channel can provide accurate estimate of + // the DA bytes in the channel so far. However, in practice, + // the compressors we use can only provide such an estimate in a + // way which leads to a worse compression ratio. So increased + // observability actually hurts the bottom line. + // Therefore, for now just use a block-by-block estimate which should match + // the estimate for the blocks before they were added. 
+ var bytesInOpenChannels int64 + for _, channel := range s.channelQueue { + if channel.TotalFrames() == 0 { + for _, block := range channel.channelBuilder.blocks { + bytesInOpenChannels += int64(block.EstimatedDABytes()) + } + } + } + return bytesInOpenChannels +} + +func (s *channelManager) unsafeBytesInClosedChannels() int64 { + var bytesInClosedChannels int64 + for _, channel := range s.channelQueue { + if channel.TotalFrames() > 0 { + bytesInClosedChannels += int64(channel.OutputBytes()) + } + } + return bytesInClosedChannels +} diff --git a/op-batcher/batcher/channel_manager_memory_test.go b/op-batcher/batcher/channel_manager_memory_test.go index 7dc097a17ddbf..47b89c6e9a73f 100644 --- a/op-batcher/batcher/channel_manager_memory_test.go +++ b/op-batcher/batcher/channel_manager_memory_test.go @@ -124,7 +124,7 @@ func runMemoryTest(t *testing.T, batchType uint, compressorType string, compress require.NoError(t, m.processBlocks()) // Try to get transaction data to fill channels - _, err := m.TxData(eth.BlockID{}, false, false) + _, err := m.TxData(eth.BlockID{}, false, false, false) // It's okay if there's no data ready (io.EOF) if err != nil && err.Error() != "EOF" { require.NoError(t, err) diff --git a/op-batcher/batcher/channel_manager_test.go b/op-batcher/batcher/channel_manager_test.go index 947784684f82b..c8bce13262ee9 100644 --- a/op-batcher/batcher/channel_manager_test.go +++ b/op-batcher/batcher/channel_manager_test.go @@ -18,6 +18,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -84,7 +86,7 @@ func ChannelManagerReturnsErrReorg(t *testing.T, batchType uint) { require.NoError(t, m.AddL2Block(c)) require.ErrorIs(t, m.AddL2Block(x), ErrReorg) - require.Equal(t, queue.Queue[*types.Block]{a, b, c}, m.blocks) + require.Equal(t, 
queue.Queue[SizedBlock]{ToSizedBlock(a), ToSizedBlock(b), ToSizedBlock(c)}, m.blocks) } // ChannelManagerReturnsErrReorgWhenDrained ensures that the channel manager @@ -101,9 +103,9 @@ func ChannelManagerReturnsErrReorgWhenDrained(t *testing.T, batchType uint) { require.NoError(t, m.AddL2Block(a)) - _, err := m.TxData(eth.BlockID{}, false, false) + _, err := m.TxData(eth.BlockID{}, false, false, false) require.NoError(t, err) - _, err = m.TxData(eth.BlockID{}, false, false) + _, err = m.TxData(eth.BlockID{}, false, false, false) require.ErrorIs(t, err, io.EOF) require.ErrorIs(t, m.AddL2Block(x), ErrReorg) @@ -175,7 +177,8 @@ func ChannelManager_Clear(t *testing.T, batchType uint) { } // Artificially pump up some metrics which need to be cleared - m.metr.RecordL2BlockInPendingQueue(a) + A := ToSizedBlock(a) + m.metr.RecordL2BlockInPendingQueue(A.RawSize(), A.EstimatedDABytes()) require.NotZero(m.metr.PendingDABytes()) // Clear the channel manager @@ -204,7 +207,7 @@ func ChannelManager_TxResend(t *testing.T, batchType uint) { require.NoError(m.AddL2Block(a)) - txdata0, err := m.TxData(eth.BlockID{}, false, false) + txdata0, err := m.TxData(eth.BlockID{}, false, false, false) require.NoError(err) txdata0bytes := txdata0.CallData() data0 := make([]byte, len(txdata0bytes)) @@ -212,13 +215,13 @@ func ChannelManager_TxResend(t *testing.T, batchType uint) { copy(data0, txdata0bytes) // ensure channel is drained - _, err = m.TxData(eth.BlockID{}, false, false) + _, err = m.TxData(eth.BlockID{}, false, false, false) require.ErrorIs(err, io.EOF) // requeue frame m.TxFailed(txdata0.ID()) - txdata1, err := m.TxData(eth.BlockID{}, false, false) + txdata1, err := m.TxData(eth.BlockID{}, false, false, false) require.NoError(err) data1 := txdata1.CallData() @@ -358,10 +361,10 @@ func TestChannelManager_TxData(t *testing.T) { // Seed channel manager with a block rng := rand.New(rand.NewSource(99)) blockA := derivetest.RandomL2BlockWithChainId(rng, 200, 
defaultTestRollupConfig.L2ChainID) - m.blocks = []*types.Block{blockA} + m.blocks = queue.Queue[SizedBlock]{SizedBlock{Block: blockA}} // Call TxData a first time to trigger blocks->channels pipeline - _, err := m.TxData(eth.BlockID{}, false, false) + _, err := m.TxData(eth.BlockID{}, false, false, false) require.ErrorIs(t, err, io.EOF) // The test requires us to have something in the channel queue @@ -379,8 +382,8 @@ func TestChannelManager_TxData(t *testing.T) { // we get some data to submit var data txData for { - m.blocks = append(m.blocks, blockA) - data, err = m.TxData(eth.BlockID{}, false, false) + m.blocks.Enqueue(SizedBlock{Block: blockA}) + data, err = m.TxData(eth.BlockID{}, false, false, false) if err == nil && data.Len() > 0 { break } @@ -409,12 +412,12 @@ func TestChannelManager_handleChannelInvalidated(t *testing.T) { // Seed channel manager with blocks rng := rand.New(rand.NewSource(99)) - blockA := derivetest.RandomL2BlockWithChainId(rng, 10, defaultTestRollupConfig.L2ChainID) - blockB := derivetest.RandomL2BlockWithChainId(rng, 10, defaultTestRollupConfig.L2ChainID) + blockA := ToSizedBlock(derivetest.RandomL2BlockWithChainId(rng, 10, defaultTestRollupConfig.L2ChainID)) + blockB := ToSizedBlock(derivetest.RandomL2BlockWithChainId(rng, 10, defaultTestRollupConfig.L2ChainID)) // This is the snapshot of channel manager state we want to reinstate // when we requeue - stateSnapshot := queue.Queue[*types.Block]{blockA, blockB} + stateSnapshot := queue.Queue[SizedBlock]{blockA, blockB} m.blocks = stateSnapshot require.Empty(t, m.channelQueue) require.Equal(t, metrics.ChannelQueueLength, 0) @@ -429,8 +432,8 @@ func TestChannelManager_handleChannelInvalidated(t *testing.T) { require.Equal(t, metrics.ChannelQueueLength, 1) // Setup initial metrics - metrics.RecordL2BlockInPendingQueue(blockA) - metrics.RecordL2BlockInPendingQueue(blockB) + metrics.RecordL2BlockInPendingQueue(blockA.RawSize(), blockA.EstimatedDABytes()) + 
metrics.RecordL2BlockInPendingQueue(blockB.RawSize(), blockB.EstimatedDABytes()) pendingBytesBefore := metrics.PendingBlocksBytesCurrent // Trigger the blocks -> channelQueue data pipelining @@ -452,6 +455,7 @@ func TestChannelManager_handleChannelInvalidated(t *testing.T) { channelToInvalidate := m.currentChannel m.currentChannel.Close() require.NoError(t, m.ensureChannelWithSpace(eth.BlockID{})) + newerChannel := m.currentChannel require.Len(t, m.channelQueue, 3) require.Equal(t, metrics.ChannelQueueLength, 3) require.NoError(t, m.processBlocks()) @@ -463,7 +467,7 @@ func TestChannelManager_handleChannelInvalidated(t *testing.T) { require.Equal(t, m.blocks, stateSnapshot) require.Contains(t, m.channelQueue, oldChannel) require.NotContains(t, m.channelQueue, channelToInvalidate) - require.NotContains(t, m.channelQueue, newChannel) + require.NotContains(t, m.channelQueue, newerChannel) require.Len(t, m.channelQueue, 1) require.Equal(t, metrics.ChannelQueueLength, 1) @@ -485,95 +489,116 @@ func TestChannelManager_handleChannelInvalidated(t *testing.T) { func TestChannelManager_PruneBlocks(t *testing.T) { cfg := channelManagerTestConfig(100, derive.SingularBatchType) cfg.InitNoneCompressor() - a := types.NewBlock(&types.Header{ + a := SizedBlock{Block: types.NewBlock(&types.Header{ Number: big.NewInt(0), - }, nil, nil, nil, types.DefaultBlockConfig) - b := types.NewBlock(&types.Header{ + }, nil, nil, nil, types.DefaultBlockConfig)} + b := SizedBlock{Block: types.NewBlock(&types.Header{ Number: big.NewInt(1), ParentHash: a.Hash(), - }, nil, nil, nil, types.DefaultBlockConfig) - c := types.NewBlock(&types.Header{ + }, nil, nil, nil, types.DefaultBlockConfig)} + c := SizedBlock{Block: types.NewBlock(&types.Header{ Number: big.NewInt(2), ParentHash: b.Hash(), - }, nil, nil, nil, types.DefaultBlockConfig) + }, nil, nil, nil, types.DefaultBlockConfig)} type testCase struct { - name string - initialQ queue.Queue[*types.Block] - initialBlockCursor int - numChannelsToPrune 
int - expectedQ queue.Queue[*types.Block] - expectedBlockCursor int + name string + initialQ queue.Queue[SizedBlock] + initialBlockCursor int + numBlocksToPrune int + expectedQ queue.Queue[SizedBlock] + expectedBlockCursor int + expectedPendingBytesDecreases bool } for _, tc := range []testCase{ { name: "[A,B,C]*+1->[B,C]*", // * denotes the cursor - initialQ: queue.Queue[*types.Block]{a, b, c}, + initialQ: queue.Queue[SizedBlock]{a, b, c}, initialBlockCursor: 3, - numChannelsToPrune: 1, - expectedQ: queue.Queue[*types.Block]{b, c}, + numBlocksToPrune: 1, + expectedQ: queue.Queue[SizedBlock]{b, c}, expectedBlockCursor: 2, }, { name: "[A,B,C*]+1->[B,C*]", - initialQ: queue.Queue[*types.Block]{a, b, c}, + initialQ: queue.Queue[SizedBlock]{a, b, c}, initialBlockCursor: 2, - numChannelsToPrune: 1, - expectedQ: queue.Queue[*types.Block]{b, c}, + numBlocksToPrune: 1, + expectedQ: queue.Queue[SizedBlock]{b, c}, expectedBlockCursor: 1, }, { name: "[A,B,C]*+2->[C]*", - initialQ: queue.Queue[*types.Block]{a, b, c}, + initialQ: queue.Queue[SizedBlock]{a, b, c}, initialBlockCursor: 3, - numChannelsToPrune: 2, - expectedQ: queue.Queue[*types.Block]{c}, + numBlocksToPrune: 2, + expectedQ: queue.Queue[SizedBlock]{c}, expectedBlockCursor: 1, }, { name: "[A,B,C*]+2->[C*]", - initialQ: queue.Queue[*types.Block]{a, b, c}, + initialQ: queue.Queue[SizedBlock]{a, b, c}, initialBlockCursor: 2, - numChannelsToPrune: 2, - expectedQ: queue.Queue[*types.Block]{c}, + numBlocksToPrune: 2, + expectedQ: queue.Queue[SizedBlock]{c}, expectedBlockCursor: 0, }, { - name: "[A*,B,C]+1->[B*,C]", - initialQ: queue.Queue[*types.Block]{a, b, c}, - initialBlockCursor: 0, - numChannelsToPrune: 1, - expectedQ: queue.Queue[*types.Block]{b, c}, - expectedBlockCursor: 0, + name: "[A*,B,C]+1->[B*,C]", + initialQ: queue.Queue[SizedBlock]{a, b, c}, + initialBlockCursor: 0, + numBlocksToPrune: 1, + expectedQ: queue.Queue[SizedBlock]{b, c}, + expectedBlockCursor: 0, + expectedPendingBytesDecreases: true, // we 
removed a pending block }, { name: "[A,B,C]+3->[]", - initialQ: queue.Queue[*types.Block]{a, b, c}, + initialQ: queue.Queue[SizedBlock]{a, b, c}, initialBlockCursor: 3, - numChannelsToPrune: 3, - expectedQ: queue.Queue[*types.Block]{}, + numBlocksToPrune: 3, + expectedQ: queue.Queue[SizedBlock]{}, expectedBlockCursor: 0, }, { name: "[A,B,C]*+4->panic", - initialQ: queue.Queue[*types.Block]{a, b, c}, + initialQ: queue.Queue[SizedBlock]{a, b, c}, initialBlockCursor: 3, - numChannelsToPrune: 4, + numBlocksToPrune: 4, expectedQ: nil, // declare that the prune method should panic expectedBlockCursor: 0, }, + { + name: "[A,B,C]+3->[]", + initialQ: queue.Queue[SizedBlock]{a, b, c}, + initialBlockCursor: 2, // we will prune _past_ the block cursor + numBlocksToPrune: 3, + expectedQ: queue.Queue[SizedBlock]{}, + expectedBlockCursor: 0, + expectedPendingBytesDecreases: true, // we removed a pending block + }, } { t.Run(tc.name, func(t *testing.T) { l := testlog.Logger(t, log.LevelCrit) - m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) - m.blocks = tc.initialQ + metrics := new(metrics.TestMetrics) + m := NewChannelManager(l, metrics, cfg, defaultTestRollupConfig) + m.blocks = tc.initialQ // not adding blocks via the API so metrics may be inaccurate m.blockCursor = tc.initialBlockCursor + initialPendingDABytes := metrics.PendingDABytes() + initialPendingBlocks := m.pendingBlocks() if tc.expectedQ != nil { - m.PruneSafeBlocks(tc.numChannelsToPrune) + m.PruneSafeBlocks(tc.numBlocksToPrune) require.Equal(t, tc.expectedQ, m.blocks) } else { - require.Panics(t, func() { m.PruneSafeBlocks(tc.numChannelsToPrune) }) + require.Panics(t, func() { m.PruneSafeBlocks(tc.numBlocksToPrune) }) + } + if tc.expectedPendingBytesDecreases { + assert.Less(t, metrics.PendingDABytes(), initialPendingDABytes) + assert.Less(t, m.pendingBlocks(), initialPendingBlocks) + } else { // we should not have removed any blocks + require.Equal(t, metrics.PendingDABytes(), 
initialPendingDABytes) + require.Equal(t, initialPendingBlocks, m.pendingBlocks()) } }) } @@ -670,3 +695,307 @@ func TestChannelManager_ChannelOutFactory(t *testing.T) { require.IsType(t, &ChannelOutWrapper{}, m.currentChannel.channelBuilder.co) } + +// TestChannelManager_TxData seeds the channel manager with blocks and triggers the +// blocks->channels pipeline once without force publish disabled, and once with force publish enabled. +func TestChannelManager_TxData_ForcePublish(t *testing.T) { + + l := testlog.Logger(t, log.LevelCrit) + cfg := newFakeDynamicEthChannelConfig(l, 1000) + m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) + + // Seed channel manager with a block + rng := rand.New(rand.NewSource(99)) + blockA := derivetest.RandomL2BlockWithChainId(rng, 200, defaultTestRollupConfig.L2ChainID) + m.blocks = queue.Queue[SizedBlock]{SizedBlock{Block: blockA}} + + // Call TxData a first time to trigger blocks->channels pipeline + txData, err := m.TxData(eth.BlockID{}, false, false, false) + require.ErrorIs(t, err, io.EOF) + require.Zero(t, txData.Len(), 0) + + // The test requires us to have something in the channel queue + // at this point, but not yet ready to send and not full + require.NotEmpty(t, m.channelQueue) + require.False(t, m.channelQueue[0].IsFull()) + + // Call TxData with force publish enabled + txData, err = m.TxData(eth.BlockID{}, false, false, true) + + // Despite no additional blocks being added, we should have tx data: + require.NoError(t, err) + require.NotZero(t, txData.Len(), "txData should not be empty") + + // The channel should be full and ready to send + require.Len(t, m.channelQueue, 1) + require.True(t, m.channelQueue[0].IsFull()) +} + +func newBlock(parent *types.Block, numTransactions int) *types.Block { + var rng *rand.Rand + if parent == nil { + rng = rand.New(rand.NewSource(123)) + } else { + rng = rand.New(rand.NewSource(int64(parent.Header().Number.Uint64()))) + } + block := 
derivetest.RandomL2BlockWithChainId(rng, numTransactions, defaultTestRollupConfig.L2ChainID) + header := block.Header() + if parent == nil { + header.Number = new(big.Int) + header.ParentHash = common.Hash{} + header.Time = 1675 + } else { + header.Number = big.NewInt(0).Add(parent.Header().Number, big.NewInt(1)) + header.ParentHash = parent.Header().Hash() + header.Time = parent.Header().Time + 2 + } + return types.NewBlock(header, block.Body(), nil, trie.NewStackTrie(nil), types.DefaultBlockConfig) +} + +func newChain(numBlocks int) []*types.Block { + blocks := make([]*types.Block, numBlocks) + blocks[0] = newBlock(nil, 10) + for i := 1; i < numBlocks; i++ { + blocks[i] = newBlock(blocks[i-1], 10) + } + return blocks +} + +// TestChannelManagerUnsafeBytes tests the unsafe bytes in the channel manager +// by adding blocks to the unsafe block queue, adding them to a channel, +// and then sealing the channel. It asserts on the final state of the channel +// manager and tracks the unsafe DA estimate as blocks move through the pipeline. 
+func TestChannelManagerUnsafeBytes(t *testing.T) { + + type testCase struct { + blocks []*types.Block + batchType uint + compressor string + afterAddingToUnsafeBlockQueue int64 + afterAddingToChannel int64 + afterSealingChannel int64 + } + + a := newBlock(nil, 3) + b := newBlock(a, 3) + c := newBlock(b, 3) + + emptyA := newBlock(nil, 0) + emptyB := newBlock(emptyA, 0) + emptyC := newBlock(emptyB, 0) + + twentyBlocks := newChain(20) + tenBlocks := newChain(10) + + testChannelManagerUnsafeBytes := func(t *testing.T, tc testCase) { + cfg := ChannelConfig{ + MaxFrameSize: 120000 - 1, + TargetNumFrames: 5, + BatchType: tc.batchType, + } + + switch tc.batchType { + case derive.SpanBatchType: + cfg.CompressorConfig.CompressionAlgo = derive.Brotli10 + cfg.CompressorConfig.TargetOutputSize = MaxDataSize(cfg.TargetNumFrames, cfg.MaxFrameSize) + case derive.SingularBatchType: + switch tc.compressor { + case "shadow": + cfg.InitShadowCompressor(derive.Brotli10) + case "ratio": + cfg.InitRatioCompressor(1, derive.Brotli10) + default: + t.Fatalf("unknown compressor: %s", tc.compressor) + } + default: + panic("unknown batch type") + } + + manager := NewChannelManager(log.New(), metrics.NoopMetrics, cfg, defaultTestRollupConfig) + + for _, block := range tc.blocks { + require.NoError(t, manager.AddL2Block(block)) + } + + assert.Equal(t, tc.afterAddingToUnsafeBlockQueue, manager.UnsafeDABytes()) + assert.Equal(t, tc.afterAddingToUnsafeBlockQueue, manager.unsafeBytesInPendingBlocks()) + assert.Zero(t, manager.unsafeBytesInOpenChannels()) + assert.Zero(t, manager.unsafeBytesInClosedChannels()) + + for err := error(nil); err != io.EOF; { + require.NoError(t, err) + _, err = manager.TxData(eth.BlockID{ + Hash: common.Hash{}, + Number: 0, + }, true, false, false) + } + + assert.Equal(t, tc.afterAddingToChannel, manager.UnsafeDABytes()) + assert.Zero(t, manager.unsafeBytesInPendingBlocks()) + assert.Equal(t, tc.afterAddingToChannel, manager.unsafeBytesInOpenChannels()) + assert.Zero(t, 
manager.unsafeBytesInClosedChannels()) + + manager.currentChannel.Close() + err := manager.currentChannel.OutputFrames() + require.NoError(t, err) + + assert.Equal(t, tc.afterSealingChannel, manager.UnsafeDABytes()) + assert.Zero(t, manager.unsafeBytesInPendingBlocks()) + assert.Zero(t, manager.unsafeBytesInOpenChannels()) + assert.Equal(t, tc.afterSealingChannel, manager.unsafeBytesInClosedChannels()) + } + + t.Run("case1", func(t *testing.T) { + testChannelManagerUnsafeBytes(t, testCase{ + blocks: []*types.Block{a}, + batchType: derive.SingularBatchType, + compressor: "shadow", + afterAddingToUnsafeBlockQueue: 2138, + afterAddingToChannel: 2138, + afterSealingChannel: 2660, + }) + }) + + t.Run("case2", func(t *testing.T) { + testChannelManagerUnsafeBytes(t, testCase{ + blocks: []*types.Block{a, b}, + batchType: derive.SingularBatchType, + compressor: "shadow", + afterAddingToUnsafeBlockQueue: 3813, + afterAddingToChannel: 3813, + afterSealingChannel: 4754, + }) + }) + + t.Run("case3", func(t *testing.T) { + testChannelManagerUnsafeBytes(t, testCase{ + blocks: []*types.Block{a, b, c}, + batchType: derive.SingularBatchType, + compressor: "shadow", + afterAddingToUnsafeBlockQueue: 5794, + afterAddingToChannel: 5794, + afterSealingChannel: 7199, + }) + }) + + t.Run("case4", func(t *testing.T) { + testChannelManagerUnsafeBytes(t, testCase{ + blocks: []*types.Block{a}, + batchType: derive.SingularBatchType, + compressor: "shadow", + afterAddingToUnsafeBlockQueue: 2138, + afterAddingToChannel: 2138, + afterSealingChannel: 2660, + }) + }) + + t.Run("case5", func(t *testing.T) { + testChannelManagerUnsafeBytes(t, testCase{ + blocks: []*types.Block{a, b, c}, + batchType: derive.SingularBatchType, + compressor: "shadow", + afterAddingToUnsafeBlockQueue: 5794, + afterAddingToChannel: 5794, + afterSealingChannel: 7199, + }) + }) + + t.Run("case6", func(t *testing.T) { + testChannelManagerUnsafeBytes(t, testCase{ + blocks: []*types.Block{a}, + batchType: derive.SpanBatchType, 
+ compressor: "", + afterAddingToUnsafeBlockQueue: 2138, + afterAddingToChannel: 2138, + afterSealingChannel: 2606, + }) + }) + + t.Run("case7", func(t *testing.T) { + testChannelManagerUnsafeBytes(t, testCase{ + blocks: []*types.Block{a, b}, + batchType: derive.SpanBatchType, + compressor: "", + afterAddingToUnsafeBlockQueue: 3813, + afterAddingToChannel: 3813, + afterSealingChannel: 4590, + }) + }) + + t.Run("case8", func(t *testing.T) { + testChannelManagerUnsafeBytes(t, testCase{ + blocks: []*types.Block{a, b, c}, + batchType: derive.SpanBatchType, + compressor: "", + afterAddingToUnsafeBlockQueue: 5794, + afterAddingToChannel: 5794, + afterSealingChannel: 6929, + }) + }) + + t.Run("case9", func(t *testing.T) { + testChannelManagerUnsafeBytes(t, testCase{ + blocks: []*types.Block{emptyA}, + batchType: derive.SingularBatchType, + compressor: "shadow", + afterAddingToUnsafeBlockQueue: 70, + afterAddingToChannel: 70, + afterSealingChannel: 108, + }) + }) + + t.Run("case10", func(t *testing.T) { + testChannelManagerUnsafeBytes(t, testCase{ + blocks: []*types.Block{emptyA, emptyB, emptyC}, + batchType: derive.SingularBatchType, + compressor: "shadow", + afterAddingToUnsafeBlockQueue: 210, + afterAddingToChannel: 210, + afterSealingChannel: 267, + }) + }) + + t.Run("case11", func(t *testing.T) { + testChannelManagerUnsafeBytes(t, testCase{ + blocks: []*types.Block{emptyA}, + batchType: derive.SpanBatchType, + compressor: "", + afterAddingToUnsafeBlockQueue: 70, + afterAddingToChannel: 70, + afterSealingChannel: 79, + }) + }) + + t.Run("case12", func(t *testing.T) { + testChannelManagerUnsafeBytes(t, testCase{ + blocks: []*types.Block{emptyA, emptyB, emptyC}, + batchType: derive.SpanBatchType, + compressor: "", + afterAddingToUnsafeBlockQueue: 210, + afterAddingToChannel: 210, + afterSealingChannel: 81, + }) + }) + + t.Run("case13", func(t *testing.T) { + testChannelManagerUnsafeBytes(t, testCase{ + blocks: twentyBlocks, + batchType: derive.SingularBatchType, + 
compressor: "shadow", + afterAddingToUnsafeBlockQueue: 103070, + afterAddingToChannel: 103070, + afterSealingChannel: 128120, + }) + }) + + t.Run("case14", func(t *testing.T) { + testChannelManagerUnsafeBytes(t, testCase{ + blocks: tenBlocks, + batchType: derive.SpanBatchType, + compressor: "", + afterAddingToUnsafeBlockQueue: 50971, + afterAddingToChannel: 50971, + afterSealingChannel: 61869, + }) + }) +} diff --git a/op-batcher/batcher/config.go b/op-batcher/batcher/config.go index 7e416abcfd832..c6530742453c3 100644 --- a/op-batcher/batcher/config.go +++ b/op-batcher/batcher/config.go @@ -24,6 +24,46 @@ import ( // blob config. var maxBlobsPerBlock = params.DefaultPragueBlobConfig.Max +type ThrottleConfig struct { + AdditionalEndpoints []string + + TxSizeLowerLimit uint64 + TxSizeUpperLimit uint64 + BlockSizeLowerLimit uint64 + BlockSizeUpperLimit uint64 + + ControllerType config.ThrottleControllerType + LowerThreshold uint64 + UpperThreshold uint64 + + // PID Controller specific parameters + PidKp float64 + PidKi float64 + PidKd float64 + PidIntegralMax float64 + PidOutputMax float64 + PidSampleTime time.Duration +} + +func (c *ThrottleConfig) Check() error { + if !config.ValidThrottleControllerType(c.ControllerType) { + return fmt.Errorf("invalid throttle controller type: %s (must be one of: %v)", c.ControllerType, config.ThrottleControllerTypes) + } + + if c.LowerThreshold != 0 && c.UpperThreshold <= c.LowerThreshold { + return fmt.Errorf("throttle.upper-threshold must be greater than throttle.lower-threshold") + } + + if c.BlockSizeLowerLimit > 0 && c.BlockSizeLowerLimit >= c.BlockSizeUpperLimit { + return fmt.Errorf("throttle.block-size-lower-limit must be less than throttle.block-size-upper-limit") + } + + if c.TxSizeLowerLimit > 0 && c.ControllerType != config.StepControllerType && c.TxSizeLowerLimit >= c.TxSizeUpperLimit { + return fmt.Errorf("throttle.tx-size-lower-limit must be less than throttle.tx-size-upper-limit") + } + return nil +} + type 
CLIConfig struct { // L1EthRpc is the HTTP provider URL for L1. L1EthRpc string @@ -100,36 +140,11 @@ type CLIConfig struct { // ActiveSequencerCheckDuration is the duration between checks to determine the active sequencer endpoint. ActiveSequencerCheckDuration time.Duration - // ThrottleThreshold is the number of pending bytes beyond which the batcher will start throttling future bytes. Set to 0 to - // disable sequencer throttling entirely (only recommended for testing). - ThrottleThreshold uint64 - // ThrottleTxSize is the DA size of a transaction to start throttling when we are over the throttling threshold. - ThrottleTxSize uint64 - // ThrottleBlockSize is the total per-block DA limit to start imposing on block building when we are over the throttling threshold. - ThrottleBlockSize uint64 - // ThrottleAlwaysBlockSize is the total per-block DA limit to always imposing on block building. - ThrottleAlwaysBlockSize uint64 - // TestUseMaxTxSizeForBlobs allows to set the blob size with MaxL1TxSize. // Should only be used for testing purposes. TestUseMaxTxSizeForBlobs bool - // AdditionalThrottlingEndpoints is a list of additional endpoints to throttle. - AdditionalThrottlingEndpoints []string - - // ThrottleControllerType is the type of throttle controller to use. 
Set to step by default - ThrottleControllerType config.ThrottleControllerType - - // PID Controller specific parameters - ThrottlePidKp float64 - ThrottlePidKi float64 - ThrottlePidKd float64 - ThrottlePidIntegralMax float64 - ThrottlePidOutputMax float64 - ThrottlePidSampleTime time.Duration - - // ThrottleThresholdMultiplier is the threshold multiplier for the quadratic controller - ThrottleThresholdMultiplier float64 + ThrottleConfig ThrottleConfig TxMgrConfig txmgr.CLIConfig LogConfig oplog.CLIConfig @@ -183,8 +198,8 @@ func (c *CLIConfig) Check() error { return fmt.Errorf("too many frames for blob transactions, max %d", maxBlobsPerBlock) } - if !config.ValidThrottleControllerType(c.ThrottleControllerType) { - return fmt.Errorf("invalid throttle controller type: %s (must be one of: %v)", c.ThrottleControllerType, config.ThrottleControllerTypes) + if err := c.ThrottleConfig.Check(); err != nil { + return err } if err := c.MetricsConfig.Check(); err != nil { @@ -213,38 +228,41 @@ func NewConfig(ctx *cli.Context) *CLIConfig { PollInterval: ctx.Duration(flags.PollIntervalFlag.Name), /* Optional Flags */ - MaxPendingTransactions: ctx.Uint64(flags.MaxPendingTransactionsFlag.Name), - MaxChannelDuration: ctx.Uint64(flags.MaxChannelDurationFlag.Name), - MaxL1TxSize: ctx.Uint64(flags.MaxL1TxSizeBytesFlag.Name), - MaxBlocksPerSpanBatch: ctx.Int(flags.MaxBlocksPerSpanBatch.Name), - TargetNumFrames: ctx.Int(flags.TargetNumFramesFlag.Name), - ApproxComprRatio: ctx.Float64(flags.ApproxComprRatioFlag.Name), - Compressor: ctx.String(flags.CompressorFlag.Name), - CompressionAlgo: derive.CompressionAlgo(ctx.String(flags.CompressionAlgoFlag.Name)), - Stopped: ctx.Bool(flags.StoppedFlag.Name), - WaitNodeSync: ctx.Bool(flags.WaitNodeSyncFlag.Name), - CheckRecentTxsDepth: ctx.Int(flags.CheckRecentTxsDepthFlag.Name), - BatchType: ctx.Uint(flags.BatchTypeFlag.Name), - DataAvailabilityType: flags.DataAvailabilityType(ctx.String(flags.DataAvailabilityTypeFlag.Name)), - 
ActiveSequencerCheckDuration: ctx.Duration(flags.ActiveSequencerCheckDurationFlag.Name), - TxMgrConfig: txmgr.ReadCLIConfig(ctx), - LogConfig: oplog.ReadCLIConfig(ctx), - MetricsConfig: opmetrics.ReadCLIConfig(ctx), - PprofConfig: oppprof.ReadCLIConfig(ctx), - RPC: oprpc.ReadCLIConfig(ctx), - AltDA: altda.ReadCLIConfig(ctx), - ThrottleThreshold: ctx.Uint64(flags.ThrottleThresholdFlag.Name), - ThrottleTxSize: ctx.Uint64(flags.ThrottleTxSizeFlag.Name), - ThrottleBlockSize: ctx.Uint64(flags.ThrottleBlockSizeFlag.Name), - ThrottleAlwaysBlockSize: ctx.Uint64(flags.ThrottleAlwaysBlockSizeFlag.Name), - AdditionalThrottlingEndpoints: ctx.StringSlice(flags.AdditionalThrottlingEndpointsFlag.Name), - ThrottleControllerType: config.ThrottleControllerType(ctx.String(flags.ThrottleControllerTypeFlag.Name)), - ThrottlePidKp: ctx.Float64(flags.ThrottlePidKpFlag.Name), - ThrottlePidKi: ctx.Float64(flags.ThrottlePidKiFlag.Name), - ThrottlePidKd: ctx.Float64(flags.ThrottlePidKdFlag.Name), - ThrottlePidIntegralMax: ctx.Float64(flags.ThrottlePidIntegralMaxFlag.Name), - ThrottlePidOutputMax: ctx.Float64(flags.ThrottlePidOutputMaxFlag.Name), - ThrottlePidSampleTime: ctx.Duration(flags.ThrottlePidSampleTimeFlag.Name), - ThrottleThresholdMultiplier: ctx.Float64(flags.ThrottleThresholdMultiplierFlag.Name), + MaxPendingTransactions: ctx.Uint64(flags.MaxPendingTransactionsFlag.Name), + MaxChannelDuration: ctx.Uint64(flags.MaxChannelDurationFlag.Name), + MaxL1TxSize: ctx.Uint64(flags.MaxL1TxSizeBytesFlag.Name), + MaxBlocksPerSpanBatch: ctx.Int(flags.MaxBlocksPerSpanBatch.Name), + TargetNumFrames: ctx.Int(flags.TargetNumFramesFlag.Name), + ApproxComprRatio: ctx.Float64(flags.ApproxComprRatioFlag.Name), + Compressor: ctx.String(flags.CompressorFlag.Name), + CompressionAlgo: derive.CompressionAlgo(ctx.String(flags.CompressionAlgoFlag.Name)), + Stopped: ctx.Bool(flags.StoppedFlag.Name), + WaitNodeSync: ctx.Bool(flags.WaitNodeSyncFlag.Name), + CheckRecentTxsDepth: 
ctx.Int(flags.CheckRecentTxsDepthFlag.Name), + BatchType: ctx.Uint(flags.BatchTypeFlag.Name), + DataAvailabilityType: flags.DataAvailabilityType(ctx.String(flags.DataAvailabilityTypeFlag.Name)), + ActiveSequencerCheckDuration: ctx.Duration(flags.ActiveSequencerCheckDurationFlag.Name), + TxMgrConfig: txmgr.ReadCLIConfig(ctx), + LogConfig: oplog.ReadCLIConfig(ctx), + MetricsConfig: opmetrics.ReadCLIConfig(ctx), + PprofConfig: oppprof.ReadCLIConfig(ctx), + RPC: oprpc.ReadCLIConfig(ctx), + AltDA: altda.ReadCLIConfig(ctx), + ThrottleConfig: ThrottleConfig{ + AdditionalEndpoints: ctx.StringSlice(flags.AdditionalThrottlingEndpointsFlag.Name), + TxSizeLowerLimit: ctx.Uint64(flags.ThrottleTxSizeLowerLimitFlag.Name), + TxSizeUpperLimit: ctx.Uint64(flags.ThrottleTxSizeUpperLimitFlag.Name), + BlockSizeLowerLimit: ctx.Uint64(flags.ThrottleBlockSizeLowerLimitFlag.Name), + BlockSizeUpperLimit: ctx.Uint64(flags.ThrottleBlockSizeUpperLimitFlag.Name), + ControllerType: config.ThrottleControllerType(ctx.String(flags.ThrottleControllerTypeFlag.Name)), + LowerThreshold: ctx.Uint64(flags.ThrottleUsafeDABytesLowerThresholdFlag.Name), + UpperThreshold: ctx.Uint64(flags.ThrottleUsafeDABytesUpperThresholdFlag.Name), + PidKp: ctx.Float64(flags.ThrottlePidKpFlag.Name), + PidKi: ctx.Float64(flags.ThrottlePidKiFlag.Name), + PidKd: ctx.Float64(flags.ThrottlePidKdFlag.Name), + PidIntegralMax: ctx.Float64(flags.ThrottlePidIntegralMaxFlag.Name), + PidOutputMax: ctx.Float64(flags.ThrottlePidOutputMaxFlag.Name), + PidSampleTime: ctx.Duration(flags.ThrottlePidSampleTimeFlag.Name), + }, } } diff --git a/op-batcher/batcher/config_test.go b/op-batcher/batcher/config_test.go index d12ff6ab31478..192ded7cb9461 100644 --- a/op-batcher/batcher/config_test.go +++ b/op-batcher/batcher/config_test.go @@ -38,11 +38,17 @@ func validBatcherConfig() batcher.CLIConfig { MetricsConfig: metrics.DefaultCLIConfig(), PprofConfig: oppprof.DefaultCLIConfig(), // The compressor config is not checked in config.Check() - RPC: 
rpc.DefaultCLIConfig(), - CompressionAlgo: derive.Zlib, - ThrottleThreshold: 0, // no DA throttling - ThrottleTxSize: 0, - ThrottleControllerType: "step", // default controller type + RPC: rpc.DefaultCLIConfig(), + CompressionAlgo: derive.Zlib, + ThrottleConfig: batcher.ThrottleConfig{ + ControllerType: flags.DefaultThrottleControllerType, + LowerThreshold: flags.DefaultThrottleLowerThreshold, + UpperThreshold: flags.DefaultThrottleUpperThreshold, + TxSizeLowerLimit: flags.DefaultThrottleTxSizeLowerLimit, + TxSizeUpperLimit: flags.DefaultThrottleTxSizeUpperLimit, + BlockSizeLowerLimit: flags.DefaultThrottleBlockSizeLowerLimit, + BlockSizeUpperLimit: flags.DefaultThrottleBlockSizeUpperLimit, + }, } } @@ -121,6 +127,24 @@ func TestBatcherConfig(t *testing.T) { }, errString: "invalid ApproxComprRatio 4.2 for ratio compressor", }, + { + name: "throttle_max_threshold=throttle_threshold", + override: func(c *batcher.CLIConfig) { + c.ThrottleConfig.LowerThreshold = 5 + c.ThrottleConfig.UpperThreshold = 5 + + }, + errString: "throttle.upper-threshold must be greater than throttle.lower-threshold", + }, + { + name: "throttle_max_threshold=throttle_threshold", + override: func(c *batcher.CLIConfig) { + c.ThrottleConfig.LowerThreshold = 5 + c.ThrottleConfig.UpperThreshold = 4 + + }, + errString: "throttle.upper-threshold must be greater than throttle.lower-threshold", + }, } for _, test := range tests { diff --git a/op-batcher/batcher/driver.go b/op-batcher/batcher/driver.go index fdde1ca56ffd3..308c1abbc73f9 100644 --- a/op-batcher/batcher/driver.go +++ b/op-batcher/batcher/driver.go @@ -118,6 +118,8 @@ type BatchSubmitter struct { prevCurrentL1 eth.L1BlockRef // cached CurrentL1 from the last syncStatus throttleController *throttler.ThrottleController + + publishSignal chan bool // true if we should force a tx to be published now, false if we should check the usual conditions (timeouts) } // NewBatchSubmitter initializes the BatchSubmitter driver from a preconfigured 
DriverSetup @@ -172,21 +174,22 @@ func (l *BatchSubmitter) StartBatchSubmitting() error { l.txpoolState = TxpoolGood // no need to lock mutex as no other routines yet exist // Channels used to signal between the loops - pendingBytesUpdated := make(chan int64, 1) - publishSignal := make(chan struct{}, 1) + unsafeBytesUpdated := make(chan int64, 1) + publishSignal := make(chan bool, 1) + l.publishSignal = publishSignal // DA throttling loop should always be started except for testing (indicated by ThrottleThreshold == 0) - if l.Config.ThrottleParams.Threshold > 0 { + if l.Config.ThrottleParams.LowerThreshold > 0 { l.wg.Add(1) - go l.throttlingLoop(l.wg, pendingBytesUpdated) // ranges over pendingBytesUpdated channel + go l.throttlingLoop(l.wg, unsafeBytesUpdated) // ranges over unsafeBytesUpdated channel } else { l.Log.Warn("Throttling loop is DISABLED due to 0 throttle-threshold. This should not be disabled in prod.") } l.wg.Add(3) - go l.receiptsLoop(l.wg, receiptsCh) // ranges over receiptsCh channel - go l.publishingLoop(l.killCtx, l.wg, receiptsCh, publishSignal) // ranges over publishSignal, spawns routines which send on receiptsCh. Closes receiptsCh when done. - go l.blockLoadingLoop(l.shutdownCtx, l.wg, pendingBytesUpdated, publishSignal) // sends on pendingBytesUpdated (if throttling enabled), and publishSignal. Closes them both when done + go l.receiptsLoop(l.wg, receiptsCh) // ranges over receiptsCh channel + go l.publishingLoop(l.killCtx, l.wg, receiptsCh, publishSignal) // ranges over publishSignal, spawns routines which send on receiptsCh. Closes receiptsCh when done. + go l.blockLoadingLoop(l.shutdownCtx, l.wg, unsafeBytesUpdated, publishSignal) // sends on unsafeBytesUpdated (if throttling enabled), and publishSignal. 
Closes them both when done l.Log.Info("Batch Submitter started") return nil @@ -259,9 +262,21 @@ func (l *BatchSubmitter) StopBatchSubmitting(ctx context.Context) error { return nil } +// Flush forces the batcher to submit any pending data immediately. +// This works by signaling the publishing loop to process any available data. +func (l *BatchSubmitter) Flush(ctx context.Context) error { + if !l.running { + return ErrBatcherNotRunning + } + + l.Log.Info("Flushing Batch Submitter") + trySignal(l.publishSignal, true) + return nil +} + // loadBlocksIntoState loads the blocks between start and end (inclusive). // If there is a reorg, it will return an error. -func (l *BatchSubmitter) loadBlocksIntoState(ctx context.Context, start, end uint64, publishSignal chan struct{}, pendingBytesUpdated chan int64) error { +func (l *BatchSubmitter) loadBlocksIntoState(ctx context.Context, start, end uint64, publishSignal chan bool, unsafeBytesUpdated chan int64) error { if end < start { return fmt.Errorf("start number is > end number %d,%d", start, end) } @@ -288,8 +303,8 @@ func (l *BatchSubmitter) loadBlocksIntoState(ctx context.Context, start, end uin // Every 100 blocks, signal the publishing loop to publish. // This allows the batcher to start publishing sooner in the // case of a large backlog of blocks to load. - trySignal(publishSignal) - l.sendToThrottlingLoop(pendingBytesUpdated) + l.sendToThrottlingLoop(unsafeBytesUpdated) + trySignal(publishSignal, false) } } @@ -404,29 +419,31 @@ const ( TxpoolCancelPending ) -// sendToThrottlingLoop sends the current pending bytes to the throttling loop. +func (l *BatchSubmitter) unsafeDABytes() int64 { + l.channelMgrMutex.Lock() + defer l.channelMgrMutex.Unlock() + return l.channelMgr.UnsafeDABytes() +} + +// sendToThrottlingLoop sends the current unsafe bytes to the throttling loop. // It is not blocking, no signal will be sent if the channel is full. 
-func (l *BatchSubmitter) sendToThrottlingLoop(pendingBytesUpdated chan int64) { - if l.Config.ThrottleParams.Threshold == 0 { +func (l *BatchSubmitter) sendToThrottlingLoop(unsafeBytesUpdated chan int64) { + if l.Config.ThrottleParams.LowerThreshold == 0 { return } - l.channelMgrMutex.Lock() - pendingBytes := l.channelMgr.PendingDABytes() - l.channelMgrMutex.Unlock() - // notify the throttling loop it may be time to initiate throttling without blocking select { - case pendingBytesUpdated <- pendingBytes: + case unsafeBytesUpdated <- l.unsafeDABytes(): default: } } // trySignal tries to send an empty struct on the provided channel. // It is not blocking, no signal will be sent if the channel is full. -func trySignal(c chan struct{}) { +func trySignal(c chan bool, value bool) { select { - case c <- struct{}{}: + case c <- value: default: } } @@ -472,7 +489,7 @@ func (l *BatchSubmitter) syncAndPrune(syncStatus *eth.SyncStatus) *inclusiveBloc // - waits for a signal that blocks have been loaded // - drives the creation of channels and frames // - sends transactions to the DA layer -func (l *BatchSubmitter) publishingLoop(ctx context.Context, wg *sync.WaitGroup, receiptsCh chan txmgr.TxReceipt[txRef], publishSignal chan struct{}) { +func (l *BatchSubmitter) publishingLoop(ctx context.Context, wg *sync.WaitGroup, receiptsCh chan txmgr.TxReceipt[txRef], publishSignal chan bool) { defer close(receiptsCh) defer wg.Done() @@ -484,9 +501,9 @@ func (l *BatchSubmitter) publishingLoop(ctx context.Context, wg *sync.WaitGroup, } txQueue := txmgr.NewQueue[txRef](ctx, l.Txmgr, l.Config.MaxPendingTransactions) - for range publishSignal { - l.Log.Debug("publishing loop received signal") - l.publishStateToL1(ctx, txQueue, receiptsCh, daGroup) + for forcePublish := range publishSignal { + l.Log.Debug("publishing loop received signal", "force_publish", forcePublish) + l.publishStateToL1(ctx, txQueue, receiptsCh, daGroup, forcePublish) } // First wait for all DA requests to finish to 
prevent new transactions being queued @@ -509,10 +526,10 @@ func (l *BatchSubmitter) publishingLoop(ctx context.Context, wg *sync.WaitGroup, // - polls the sequencer, // - prunes the channel manager state (i.e. safe blocks) // - loads unsafe blocks from the sequencer -func (l *BatchSubmitter) blockLoadingLoop(ctx context.Context, wg *sync.WaitGroup, pendingBytesUpdated chan int64, publishSignal chan struct{}) { +func (l *BatchSubmitter) blockLoadingLoop(ctx context.Context, wg *sync.WaitGroup, unsafeBytesUpdated chan int64, publishSignal chan bool) { ticker := time.NewTicker(l.Config.PollInterval) defer ticker.Stop() - defer close(pendingBytesUpdated) + defer close(unsafeBytesUpdated) defer close(publishSignal) defer wg.Done() for { @@ -528,7 +545,7 @@ func (l *BatchSubmitter) blockLoadingLoop(ctx context.Context, wg *sync.WaitGrou if blocksToLoad != nil { // Get fresh unsafe blocks - err := l.loadBlocksIntoState(ctx, blocksToLoad.start, blocksToLoad.end, publishSignal, pendingBytesUpdated) + err := l.loadBlocksIntoState(ctx, blocksToLoad.start, blocksToLoad.end, publishSignal, unsafeBytesUpdated) switch { case errors.Is(err, ErrReorg): l.Log.Warn("error loading blocks, clearing state and waiting for node sync", "err", err) @@ -538,10 +555,10 @@ func (l *BatchSubmitter) blockLoadingLoop(ctx context.Context, wg *sync.WaitGrou l.Log.Warn("error loading blocks, retrying on next tick", "err", err) continue default: - l.sendToThrottlingLoop(pendingBytesUpdated) // we have increased the pending data. Signal the throttling loop to check if it should throttle. + l.sendToThrottlingLoop(unsafeBytesUpdated) // we have increased the unsafe data. Signal the throttling loop to check if it should throttle. 
} } - trySignal(publishSignal) // always signal the write loop to ensure we periodically publish even if we aren't loading blocks + trySignal(publishSignal, false) // always signal the write loop to ensure we periodically publish even if we aren't loading blocks case <-ctx.Done(): l.Log.Info("blockLoadingLoop returning") return @@ -597,6 +614,7 @@ func (l *BatchSubmitter) singleEndpointThrottler(wg *sync.WaitGroup, throttleSig _, params := l.throttleController.Load() var success bool + l.Log.Debug("Setting max DA size on endpoint", "endpoint", endpoint, "max_tx_size", params.MaxTxSize, "max_block_size", params.MaxBlockSize) err := client.CallContext( ctx, &success, SetMaxDASizeMethod, hexutil.Uint64(params.MaxTxSize), hexutil.Uint64(params.MaxBlockSize), ) @@ -656,13 +674,13 @@ func (l *BatchSubmitter) singleEndpointThrottler(wg *sync.WaitGroup, throttleSig } // throttlingLoop acts as a distributor that spawns individual throttling loops for each endpoint -// and fans out the pending bytes updates to each endpoint -func (l *BatchSubmitter) throttlingLoop(wg *sync.WaitGroup, pendingBytesUpdated chan int64) { +// and fans out the unsafe bytes updates to each endpoint +func (l *BatchSubmitter) throttlingLoop(wg *sync.WaitGroup, unsafeBytesUpdated chan int64) { defer wg.Done() l.Log.Info("Starting DA throttling loop", "controller_type", l.throttleController.GetType(), - "threshold", l.Config.ThrottleParams.Threshold, - "max_threshold", float64(l.Config.ThrottleParams.Threshold)*l.Config.ThrottleParams.ThresholdMultiplier, + "lower_threshold", l.Config.ThrottleParams.LowerThreshold, + "upper_threshold", l.Config.ThrottleParams.UpperThreshold, ) updateChans := make([]chan struct{}, len(l.Config.ThrottleParams.Endpoints)) @@ -674,21 +692,21 @@ func (l *BatchSubmitter) throttlingLoop(wg *sync.WaitGroup, pendingBytesUpdated go l.singleEndpointThrottler(&innerWg, updateChans[i], endpoint) } - for pb := range pendingBytesUpdated { - newParams := 
l.throttleController.Update(uint64(pb)) + for unsafeBytes := range unsafeBytesUpdated { + l.Metr.RecordUnsafeDABytes(unsafeBytes) + newParams := l.throttleController.Update(uint64(unsafeBytes)) controllerType := l.throttleController.GetType() l.Metr.RecordThrottleIntensity(newParams.Intensity, controllerType) l.Metr.RecordThrottleParams(newParams.MaxTxSize, newParams.MaxBlockSize) - if l.Config.ThrottleParams.Threshold > 0 { - l.Metr.RecordPendingBytesVsThreshold(uint64(pb), l.Config.ThrottleParams.Threshold, controllerType) + if l.Config.ThrottleParams.LowerThreshold > 0 { + l.Metr.RecordUnsafeBytesVsThreshold(uint64(unsafeBytes), l.Config.ThrottleParams.LowerThreshold, controllerType) } // Update throttling state if newParams.IsThrottling() { - l.Log.Warn("Throttling loop: pending bytes above threshold, scaling endpoint throttling based on intensity", - "pending_bytes", pb, - "threshold", l.Config.ThrottleParams.Threshold, + l.Log.Warn("Throttling loop: unsafe bytes above threshold, scaling endpoint throttling based on intensity", + "unsafe_bytes", unsafeBytes, "intensity", newParams.Intensity, "max_tx_size", newParams.MaxTxSize, "max_block_size", newParams.MaxBlockSize, @@ -755,7 +773,7 @@ func (l *BatchSubmitter) waitNodeSync() error { // publishStateToL1 queues up all pending TxData to be published to the L1, returning when there is no more data to // queue for publishing or if there was an error queuing the data. 
-func (l *BatchSubmitter) publishStateToL1(ctx context.Context, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group) { +func (l *BatchSubmitter) publishStateToL1(ctx context.Context, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group, forcePublish bool) { for { select { case <-ctx.Done(): @@ -772,7 +790,7 @@ func (l *BatchSubmitter) publishStateToL1(ctx context.Context, queue *txmgr.Queu return } - err := l.publishTxToL1(ctx, queue, receiptsCh, daGroup) + err := l.publishTxToL1(ctx, queue, receiptsCh, daGroup, forcePublish) if err != nil { if err != io.EOF { l.Log.Error("Error publishing tx to l1", "err", err) @@ -826,7 +844,7 @@ func (l *BatchSubmitter) clearState(ctx context.Context) { } // publishTxToL1 submits a single state tx to the L1 -func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group) error { +func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group, forcePublish bool) error { // send all available transactions l1tip, isPectra, err := l.l1Tip(ctx) if err != nil { @@ -839,7 +857,7 @@ func (l *BatchSubmitter) publishTxToL1(ctx context.Context, queue *txmgr.Queue[t // Collect next transaction data. This pulls data out of the channel, so we need to make sure // to put it back if ever da or txmgr requests fail, by calling l.recordFailedDARequest/recordFailedTx. 
l.channelMgrMutex.Lock() - txdata, err := l.channelMgr.TxData(l1tip.ID(), isPectra, params.IsThrottling()) + txdata, err := l.channelMgr.TxData(l1tip.ID(), isPectra, params.IsThrottling(), forcePublish) l.channelMgrMutex.Unlock() if err == io.EOF { @@ -1127,11 +1145,7 @@ func (l *BatchSubmitter) SetThrottleController(newType config.ThrottleController newController, err := factory.CreateController( newType, - l.Config.ThrottleParams.Threshold, - l.Config.ThrottleParams.TxSize, - l.Config.ThrottleParams.BlockSize, - l.Config.ThrottleParams.AlwaysBlockSize, - l.Config.ThrottleParams.ThresholdMultiplier, + l.Config.ThrottleParams, pidControllerConfig, ) if err != nil { @@ -1168,19 +1182,14 @@ func (l *BatchSubmitter) SetThrottleController(newType config.ThrottleController func (l *BatchSubmitter) GetThrottleControllerInfo() (config.ThrottleControllerInfo, error) { controllerType, params := l.throttleController.Load() - // Get current pending bytes - l.channelMgrMutex.Lock() - currentLoad := uint64(l.channelMgr.PendingDABytes()) - l.channelMgrMutex.Unlock() - info := config.ThrottleControllerInfo{ - Type: string(controllerType), - Threshold: l.Config.ThrottleParams.Threshold, - MaxThreshold: float64(l.Config.ThrottleParams.Threshold) * l.Config.ThrottleParams.ThresholdMultiplier, - CurrentLoad: currentLoad, - Intensity: params.Intensity, - MaxTxSize: params.MaxTxSize, - MaxBlockSize: params.MaxBlockSize, + Type: string(controllerType), + LowerThreshold: l.Config.ThrottleParams.LowerThreshold, + UpperThreshold: l.Config.ThrottleParams.UpperThreshold, + CurrentLoad: uint64(l.unsafeDABytes()), + Intensity: params.Intensity, + MaxTxSize: params.MaxTxSize, + MaxBlockSize: params.MaxBlockSize, } return info, nil @@ -1192,7 +1201,7 @@ func (l *BatchSubmitter) ResetThrottleController() error { l.throttleController.Reset() l.Metr.RecordThrottleIntensity(0.0, l.throttleController.GetType()) - l.Metr.RecordThrottleParams(0, l.Config.ThrottleParams.AlwaysBlockSize) + 
l.Metr.RecordThrottleParams(0, l.Config.ThrottleParams.BlockSizeUpperLimit) l.Log.Info("Successfully reset throttle controller state") return nil diff --git a/op-batcher/batcher/driver_test.go b/op-batcher/batcher/driver_test.go index 4a2f0d89c6c8d..454ade01ee756 100644 --- a/op-batcher/batcher/driver_test.go +++ b/op-batcher/batcher/driver_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/ethereum-optimism/optimism/op-batcher/batcher/throttler" "github.com/ethereum-optimism/optimism/op-batcher/config" "github.com/ethereum-optimism/optimism/op-batcher/metrics" "github.com/ethereum-optimism/optimism/op-service/dial" @@ -231,7 +232,6 @@ func TestBatchSubmitter_ThrottlingEndpoints(t *testing.T) { // Setup test context ctx, cancel := context.WithCancel(context.Background()) - defer cancel() // Add in an endpoint with no server at all, representing an "always down" endpoint urls = append(urls, "http://invalid/") @@ -241,15 +241,16 @@ func TestBatchSubmitter_ThrottlingEndpoints(t *testing.T) { // Create test BatchSubmitter using the setup function bs, _ := setup(t) bs.shutdownCtx = ctx - bs.Config = BatcherConfig{ - NetworkTimeout: time.Second, - ThrottleParams: config.ThrottleParams{ - Threshold: 10000, - TxSize: 5000, - BlockSize: 20000, - Endpoints: urls, - }, - } + bs.Config.NetworkTimeout = time.Second + bs.Config.ThrottleParams.Endpoints = urls + bs.throttleController = throttler.NewThrottleController( + throttler.NewStepStrategy(10000), + throttler.ThrottleConfig{ + TxSizeLowerLimit: 5000, + TxSizeUpperLimit: 10000, + BlockSizeLowerLimit: 20000, + BlockSizeUpperLimit: 30000, + }) // Test the throttling loop pendingBytesUpdated := make(chan int64, 1) @@ -262,7 +263,6 @@ func TestBatchSubmitter_ThrottlingEndpoints(t *testing.T) { // Simulate block loading by sending periodically on pendingBytesUpdated wg2 := sync.WaitGroup{} blockLoadingCtx, cancelBlockLoading := context.WithCancel(context.Background()) - defer cancelBlockLoading() go func() { defer 
wg2.Done() // Simulate block loading @@ -284,6 +284,7 @@ func TestBatchSubmitter_ThrottlingEndpoints(t *testing.T) { wg2.Wait() close(pendingBytesUpdated) wg1.Wait() + cancel() }) require.Eventually(t, @@ -300,7 +301,7 @@ func TestBatchSubmitter_ThrottlingEndpoints(t *testing.T) { } } return true - }, time.Second*20, time.Millisecond*10, "All endpoints should have been called within 2s") + }, time.Second*10, time.Millisecond*10, "All endpoints should have been called within 10s") startTestServerAtAddr := func(addr string, handler http.HandlerFunc) *httptest.Server { ln, err := net.Listen("tcp", addr) diff --git a/op-batcher/batcher/service.go b/op-batcher/batcher/service.go index 802450533a5bf..2f0ed8ae67e64 100644 --- a/op-batcher/batcher/service.go +++ b/op-batcher/batcher/service.go @@ -107,13 +107,14 @@ func (bs *BatcherService) initFromCLIConfig(ctx context.Context, version string, bs.WaitNodeSync = cfg.WaitNodeSync bs.ThrottleParams = config.ThrottleParams{ - Threshold: cfg.ThrottleThreshold, - TxSize: cfg.ThrottleTxSize, - BlockSize: cfg.ThrottleBlockSize, - AlwaysBlockSize: cfg.ThrottleAlwaysBlockSize, - ThresholdMultiplier: cfg.ThrottleThresholdMultiplier, - ControllerType: cfg.ThrottleControllerType, - Endpoints: slices.Union(cfg.L2EthRpc, cfg.AdditionalThrottlingEndpoints), + LowerThreshold: cfg.ThrottleConfig.LowerThreshold, + UpperThreshold: cfg.ThrottleConfig.UpperThreshold, + TxSizeLowerLimit: cfg.ThrottleConfig.TxSizeLowerLimit, + TxSizeUpperLimit: cfg.ThrottleConfig.TxSizeUpperLimit, + BlockSizeLowerLimit: cfg.ThrottleConfig.BlockSizeLowerLimit, + BlockSizeUpperLimit: cfg.ThrottleConfig.BlockSizeUpperLimit, + ControllerType: cfg.ThrottleConfig.ControllerType, + Endpoints: slices.Union(cfg.L2EthRpc, cfg.ThrottleConfig.AdditionalEndpoints), } if bs.ThrottleParams.ControllerType == config.PIDControllerType { @@ -121,32 +122,32 @@ func (bs *BatcherService) initFromCLIConfig(ctx context.Context, version string, bs.Log.Warn("PID controller is 
EXPERIMENTAL and should only be used by control theory experts. Improper configuration can lead to system instability or poor performance. Monitor system behavior closely when using PID control.") // Validate PID configuration parameters - if cfg.ThrottlePidKp < 0 { - return fmt.Errorf("PID Kp gain must be non-negative, got %f", cfg.ThrottlePidKp) + if cfg.ThrottleConfig.PidKp < 0 { + return fmt.Errorf("PID Kp gain must be non-negative, got %f", cfg.ThrottleConfig.PidKp) } - if cfg.ThrottlePidKi < 0 { - return fmt.Errorf("PID Ki gain must be non-negative, got %f", cfg.ThrottlePidKi) + if cfg.ThrottleConfig.PidKi < 0 { + return fmt.Errorf("PID Ki gain must be non-negative, got %f", cfg.ThrottleConfig.PidKi) } - if cfg.ThrottlePidKd < 0 { - return fmt.Errorf("PID Kd gain must be non-negative, got %f", cfg.ThrottlePidKd) + if cfg.ThrottleConfig.PidKd < 0 { + return fmt.Errorf("PID Kd gain must be non-negative, got %f", cfg.ThrottleConfig.PidKd) } - if cfg.ThrottlePidIntegralMax <= 0 { - return fmt.Errorf("PID IntegralMax must be positive, got %f", cfg.ThrottlePidIntegralMax) + if cfg.ThrottleConfig.PidIntegralMax <= 0 { + return fmt.Errorf("PID IntegralMax must be positive, got %f", cfg.ThrottleConfig.PidIntegralMax) } - if cfg.ThrottlePidOutputMax <= 0 || cfg.ThrottlePidOutputMax > 1 { - return fmt.Errorf("PID OutputMax must be between 0 and 1, got %f", cfg.ThrottlePidOutputMax) + if cfg.ThrottleConfig.PidOutputMax <= 0 || cfg.ThrottleConfig.PidOutputMax > 1 { + return fmt.Errorf("PID OutputMax must be between 0 and 1, got %f", cfg.ThrottleConfig.PidOutputMax) } - if cfg.ThrottlePidSampleTime <= 0 { - return fmt.Errorf("PID SampleTime must be positive, got %v", cfg.ThrottlePidSampleTime) + if cfg.ThrottleConfig.PidSampleTime <= 0 { + return fmt.Errorf("PID SampleTime must be positive, got %v", cfg.ThrottleConfig.PidSampleTime) } bs.ThrottleParams.PIDConfig = &config.PIDConfig{ - Kp: cfg.ThrottlePidKp, - Ki: cfg.ThrottlePidKi, - Kd: cfg.ThrottlePidKd, - IntegralMax: 
cfg.ThrottlePidIntegralMax, - OutputMax: cfg.ThrottlePidOutputMax, - SampleTime: cfg.ThrottlePidSampleTime, + Kp: cfg.ThrottleConfig.PidKp, + Ki: cfg.ThrottleConfig.PidKi, + Kd: cfg.ThrottleConfig.PidKd, + IntegralMax: cfg.ThrottleConfig.PidIntegralMax, + OutputMax: cfg.ThrottleConfig.PidOutputMax, + SampleTime: cfg.ThrottleConfig.PidSampleTime, } bs.Log.Info("Initialized PID throttle controller", "kp", bs.ThrottleParams.PIDConfig.Kp, diff --git a/op-batcher/batcher/sync_actions.go b/op-batcher/batcher/sync_actions.go index 079206f0d5fe7..7015b6393a31b 100644 --- a/op-batcher/batcher/sync_actions.go +++ b/op-batcher/batcher/sync_actions.go @@ -5,7 +5,6 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/queue" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" ) @@ -55,7 +54,7 @@ func isZero[T comparable](x T) bool { func computeSyncActions[T channelStatuser]( newSyncStatus eth.SyncStatus, prevCurrentL1 eth.L1BlockRef, - blocks queue.Queue[*types.Block], + blocks queue.Queue[SizedBlock], channels []T, l log.Logger, ) (syncActions, bool) { diff --git a/op-batcher/batcher/sync_actions_test.go b/op-batcher/batcher/sync_actions_test.go index de0118aae82f6..17f7576fa0c0c 100644 --- a/op-batcher/batcher/sync_actions_test.go +++ b/op-batcher/batcher/sync_actions_test.go @@ -35,9 +35,9 @@ func (tcs testChannelStatuser) isTimedOut() bool { func TestBatchSubmitter_computeSyncActions(t *testing.T) { - block101 := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(101)}) - block102 := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(102)}) - block103 := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(103)}) + block101 := SizedBlock{Block: types.NewBlockWithHeader(&types.Header{Number: big.NewInt(101)})} + block102 := SizedBlock{Block: types.NewBlockWithHeader(&types.Header{Number: big.NewInt(102)})} + block103 := SizedBlock{Block: 
types.NewBlockWithHeader(&types.Header{Number: big.NewInt(103)})} channel103 := testChannelStatuser{ latestL2: eth.ToBlockID(block103), @@ -46,7 +46,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { timedOut: false, } - block104 := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(104)}) + block104 := SizedBlock{Block: types.NewBlockWithHeader(&types.Header{Number: big.NewInt(104)})} channel104 := testChannelStatuser{ latestL2: eth.ToBlockID(block104), @@ -63,7 +63,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { // inputs newSyncStatus eth.SyncStatus prevCurrentL1 eth.L1BlockRef - blocks queue.Queue[*types.Block] + blocks queue.Queue[SizedBlock] channels []channelStatuser // expectations expected syncActions @@ -104,7 +104,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { UnsafeL2: eth.L2BlockRef{Number: 109}, }, prevCurrentL1: eth.BlockRef{Number: 1}, - blocks: queue.Queue[*types.Block]{block102, block103}, // note absence of block101 + blocks: queue.Queue[SizedBlock]{block102, block103}, // note absence of block101 channels: []channelStatuser{channel103}, expected: syncActions{ clearState: ð.BlockID{Number: 1}, @@ -122,7 +122,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { UnsafeL2: eth.L2BlockRef{Number: 109}, }, prevCurrentL1: eth.BlockRef{Number: 1}, - blocks: queue.Queue[*types.Block]{block101, block102, block103}, + blocks: queue.Queue[SizedBlock]{block101, block102, block103}, channels: []channelStatuser{channel103}, expected: syncActions{ clearState: ð.BlockID{Number: 1}, @@ -140,7 +140,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { UnsafeL2: eth.L2BlockRef{Number: 109}, }, prevCurrentL1: eth.BlockRef{Number: 1}, - blocks: queue.Queue[*types.Block]{block101, block102, block103}, + blocks: queue.Queue[SizedBlock]{block101, block102, block103}, channels: []channelStatuser{channel103}, expected: syncActions{ clearState: ð.BlockID{Number: 1}, @@ -158,7 +158,7 @@ func 
TestBatchSubmitter_computeSyncActions(t *testing.T) { UnsafeL2: eth.L2BlockRef{Number: 109}, }, prevCurrentL1: eth.BlockRef{Number: 1}, - blocks: queue.Queue[*types.Block]{block101, block102, block103}, + blocks: queue.Queue[SizedBlock]{block101, block102, block103}, channels: []channelStatuser{channel103}, expected: syncActions{ clearState: ð.BlockID{Number: 1}, @@ -175,7 +175,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { UnsafeL2: eth.L2BlockRef{Number: 101}, }, prevCurrentL1: eth.BlockRef{Number: 1}, - blocks: queue.Queue[*types.Block]{block102, block103}, + blocks: queue.Queue[SizedBlock]{block102, block103}, channels: []channelStatuser{channel103}, expected: syncActions{ clearState: ð.BlockID{Number: 1}, @@ -194,7 +194,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { UnsafeL2: eth.L2BlockRef{Number: 109}, }, prevCurrentL1: eth.BlockRef{Number: 1}, - blocks: queue.Queue[*types.Block]{block101, block102, block103}, + blocks: queue.Queue[SizedBlock]{block101, block102, block103}, channels: []channelStatuser{channel103}, expected: syncActions{ blocksToLoad: &inclusiveBlockRange{104, 109}, @@ -210,7 +210,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { UnsafeL2: eth.L2BlockRef{Number: 109}, }, prevCurrentL1: eth.BlockRef{Number: 1}, - blocks: queue.Queue[*types.Block]{}, + blocks: queue.Queue[SizedBlock]{}, channels: []channelStatuser{}, expected: syncActions{ blocksToLoad: &inclusiveBlockRange{104, 109}, @@ -226,7 +226,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { UnsafeL2: eth.L2BlockRef{Number: 109}, }, prevCurrentL1: eth.BlockRef{Number: 1}, - blocks: queue.Queue[*types.Block]{block101, block102, block103}, + blocks: queue.Queue[SizedBlock]{block101, block102, block103}, channels: []channelStatuser{channel103}, expected: syncActions{ blocksToPrune: 3, @@ -243,7 +243,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { UnsafeL2: eth.L2BlockRef{Number: 109}, }, prevCurrentL1: 
eth.BlockRef{Number: 1}, - blocks: queue.Queue[*types.Block]{block101, block102, block103, block104}, + blocks: queue.Queue[SizedBlock]{block101, block102, block103, block104}, channels: []channelStatuser{channel103, channel104}, expected: syncActions{ blocksToPrune: 3, @@ -260,7 +260,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { UnsafeL2: eth.L2BlockRef{Number: 100}, }, prevCurrentL1: eth.BlockRef{Number: 1}, - blocks: queue.Queue[*types.Block]{}, + blocks: queue.Queue[SizedBlock]{}, channels: []channelStatuser{}, expected: syncActions{}, expectedLogs: noBlocksLogs, @@ -273,7 +273,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { UnsafeL2: eth.L2BlockRef{Number: 101}, }, prevCurrentL1: eth.BlockRef{Number: 1}, - blocks: queue.Queue[*types.Block]{block101}, + blocks: queue.Queue[SizedBlock]{block101}, channels: []channelStatuser{}, expected: syncActions{ blocksToPrune: 1, @@ -290,7 +290,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { UnsafeL2: eth.L2BlockRef{Number: 109}, }, prevCurrentL1: eth.BlockRef{Number: 1}, - blocks: queue.Queue[*types.Block]{}, + blocks: queue.Queue[SizedBlock]{}, channels: []channelStatuser{}, expected: syncActions{ blocksToLoad: &inclusiveBlockRange{105, 109}, @@ -305,7 +305,7 @@ func TestBatchSubmitter_computeSyncActions(t *testing.T) { UnsafeL2: eth.L2BlockRef{Number: 109}, }, prevCurrentL1: eth.BlockRef{Number: 1}, - blocks: queue.Queue[*types.Block]{}, + blocks: queue.Queue[SizedBlock]{}, channels: []channelStatuser{}, expected: syncActions{}, expectedLogs: []string{"empty BlockRef in sync status"}, diff --git a/op-batcher/batcher/throttler/controller.go b/op-batcher/batcher/throttler/controller.go index 4c036e7e0d0e3..b9dcd86626e44 100644 --- a/op-batcher/batcher/throttler/controller.go +++ b/op-batcher/batcher/throttler/controller.go @@ -1,6 +1,7 @@ package throttler import ( + "errors" "fmt" "sync" "sync/atomic" @@ -26,7 +27,7 @@ func NewThrottleController(strategy ThrottleStrategy, 
config ThrottleConfig) *Th // Initialize with default params initialParams := &ThrottleParams{ MaxTxSize: 0, - MaxBlockSize: config.AlwaysBlockSize, + MaxBlockSize: config.BlockSizeUpperLimit, Intensity: 0.0, } controller.currentParams.Store(initialParams) @@ -50,10 +51,7 @@ func (tc *ThrottleController) Update(currentPendingBytes uint64) ThrottleParams } // intensityToParams converts intensity to throttle parameters using common interpolation logic -func (tc *ThrottleController) intensityToParams(intensity float64, config ThrottleConfig) ThrottleParams { - maxBlockSize := config.AlwaysBlockSize - var maxTxSize uint64 = 0 - +func (tc *ThrottleController) intensityToParams(intensity float64, cfg ThrottleConfig) ThrottleParams { // Clamp intensity to 1.0 to prevent overflows, should never happen if intensity > 1.0 { log.Warn("throttler: intensity above maximum (will be clamped)", "intensity", intensity) @@ -66,25 +64,70 @@ func (tc *ThrottleController) intensityToParams(intensity float64, config Thrott intensity = 0 } - if intensity > 0 { - // Apply intensity to tx size throttling - maxTxSize = config.ThrottleTxSize + return ThrottleParams{ + MaxTxSize: tc.intensityToTxSize(intensity, cfg), + MaxBlockSize: tc.intensityToBlockSize(intensity, cfg), + Intensity: intensity, + } +} + +func (tc *ThrottleController) validateConfig(cfg ThrottleConfig) error { + if cfg.BlockSizeLowerLimit > 0 && cfg.BlockSizeLowerLimit >= cfg.BlockSizeUpperLimit { + log.Error("throttler: invalid block size limits", + "blockSizeLowerLimit", cfg.BlockSizeLowerLimit, + "blockSizeUpperLimit", cfg.BlockSizeUpperLimit, + "controllerType", tc.GetType(), + ) + return errors.New("throttler: invalid block size limits") + } + + if cfg.TxSizeLowerLimit > 0 && + tc.GetType() != config.StepControllerType && + cfg.TxSizeLowerLimit >= cfg.TxSizeUpperLimit { + log.Error("throttler: invalid tx size limits", + "txSizeLowerLimit", cfg.TxSizeLowerLimit, + "txSizeUpperLimit", cfg.TxSizeUpperLimit, + 
"controllerType", tc.GetType(), + ) + return errors.New("throttler: invalid tx size limits") + } + return nil +} + +// intensityToBlockSize converts intensity in [0,1] to block size +func (tc *ThrottleController) intensityToBlockSize(intensity float64, cfg ThrottleConfig) uint64 { + if cfg.BlockSizeLowerLimit == 0 { + return 0 + } - // Apply intensity to block size throttling - if maxBlockSize == 0 || (config.ThrottleBlockSize != 0 && config.ThrottleBlockSize < maxBlockSize) { - targetBlockSize := config.ThrottleBlockSize - if maxBlockSize > 0 { - // Linear interpolation between always and throttle block sizes - targetBlockSize = uint64(float64(maxBlockSize) - intensity*float64(maxBlockSize-config.ThrottleBlockSize)) - } - maxBlockSize = targetBlockSize + if intensity == 0 { + return cfg.BlockSizeUpperLimit + } else { + switch tc.strategy.GetType() { + case config.StepControllerType: + return cfg.BlockSizeLowerLimit + default: + return uint64(float64(cfg.BlockSizeUpperLimit) - intensity*float64(cfg.BlockSizeUpperLimit-cfg.BlockSizeLowerLimit)) } } - return ThrottleParams{ - MaxTxSize: maxTxSize, - MaxBlockSize: maxBlockSize, - Intensity: intensity, +} + +// intensityToTxSize converts intensity in [0,1] to tx size +func (tc *ThrottleController) intensityToTxSize(intensity float64, cfg ThrottleConfig) uint64 { + if cfg.TxSizeLowerLimit == 0 { + return 0 + } + + if intensity == 0 { + return 0 // Transactions are not throttled at 0 intensity + } else { + switch tc.strategy.GetType() { + case config.StepControllerType: + return cfg.TxSizeLowerLimit + default: + return uint64(float64(cfg.TxSizeUpperLimit) - intensity*float64(cfg.TxSizeUpperLimit-cfg.TxSizeLowerLimit)) + } } } @@ -126,7 +169,7 @@ func (tc *ThrottleController) Reset() { // Reset to default parameters resetParams := ThrottleParams{ MaxTxSize: 0, - MaxBlockSize: config.AlwaysBlockSize, + MaxBlockSize: config.BlockSizeUpperLimit, Intensity: 0.0, } tc.currentParams.Store(&resetParams) @@ -161,17 +204,16 @@ 
func NewThrottleControllerFactory(log log.Logger) *ThrottleControllerFactory { func (f *ThrottleControllerFactory) CreateController( controllerType config.ThrottleControllerType, - threshold, throttleTxSize, throttleBlockSize, alwaysBlockSize uint64, - thresholdMultiplier float64, + throttleParams config.ThrottleParams, pidConfig *config.PIDConfig, ) (*ThrottleController, error) { var strategy ThrottleStrategy throttleConfig := ThrottleConfig{ - Threshold: threshold, - ThrottleTxSize: throttleTxSize, - ThrottleBlockSize: throttleBlockSize, - AlwaysBlockSize: alwaysBlockSize, + TxSizeLowerLimit: throttleParams.TxSizeLowerLimit, + TxSizeUpperLimit: throttleParams.TxSizeUpperLimit, + BlockSizeLowerLimit: throttleParams.BlockSizeLowerLimit, + BlockSizeUpperLimit: throttleParams.BlockSizeUpperLimit, } // Default to step controller if no type is specified @@ -181,11 +223,11 @@ func (f *ThrottleControllerFactory) CreateController( switch controllerType { case config.StepControllerType: - strategy = NewStepStrategy(threshold) + strategy = NewStepStrategy(throttleParams.LowerThreshold) case config.LinearControllerType: - strategy = NewLinearStrategy(threshold, thresholdMultiplier, f.log) + strategy = NewLinearStrategy(throttleParams.LowerThreshold, throttleParams.UpperThreshold, f.log) case config.QuadraticControllerType: - strategy = NewQuadraticStrategy(threshold, thresholdMultiplier, f.log) + strategy = NewQuadraticStrategy(throttleParams.LowerThreshold, throttleParams.UpperThreshold, f.log) case config.PIDControllerType: log.Warn("EXPERIMENTAL FEATURE") log.Warn("PID controller is an EXPERIMENTAL feature that should only be used by experts. PID controller requires deep understanding of control theory and careful tuning. Improper configuration can lead to system instability or poor performance. 
Use with extreme caution in production environments.") @@ -213,10 +255,16 @@ func (f *ThrottleControllerFactory) CreateController( return nil, fmt.Errorf("PID SampleTime must be positive, got %v", pidConfig.SampleTime) } - strategy = NewPIDStrategy(threshold, *pidConfig) + strategy = NewPIDStrategy(throttleParams.LowerThreshold, *pidConfig) default: return nil, fmt.Errorf("unsupported throttle controller type: %s", controllerType) } - return NewThrottleController(strategy, throttleConfig), nil + newController := NewThrottleController(strategy, throttleConfig) + err := newController.validateConfig(throttleConfig) + if err != nil { + return nil, err + } + + return newController, nil } diff --git a/op-batcher/batcher/throttler/controller_test.go b/op-batcher/batcher/throttler/controller_test.go index 90b87b81843c5..bf36ee71742f5 100644 --- a/op-batcher/batcher/throttler/controller_test.go +++ b/op-batcher/batcher/throttler/controller_test.go @@ -8,34 +8,36 @@ import ( "github.com/ethereum-optimism/optimism/op-batcher/config" "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" ) // Test configuration constants - Core throttle settings shared across all tests const ( // Primary throttle threshold: 1MB - this is the main decision point for when throttling begins - TestThresholdBytes = 1_000_000 // 1MB threshold + TestLowerThresholdBytes = 1_000_000 // 1MB threshold // Transaction and block size limits when throttling is active - TestThrottleTxSize = 5_000 // 5KB transaction size limit during throttling - TestThrottleBlockSize = 21_000 // 21KB block size limit during throttling - TestAlwaysBlockSize = 130_000 // 130KB block size limit (always enforced) + TestTxSizeLowerLimit = 5_000 // 5KB transaction size limit during throttling + TestTxSizeUpperLimit = 10_000 // 10KB transaction size limit during throttling + TestBlockSizeLowerLimit = 21_000 // 21KB block size limit during throttling + 
TestBlockSizeUpperLimit = 130_000 // 130KB block size limit (always enforced) // Multiplier for gradual controllers (linear, quadratic) - defines max throttling point - TestThresholdMultiplier = 2.0 // 2x threshold = maximum throttling point (2MB) + TestUpperThreshold = 2_000_000 // 2x threshold = maximum throttling point (2MB) ) // Test load scenarios - All relative to TestThresholdBytes for easy understanding const ( - TestLoadBelowThreshold = TestThresholdBytes / 2 // 500KB - 50% of threshold - TestLoadAtThreshold = TestThresholdBytes // 1MB - exactly at threshold - TestLoadQuarterAbove = TestThresholdBytes + TestThresholdBytes/4 // 1.25MB - 25% above threshold - TestLoadHalfAbove = TestThresholdBytes + TestThresholdBytes/2 // 1.5MB - 50% above threshold - TestLoadThreeQuarterAbove = TestThresholdBytes + 3*TestThresholdBytes/4 // 1.75MB - 75% above threshold - TestLoadDoubleThreshold = TestThresholdBytes * 2 // 2MB - 100% above threshold (max for 2x multiplier) - TestLoadFarAbove = TestThresholdBytes * 3 // 3MB - far above threshold - TestLoadBelowThresholdAlt = 800_000 // 800KB - alternative below threshold value - TestLoadModerateAbove = 1_200_000 // 1.2MB - moderate load above threshold - TestLoadHighAbove = 1_400_000 // 1.4MB - high load above threshold + TestLoadBelowThreshold = TestLowerThresholdBytes / 2 // 500KB - 50% of threshold + TestLoadAtThreshold = TestLowerThresholdBytes // 1MB - exactly at threshold + TestLoadQuarterAbove = TestLowerThresholdBytes + TestLowerThresholdBytes/4 // 1.25MB - 25% above threshold + TestLoadHalfAbove = TestLowerThresholdBytes + TestLowerThresholdBytes/2 // 1.5MB - 50% above threshold + TestLoadThreeQuarterAbove = TestLowerThresholdBytes + 3*TestLowerThresholdBytes/4 // 1.75MB - 75% above threshold + TestLoadDoubleThreshold = TestLowerThresholdBytes * 2 // 2MB - 100% above threshold (max for 2x multiplier) + TestLoadFarAbove = TestLowerThresholdBytes * 3 // 3MB - far above threshold + TestLoadBelowThresholdAlt = 800_000 
// 800KB - alternative below threshold value + TestLoadModerateAbove = 1_200_000 // 1.2MB - moderate load above threshold + TestLoadHighAbove = 1_400_000 // 1.4MB - high load above threshold ) // Test precision and validation constants @@ -86,30 +88,37 @@ const ( var ( // Standard controller configurations - reused across tests testStepStrategy = func(t *testing.T) *StepStrategy { - return NewStepStrategy(TestThresholdBytes) + return NewStepStrategy(TestLowerThresholdBytes) } testLinearStrategy = func(t *testing.T) *LinearStrategy { - return NewLinearStrategy(TestThresholdBytes, TestThresholdMultiplier, newTestLogger(t)) + return NewLinearStrategy(TestLowerThresholdBytes, TestUpperThreshold, newTestLogger(t)) } testQuadraticStrategy = func(t *testing.T) *QuadraticStrategy { - return NewQuadraticStrategy(TestThresholdBytes, TestThresholdMultiplier, newTestLogger(t)) + return NewQuadraticStrategy(TestLowerThresholdBytes, TestUpperThreshold, newTestLogger(t)) } testPIDStrategy = func(t *testing.T) *PIDStrategy { - return NewPIDStrategy(TestThresholdBytes, TestPIDConfig) + return NewPIDStrategy(TestLowerThresholdBytes, TestPIDConfig) + } + + testThrottleConfig = ThrottleConfig{ + TxSizeLowerLimit: TestTxSizeLowerLimit, + TxSizeUpperLimit: TestTxSizeUpperLimit, + BlockSizeLowerLimit: TestBlockSizeLowerLimit, + BlockSizeUpperLimit: TestBlockSizeUpperLimit, } // Standard controllers - reused across tests testStepController = func(t *testing.T) *ThrottleController { - return NewThrottleController(testStepStrategy(t), ThrottleConfig{}) + return NewThrottleController(testStepStrategy(t), testThrottleConfig) } testLinearController = func(t *testing.T) *ThrottleController { - return NewThrottleController(testLinearStrategy(t), ThrottleConfig{}) + return NewThrottleController(testLinearStrategy(t), testThrottleConfig) } testQuadraticController = func(t *testing.T) *ThrottleController { - return NewThrottleController(testQuadraticStrategy(t), ThrottleConfig{}) + return 
NewThrottleController(testQuadraticStrategy(t), testThrottleConfig) } testPIDController = func(t *testing.T) *ThrottleController { - return NewThrottleController(testPIDStrategy(t), ThrottleConfig{}) + return NewThrottleController(testPIDStrategy(t), testThrottleConfig) } // Test factory @@ -177,7 +186,14 @@ func TestControllerFactory(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { controller, err := factory.CreateController( - tt.controllerType, TestThresholdBytes, TestThrottleTxSize, TestThrottleBlockSize, TestAlwaysBlockSize, TestThresholdMultiplier, tt.pidConfig) + tt.controllerType, config.ThrottleParams{ + LowerThreshold: TestLowerThresholdBytes, + UpperThreshold: TestUpperThreshold, + TxSizeLowerLimit: TestTxSizeLowerLimit, + TxSizeUpperLimit: TestTxSizeUpperLimit, + BlockSizeLowerLimit: TestBlockSizeLowerLimit, + BlockSizeUpperLimit: TestBlockSizeUpperLimit, + }, tt.pidConfig) if tt.expectError { if err == nil { @@ -263,8 +279,7 @@ func TestControllerAbstraction(t *testing.T) { // TestControllerStrategySwapping tests changing strategies at runtime func TestControllerStrategySwapping(t *testing.T) { // Start with step controller - stepStrategy := testStepStrategy(t) - controller := NewThrottleController(stepStrategy, ThrottleConfig{}) + controller := testStepController(t) // Test initial behavior params := controller.Update(TestLoadHalfAbove) @@ -273,7 +288,7 @@ func TestControllerStrategySwapping(t *testing.T) { } // Switch to quadratic controller - resetParams := ThrottleParams{MaxTxSize: 0, MaxBlockSize: TestAlwaysBlockSize, Intensity: 0.0} + resetParams := ThrottleParams{MaxTxSize: 0, MaxBlockSize: TestBlockSizeUpperLimit, Intensity: 0.0} controller.SetStrategy(testQuadraticStrategy(t), resetParams) // Test new behavior @@ -310,7 +325,14 @@ func TestControllerTypeConsistency(t *testing.T) { for _, tc := range testCases { t.Run(string(tc.controllerType), func(t *testing.T) { controller, err := factory.CreateController( - 
tc.controllerType, TestThresholdBytes, TestThrottleTxSize, TestThrottleBlockSize, TestAlwaysBlockSize, TestThresholdMultiplier, tc.pidConfig) + tc.controllerType, config.ThrottleParams{ + LowerThreshold: TestLowerThresholdBytes, + UpperThreshold: TestUpperThreshold, + TxSizeLowerLimit: TestTxSizeLowerLimit, + TxSizeUpperLimit: TestTxSizeUpperLimit, + BlockSizeLowerLimit: TestBlockSizeLowerLimit, + BlockSizeUpperLimit: TestBlockSizeUpperLimit, + }, tc.pidConfig) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -366,13 +388,13 @@ func (m *mockMetrics) GetResponseTime() time.Duration { // TestIntensityToParams tests the intensityToParams function that converts intensity to ThrottleParams func TestIntensityToParams(t *testing.T) { testConfig := ThrottleConfig{ - Threshold: TestThresholdBytes, - ThrottleTxSize: TestThrottleTxSize, - ThrottleBlockSize: TestThrottleBlockSize, - AlwaysBlockSize: TestAlwaysBlockSize, + TxSizeLowerLimit: TestTxSizeLowerLimit, + TxSizeUpperLimit: TestTxSizeUpperLimit, + BlockSizeLowerLimit: TestBlockSizeLowerLimit, + BlockSizeUpperLimit: TestBlockSizeUpperLimit, } - controller := NewThrottleController(testStepStrategy(t), testConfig) + controller := NewThrottleController(testLinearStrategy(t), testConfig) tests := []struct { name string @@ -385,42 +407,42 @@ func TestIntensityToParams(t *testing.T) { name: "zero intensity", intensity: 0.0, expectedMaxTxSize: 0, - expectedMaxBlockSize: TestAlwaysBlockSize, + expectedMaxBlockSize: TestBlockSizeUpperLimit, expectedIntensity: 0.0, }, { name: "minimum positive intensity", intensity: 0.001, - expectedMaxTxSize: TestThrottleTxSize, - expectedMaxBlockSize: TestAlwaysBlockSize - uint64(0.001*float64(TestAlwaysBlockSize-TestThrottleBlockSize)), // Interpolated value + expectedMaxTxSize: TestTxSizeUpperLimit - uint64(0.001*float64(TestTxSizeUpperLimit-TestTxSizeLowerLimit)), + expectedMaxBlockSize: TestBlockSizeUpperLimit - 
uint64(0.001*float64(TestBlockSizeUpperLimit-TestBlockSizeLowerLimit)), // Interpolated value expectedIntensity: 0.001, }, { name: "half intensity", intensity: 0.5, - expectedMaxTxSize: TestThrottleTxSize, - expectedMaxBlockSize: TestAlwaysBlockSize - uint64(0.5*float64(TestAlwaysBlockSize-TestThrottleBlockSize)), // Interpolated value + expectedMaxTxSize: TestTxSizeUpperLimit - uint64(0.5*float64(TestTxSizeUpperLimit-TestTxSizeLowerLimit)), + expectedMaxBlockSize: TestBlockSizeUpperLimit - uint64(0.5*float64(TestBlockSizeUpperLimit-TestBlockSizeLowerLimit)), // Interpolated value expectedIntensity: 0.5, }, { name: "maximum intensity", intensity: 1.0, - expectedMaxTxSize: TestThrottleTxSize, - expectedMaxBlockSize: TestThrottleBlockSize, + expectedMaxTxSize: TestTxSizeLowerLimit, + expectedMaxBlockSize: TestBlockSizeLowerLimit, expectedIntensity: 1.0, }, { name: "intensity above maximum (should be clamped)", intensity: 1.5, - expectedMaxTxSize: TestThrottleTxSize, - expectedMaxBlockSize: TestThrottleBlockSize, + expectedMaxTxSize: TestTxSizeLowerLimit, + expectedMaxBlockSize: TestBlockSizeLowerLimit, expectedIntensity: 1.0, }, { name: "negative intensity", intensity: -0.5, expectedMaxTxSize: 0, - expectedMaxBlockSize: TestAlwaysBlockSize, + expectedMaxBlockSize: TestBlockSizeUpperLimit, expectedIntensity: 0.0, }, } @@ -444,52 +466,51 @@ func TestIntensityToParams(t *testing.T) { } } -// TestIntensityToParamsBlockSizeInterpolation tests block size interpolation when ThrottleBlockSize is less than AlwaysBlockSize func TestIntensityToParamsBlockSizeInterpolation(t *testing.T) { testConfig := ThrottleConfig{ - Threshold: TestThresholdBytes, - ThrottleTxSize: TestThrottleTxSize, - ThrottleBlockSize: 50_000, // 50KB throttle block size - AlwaysBlockSize: 100_000, // 100KB always block size + TxSizeLowerLimit: 50, + TxSizeUpperLimit: 100, + BlockSizeLowerLimit: 50_000, // 50KB + BlockSizeUpperLimit: 100_000, // 100KB } - controller := 
NewThrottleController(testStepStrategy(t), testConfig) + controller := NewThrottleController(testLinearStrategy(t), testConfig) tests := []struct { name string intensity float64 + expectedMaxTxSize uint64 expectedMaxBlockSize uint64 - tolerance uint64 }{ { - name: "zero intensity - always block size", + name: "zero intensity - upper limit", intensity: 0.0, + expectedMaxTxSize: 0, expectedMaxBlockSize: 100_000, - tolerance: 0, }, { name: "25% intensity - 75% of way to throttle size", intensity: 0.25, + expectedMaxTxSize: 87, expectedMaxBlockSize: 87_500, // 100_000 - 0.25 * (100_000 - 50_000) - tolerance: 100, }, { name: "50% intensity - 50% of way to throttle size", intensity: 0.5, + expectedMaxTxSize: 75, expectedMaxBlockSize: 75_000, // 100_000 - 0.5 * (100_000 - 50_000) - tolerance: 100, }, { name: "75% intensity - 25% of way to throttle size", intensity: 0.75, + expectedMaxTxSize: 62, expectedMaxBlockSize: 62_500, // 100_000 - 0.75 * (100_000 - 50_000) - tolerance: 100, }, { name: "100% intensity - throttle block size", intensity: 1.0, + expectedMaxTxSize: 50, expectedMaxBlockSize: 50_000, - tolerance: 0, }, } @@ -497,10 +518,14 @@ func TestIntensityToParamsBlockSizeInterpolation(t *testing.T) { t.Run(tt.name, func(t *testing.T) { params := controller.intensityToParams(tt.intensity, testConfig) - if params.MaxBlockSize > tt.expectedMaxBlockSize+tt.tolerance || - params.MaxBlockSize < tt.expectedMaxBlockSize-tt.tolerance { - t.Errorf("expected MaxBlockSize %d ± %d, got %d", - tt.expectedMaxBlockSize, tt.tolerance, params.MaxBlockSize) + if params.MaxBlockSize != tt.expectedMaxBlockSize { + t.Errorf("expected MaxBlockSize %d, got %d", + tt.expectedMaxBlockSize, params.MaxBlockSize) + } + + if params.MaxTxSize != tt.expectedMaxTxSize { + t.Errorf("expected MaxTxSize %d, got %d", + tt.expectedMaxTxSize, params.MaxTxSize) } if params.Intensity != tt.intensity { @@ -510,68 +535,97 @@ func TestIntensityToParamsBlockSizeInterpolation(t *testing.T) { } } -// 
TestIntensityToParamsEdgeCases tests edge cases for the intensityToParams function -func TestIntensityToParamsEdgeCases(t *testing.T) { - t.Run("zero throttle block size", func(t *testing.T) { - testConfig := ThrottleConfig{ - Threshold: TestThresholdBytes, - ThrottleTxSize: TestThrottleTxSize, - ThrottleBlockSize: 0, - AlwaysBlockSize: TestAlwaysBlockSize, - } - - controller := NewThrottleController(testStepStrategy(t), testConfig) - params := controller.intensityToParams(0.5, testConfig) +// TestControllerFactoryEdgeCases tests edge cases for the factory's CreateController method +func TestControllerFactoryEdgeCases(t *testing.T) { + factory := testFactory(t) - if params.MaxBlockSize != TestAlwaysBlockSize { - t.Errorf("expected MaxBlockSize %d with zero throttle block size, got %d", - TestAlwaysBlockSize, params.MaxBlockSize) - } + t.Run("block size upper limit less than lower limit", func(t *testing.T) { + controller, err := factory.CreateController( + config.StepControllerType, config.ThrottleParams{ + LowerThreshold: TestLowerThresholdBytes, + UpperThreshold: TestUpperThreshold, + TxSizeLowerLimit: TestTxSizeLowerLimit, + TxSizeUpperLimit: TestTxSizeUpperLimit, + BlockSizeLowerLimit: 5, + BlockSizeUpperLimit: 4, // Upper limit less than lower limit + }, nil) + + require.Error(t, err, "expected error when block size upper limit is less than lower limit") + require.Nil(t, controller, "expected nil controller when configuration is invalid") }) - t.Run("throttle block size greater than always block size", func(t *testing.T) { - testConfig := ThrottleConfig{ - Threshold: TestThresholdBytes, - ThrottleTxSize: TestThrottleTxSize, - ThrottleBlockSize: TestAlwaysBlockSize + 50_000, // Greater than always size - AlwaysBlockSize: TestAlwaysBlockSize, - } + t.Run("zero upper limit", func(t *testing.T) { + controller, err := factory.CreateController( + config.StepControllerType, config.ThrottleParams{ + LowerThreshold: TestLowerThresholdBytes, + UpperThreshold: 
TestUpperThreshold, + TxSizeLowerLimit: TestTxSizeLowerLimit, + TxSizeUpperLimit: TestTxSizeUpperLimit, + BlockSizeLowerLimit: TestBlockSizeLowerLimit, + BlockSizeUpperLimit: 0, // Zero upper limit + }, nil) + + require.Error(t, err, "expected error when block size upper limit is zero") + require.Nil(t, controller, "expected nil controller when configuration is invalid") + }) - controller := NewThrottleController(testStepStrategy(t), testConfig) - params := controller.intensityToParams(0.5, testConfig) + t.Run("block size lower limit greater than upper limit", func(t *testing.T) { + controller, err := factory.CreateController( + config.StepControllerType, config.ThrottleParams{ + LowerThreshold: TestLowerThresholdBytes, + UpperThreshold: TestUpperThreshold, + TxSizeLowerLimit: TestTxSizeLowerLimit, + TxSizeUpperLimit: TestTxSizeUpperLimit, + BlockSizeLowerLimit: TestBlockSizeUpperLimit + 50_000, // Greater than upper limit + BlockSizeUpperLimit: TestBlockSizeUpperLimit, + }, nil) + + require.Error(t, err, "expected error when block size lower limit is greater than upper limit") + require.Nil(t, controller, "expected nil controller when configuration is invalid") + }) - // Should use always block size when throttle block size is greater - if params.MaxBlockSize != TestAlwaysBlockSize { - t.Errorf("expected MaxBlockSize %d when throttle > always, got %d", - TestAlwaysBlockSize, params.MaxBlockSize) - } + t.Run("valid configuration should not error", func(t *testing.T) { + controller, err := factory.CreateController( + config.StepControllerType, config.ThrottleParams{ + LowerThreshold: TestLowerThresholdBytes, + UpperThreshold: TestUpperThreshold, + TxSizeLowerLimit: TestTxSizeLowerLimit, + TxSizeUpperLimit: TestTxSizeUpperLimit, + BlockSizeLowerLimit: TestBlockSizeLowerLimit, + BlockSizeUpperLimit: TestBlockSizeUpperLimit, + }, nil) + + require.NoError(t, err, "expected valid configuration to create controller without error") + require.NotNil(t, controller, "expected 
valid controller to be created") }) +} - t.Run("zero always block size", func(t *testing.T) { +// TestIntensityToParamsEdgeCases tests edge cases for the intensityToParams function +func TestIntensityToParamsEdgeCases(t *testing.T) { + t.Run("zero BlockSizeLowerLimit", func(t *testing.T) { testConfig := ThrottleConfig{ - Threshold: TestThresholdBytes, - ThrottleTxSize: TestThrottleTxSize, - ThrottleBlockSize: TestThrottleBlockSize, - AlwaysBlockSize: 0, + TxSizeLowerLimit: TestTxSizeLowerLimit, + BlockSizeLowerLimit: 0, + BlockSizeUpperLimit: TestBlockSizeUpperLimit, } controller := NewThrottleController(testStepStrategy(t), testConfig) params := controller.intensityToParams(0.5, testConfig) - if params.MaxBlockSize != TestThrottleBlockSize { - t.Errorf("expected MaxBlockSize %d with zero always block size, got %d", - TestThrottleBlockSize, params.MaxBlockSize) + if params.MaxBlockSize != 0 { + t.Errorf("expected MaxBlockSize %d with zero throttle block size, got %d", + 0, params.MaxBlockSize) } }) + } // TestIntensityToParamsConsistency tests that intensityToParams produces consistent results func TestIntensityToParamsConsistency(t *testing.T) { testConfig := ThrottleConfig{ - Threshold: TestThresholdBytes, - ThrottleTxSize: TestThrottleTxSize, - ThrottleBlockSize: TestThrottleBlockSize, - AlwaysBlockSize: TestAlwaysBlockSize, + TxSizeLowerLimit: TestTxSizeLowerLimit, + BlockSizeLowerLimit: TestBlockSizeLowerLimit, + BlockSizeUpperLimit: TestBlockSizeUpperLimit, } controller := NewThrottleController(testStepStrategy(t), testConfig) @@ -597,10 +651,9 @@ func TestIntensityToParamsConsistency(t *testing.T) { // TestIntensityToParamsThreadSafety tests that intensityToParams is thread-safe func TestIntensityToParamsThreadSafety(t *testing.T) { testConfig := ThrottleConfig{ - Threshold: TestThresholdBytes, - ThrottleTxSize: TestThrottleTxSize, - ThrottleBlockSize: TestThrottleBlockSize, - AlwaysBlockSize: TestAlwaysBlockSize, + TxSizeLowerLimit: TestTxSizeLowerLimit, + 
BlockSizeLowerLimit: TestBlockSizeLowerLimit, + BlockSizeUpperLimit: TestBlockSizeUpperLimit, } controller := NewThrottleController(testStepStrategy(t), testConfig) @@ -627,9 +680,9 @@ func TestIntensityToParamsThreadSafety(t *testing.T) { goroutineId, j, intensity, params.Intensity) } - if intensity > 0 && params.MaxTxSize != TestThrottleTxSize { + if intensity > 0 && params.MaxTxSize != TestTxSizeLowerLimit { t.Errorf("goroutine %d call %d: expected MaxTxSize %d, got %d", - goroutineId, j, TestThrottleTxSize, params.MaxTxSize) + goroutineId, j, TestTxSizeLowerLimit, params.MaxTxSize) } } }(i) diff --git a/op-batcher/batcher/throttler/linear_strategy.go b/op-batcher/batcher/throttler/linear_strategy.go index 4f68a225a8149..e4079cdd65b3e 100644 --- a/op-batcher/batcher/throttler/linear_strategy.go +++ b/op-batcher/batcher/throttler/linear_strategy.go @@ -9,23 +9,21 @@ import ( // LinearStrategy implements linear throttling for a smoother and more eager response than the step strategy type LinearStrategy struct { - threshold uint64 - maxThreshold uint64 + lowerThreshold uint64 + upperThreshold uint64 mu sync.RWMutex currentIntensity float64 } -func NewLinearStrategy(threshold uint64, multiplier float64, log log.Logger) *LinearStrategy { - maxThreshold := uint64(float64(threshold) * multiplier) - // Ensure maxThreshold is always greater than threshold to prevent division by zero - if maxThreshold <= threshold { - maxThreshold = threshold + 1 - log.Warn("maxThreshold is less than or equal to threshold, setting maxThreshold to threshold + 1", "threshold", threshold, "multiplier", multiplier, "maxThreshold", maxThreshold) +func NewLinearStrategy(lowerThreshold uint64, upperThreshold uint64, log log.Logger) *LinearStrategy { + if upperThreshold <= lowerThreshold { + panic("maxThreshold must be greater than threshold") } + return &LinearStrategy{ - threshold: threshold, - maxThreshold: maxThreshold, + lowerThreshold: lowerThreshold, + upperThreshold: upperThreshold, 
currentIntensity: 0.0, } } @@ -33,13 +31,13 @@ func NewLinearStrategy(threshold uint64, multiplier float64, log log.Logger) *Li func (q *LinearStrategy) Update(currentPendingBytes uint64) float64 { var intensity float64 = 0.0 - if currentPendingBytes > q.threshold { + if currentPendingBytes > q.lowerThreshold { // Linear scaling from threshold to maxThreshold - if currentPendingBytes >= q.maxThreshold { + if currentPendingBytes >= q.upperThreshold { intensity = 1.0 } else { // Linear interpolation (x curve for more aggressive throttling) - intensity = float64(currentPendingBytes-q.threshold) / float64(q.maxThreshold-q.threshold) + intensity = float64(currentPendingBytes-q.lowerThreshold) / float64(q.upperThreshold-q.lowerThreshold) } } diff --git a/op-batcher/batcher/throttler/linear_strategy_test.go b/op-batcher/batcher/throttler/linear_strategy_test.go index ba585da87f8e6..bcef50f375b6a 100644 --- a/op-batcher/batcher/throttler/linear_strategy_test.go +++ b/op-batcher/batcher/throttler/linear_strategy_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-batcher/config" + "github.com/stretchr/testify/require" ) // Test constants specific to linear strategy @@ -18,14 +19,14 @@ const ( ) func TestLinearStrategy_NewLinearStrategy(t *testing.T) { - strategy := NewLinearStrategy(TestLinearThreshold, TestLinearMultiplier, newTestLogger(t)) + strategy := NewLinearStrategy(TestLinearThreshold, TestLinearMaxThreshold, newTestLogger(t)) - if strategy.threshold != TestLinearThreshold { - t.Errorf("expected threshold %d, got %d", TestLinearThreshold, strategy.threshold) + if strategy.lowerThreshold != TestLinearThreshold { + t.Errorf("expected threshold %d, got %d", TestLinearThreshold, strategy.lowerThreshold) } - if strategy.maxThreshold != TestLinearMaxThreshold { - t.Errorf("expected maxThreshold %d, got %d", TestLinearMaxThreshold, strategy.maxThreshold) + if strategy.upperThreshold != TestLinearMaxThreshold { + t.Errorf("expected 
maxThreshold %d, got %d", TestLinearMaxThreshold, strategy.upperThreshold) } // Test initial state @@ -40,7 +41,7 @@ func TestLinearStrategy_NewLinearStrategy(t *testing.T) { } func TestLinearStrategy_Update(t *testing.T) { - strategy := NewLinearStrategy(TestLinearThreshold, TestLinearMultiplier, newTestLogger(t)) + strategy := NewLinearStrategy(TestLinearThreshold, TestLinearMaxThreshold, newTestLogger(t)) tests := []struct { name string @@ -116,7 +117,7 @@ func TestLinearStrategy_Update(t *testing.T) { } func TestLinearStrategy_LinearScaling(t *testing.T) { - strategy := NewLinearStrategy(TestLinearThreshold, TestLinearMultiplier, newTestLogger(t)) + strategy := NewLinearStrategy(TestLinearThreshold, TestLinearMaxThreshold, newTestLogger(t)) // Test that intensity scales linearly between threshold and maxThreshold testPoints := []struct { @@ -144,7 +145,7 @@ func TestLinearStrategy_LinearScaling(t *testing.T) { } func TestLinearStrategy_GetType(t *testing.T) { - strategy := NewLinearStrategy(TestLinearThreshold, TestLinearMultiplier, newTestLogger(t)) + strategy := NewLinearStrategy(TestLinearThreshold, TestLinearMaxThreshold, newTestLogger(t)) if strategy.GetType() != config.LinearControllerType { t.Errorf("expected GetType() to return %s, got %s", config.LinearControllerType, strategy.GetType()) @@ -152,7 +153,7 @@ func TestLinearStrategy_GetType(t *testing.T) { } func TestLinearStrategy_Reset(t *testing.T) { - strategy := NewLinearStrategy(TestLinearThreshold, TestLinearMultiplier, newTestLogger(t)) + strategy := NewLinearStrategy(TestLinearThreshold, TestLinearMaxThreshold, newTestLogger(t)) // Update to build some state strategy.Update(TestLinearMaxThreshold) @@ -171,30 +172,17 @@ func TestLinearStrategy_Reset(t *testing.T) { } func TestLinearStrategy_EdgeCases(t *testing.T) { - t.Run("multiplier less than 1", func(t *testing.T) { - // Test when multiplier results in maxThreshold <= threshold - strategy := NewLinearStrategy(TestLinearThreshold, 0.5, 
newTestLogger(t)) + t.Run("max threshold less than threshold", func(t *testing.T) { - // Should handle this gracefully without division by zero - intensity := strategy.Update(TestLinearThreshold * 2) - - if intensity < TestIntensityMin || intensity > TestIntensityMax { - t.Errorf("expected valid intensity [%f,%f], got %f", TestIntensityMin, TestIntensityMax, intensity) - } - }) - - t.Run("zero threshold", func(t *testing.T) { - strategy := NewLinearStrategy(0, TestLinearMultiplier, newTestLogger(t)) - - intensity := strategy.Update(1) + require.Panics(t, func() { + // Test when multiplier results in maxThreshold <= threshold + NewLinearStrategy(TestLinearThreshold, 0, newTestLogger(t)) + }) - if intensity != TestIntensityMax { - t.Errorf("expected maximum intensity with zero threshold, got %f", intensity) - } }) t.Run("very large multiplier", func(t *testing.T) { - strategy := NewLinearStrategy(TestLinearThreshold, 100.0, newTestLogger(t)) + strategy := NewLinearStrategy(TestLinearThreshold, TestLinearThreshold*2000, newTestLogger(t)) // Even with very large multiplier, should work correctly intensity := strategy.Update(TestLinearThreshold * 2) @@ -207,7 +195,7 @@ func TestLinearStrategy_EdgeCases(t *testing.T) { } func TestLinearStrategy_Load(t *testing.T) { - strategy := NewLinearStrategy(TestLinearThreshold, TestLinearMultiplier, newTestLogger(t)) + strategy := NewLinearStrategy(TestLinearThreshold, TestLinearMaxThreshold, newTestLogger(t)) // Test load consistency after update updateIntensity := strategy.Update(TestLinearThreshold + TestLinearThreshold/2) @@ -223,7 +211,7 @@ func TestLinearStrategy_Load(t *testing.T) { } func TestLinearStrategy_IntensityProgression(t *testing.T) { - strategy := NewLinearStrategy(TestLinearThreshold, TestLinearMultiplier, newTestLogger(t)) + strategy := NewLinearStrategy(TestLinearThreshold, TestLinearMaxThreshold, newTestLogger(t)) // Test that intensity increases properly as load increases loads := []uint64{ diff --git 
a/op-batcher/batcher/throttler/quadratic_strategy.go b/op-batcher/batcher/throttler/quadratic_strategy.go index a05db9e66279b..734a285504194 100644 --- a/op-batcher/batcher/throttler/quadratic_strategy.go +++ b/op-batcher/batcher/throttler/quadratic_strategy.go @@ -9,23 +9,20 @@ import ( // QuadraticStrategy implements quadratic throttling for more aggressive scaling type QuadraticStrategy struct { - threshold uint64 - maxThreshold uint64 + lowerThreshold uint64 + upperThreshold uint64 mu sync.RWMutex currentIntensity float64 } -func NewQuadraticStrategy(threshold uint64, multiplier float64, log log.Logger) *QuadraticStrategy { - maxThreshold := uint64(float64(threshold) * multiplier) - // Ensure maxThreshold is always greater than threshold to prevent division by zero - if maxThreshold <= threshold { - maxThreshold = threshold + 1 - log.Warn("maxThreshold is less than or equal to threshold, setting maxThreshold to threshold + 1", "threshold", threshold, "multiplier", multiplier, "maxThreshold", maxThreshold) +func NewQuadraticStrategy(lowerThreshold uint64, upperThreshold uint64, log log.Logger) *QuadraticStrategy { + if upperThreshold <= lowerThreshold { + panic("maxThreshold must be greater than threshold") } return &QuadraticStrategy{ - threshold: threshold, - maxThreshold: maxThreshold, + lowerThreshold: lowerThreshold, + upperThreshold: upperThreshold, currentIntensity: 0.0, } } @@ -33,13 +30,13 @@ func NewQuadraticStrategy(threshold uint64, multiplier float64, log log.Logger) func (q *QuadraticStrategy) Update(currentPendingBytes uint64) float64 { var intensity float64 = 0.0 - if currentPendingBytes > q.threshold { + if currentPendingBytes > q.lowerThreshold { // Quadratic scaling from threshold to maxThreshold - if currentPendingBytes >= q.maxThreshold { + if currentPendingBytes >= q.upperThreshold { intensity = 1.0 } else { // Quadratic interpolation (x^2 curve for more aggressive throttling) - linear := float64(currentPendingBytes-q.threshold) / 
float64(q.maxThreshold-q.threshold) + linear := float64(currentPendingBytes-q.lowerThreshold) / float64(q.upperThreshold-q.lowerThreshold) intensity = linear * linear } } diff --git a/op-batcher/batcher/throttler/quadratic_strategy_test.go b/op-batcher/batcher/throttler/quadratic_strategy_test.go index 05b4ea0f5c7ea..a88c0f0755b61 100644 --- a/op-batcher/batcher/throttler/quadratic_strategy_test.go +++ b/op-batcher/batcher/throttler/quadratic_strategy_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-batcher/config" + "github.com/stretchr/testify/require" ) // Test constants specific to quadratic strategy @@ -18,14 +19,14 @@ const ( ) func TestQuadraticStrategy_NewQuadraticStrategy(t *testing.T) { - strategy := NewQuadraticStrategy(TestQuadraticThreshold, TestQuadraticMultiplier, newTestLogger(t)) + strategy := NewQuadraticStrategy(TestQuadraticThreshold, TestQuadraticMaxThreshold, newTestLogger(t)) - if strategy.threshold != TestQuadraticThreshold { - t.Errorf("expected threshold %d, got %d", TestQuadraticThreshold, strategy.threshold) + if strategy.lowerThreshold != TestQuadraticThreshold { + t.Errorf("expected threshold %d, got %d", TestQuadraticThreshold, strategy.lowerThreshold) } - if strategy.maxThreshold != TestQuadraticMaxThreshold { - t.Errorf("expected maxThreshold %d, got %d", TestQuadraticMaxThreshold, strategy.maxThreshold) + if strategy.upperThreshold != TestQuadraticMaxThreshold { + t.Errorf("expected maxThreshold %d, got %d", TestQuadraticMaxThreshold, strategy.upperThreshold) } // Test initial state @@ -40,7 +41,7 @@ func TestQuadraticStrategy_NewQuadraticStrategy(t *testing.T) { } func TestQuadraticStrategy_Update(t *testing.T) { - strategy := NewQuadraticStrategy(TestQuadraticThreshold, TestQuadraticMultiplier, newTestLogger(t)) + strategy := NewQuadraticStrategy(TestQuadraticThreshold, TestQuadraticMaxThreshold, newTestLogger(t)) tests := []struct { name string @@ -116,7 +117,7 @@ func 
TestQuadraticStrategy_Update(t *testing.T) { } func TestQuadraticStrategy_QuadraticScaling(t *testing.T) { - strategy := NewQuadraticStrategy(TestQuadraticThreshold, TestQuadraticMultiplier, newTestLogger(t)) + strategy := NewQuadraticStrategy(TestQuadraticThreshold, TestQuadraticMaxThreshold, newTestLogger(t)) // Test that intensity scales quadratically between threshold and maxThreshold testPoints := []struct { @@ -144,7 +145,7 @@ func TestQuadraticStrategy_QuadraticScaling(t *testing.T) { } func TestQuadraticStrategy_GetType(t *testing.T) { - strategy := NewQuadraticStrategy(TestQuadraticThreshold, TestQuadraticMultiplier, newTestLogger(t)) + strategy := NewQuadraticStrategy(TestQuadraticThreshold, TestQuadraticMaxThreshold, newTestLogger(t)) if strategy.GetType() != config.QuadraticControllerType { t.Errorf("expected GetType() to return %s, got %s", config.QuadraticControllerType, strategy.GetType()) @@ -152,7 +153,7 @@ func TestQuadraticStrategy_GetType(t *testing.T) { } func TestQuadraticStrategy_Reset(t *testing.T) { - strategy := NewQuadraticStrategy(TestQuadraticThreshold, TestQuadraticMultiplier, newTestLogger(t)) + strategy := NewQuadraticStrategy(TestQuadraticThreshold, TestQuadraticMaxThreshold, newTestLogger(t)) // Update to build some state strategy.Update(TestQuadraticMaxThreshold) @@ -171,35 +172,20 @@ func TestQuadraticStrategy_Reset(t *testing.T) { } func TestQuadraticStrategy_EdgeCases(t *testing.T) { - t.Run("multiplier less than 1", func(t *testing.T) { - // Test when multiplier results in maxThreshold <= threshold - strategy := NewQuadraticStrategy(TestQuadraticThreshold, 0.5, newTestLogger(t)) - - // Should handle this gracefully without division by zero - intensity := strategy.Update(TestQuadraticThreshold * 2) - - if intensity < TestIntensityMin || intensity > TestIntensityMax { - t.Errorf("expected valid intensity [%f,%f], got %f", TestIntensityMin, TestIntensityMax, intensity) - } - }) - - t.Run("zero threshold", func(t *testing.T) { - 
strategy := NewQuadraticStrategy(0, TestQuadraticMultiplier, newTestLogger(t)) - - intensity := strategy.Update(1) - - if intensity != TestIntensityMax { - t.Errorf("expected maximum intensity with zero threshold, got %f", intensity) - } + t.Run("max threshold less than threshold", func(t *testing.T) { + require.Panics(t, func() { + // Test when multiplier results in maxThreshold <= threshold + NewQuadraticStrategy(TestQuadraticThreshold, 0, newTestLogger(t)) + }) }) t.Run("very large multiplier", func(t *testing.T) { - strategy := NewQuadraticStrategy(TestQuadraticThreshold, 100.0, newTestLogger(t)) + strategy := NewQuadraticStrategy(TestQuadraticThreshold, TestQuadraticThreshold*2000, newTestLogger(t)) // Even with very large multiplier, should work correctly - intensity := strategy.Update(TestQuadraticThreshold * 2) + intensity := strategy.Update(TestQuadraticThreshold * 2) - // Should be very low intensity due to large range and quadratic scaling + // Should be very low intensity due to large range and quadratic scaling if intensity > 0.05 { t.Errorf("expected very low intensity with large multiplier, got %f", intensity) } @@ -207,7 +193,7 @@ func TestQuadraticStrategy_EdgeCases(t *testing.T) { } func TestQuadraticStrategy_Load(t *testing.T) { - strategy := NewQuadraticStrategy(TestQuadraticThreshold, TestQuadraticMultiplier, newTestLogger(t)) + strategy := NewQuadraticStrategy(TestQuadraticThreshold, TestQuadraticMaxThreshold, newTestLogger(t)) // Test load consistency after update updateIntensity := strategy.Update(TestQuadraticThreshold + TestQuadraticThreshold/2) @@ -223,7 +209,7 @@ func TestQuadraticStrategy_Load(t *testing.T) { } func TestQuadraticStrategy_IntensityProgression(t *testing.T) { - strategy := NewQuadraticStrategy(TestQuadraticThreshold, TestQuadraticMultiplier, newTestLogger(t)) + strategy := NewQuadraticStrategy(TestQuadraticThreshold, TestQuadraticMaxThreshold, newTestLogger(t)) // Test that intensity increases properly as load increases loads :=
[]uint64{ diff --git a/op-batcher/batcher/throttler/types.go b/op-batcher/batcher/throttler/types.go index 217ca3e698102..55845048e77f3 100644 --- a/op-batcher/batcher/throttler/types.go +++ b/op-batcher/batcher/throttler/types.go @@ -35,8 +35,8 @@ type ThrottleStrategy interface { // ThrottleConfig holds the configuration parameters for throttling type ThrottleConfig struct { - Threshold uint64 - ThrottleTxSize uint64 - ThrottleBlockSize uint64 - AlwaysBlockSize uint64 + TxSizeLowerLimit uint64 + TxSizeUpperLimit uint64 + BlockSizeLowerLimit uint64 + BlockSizeUpperLimit uint64 } diff --git a/op-batcher/batcher/types.go b/op-batcher/batcher/types.go new file mode 100644 index 0000000000000..a03701d73c421 --- /dev/null +++ b/op-batcher/batcher/types.go @@ -0,0 +1,51 @@ +package batcher + +import ( + "github.com/ethereum/go-ethereum/core/types" +) + +type SizedBlock struct { + *types.Block + rawSize uint64 + estimatedDABytes uint64 +} + +func ToSizedBlock(block *types.Block) SizedBlock { + b := SizedBlock{Block: block} + // populate caches + b.RawSize() + b.EstimatedDABytes() + return b +} + +func (b *SizedBlock) RawSize() uint64 { + if b.rawSize == 0 { + b.rawSize = uint64(70) + for _, tx := range b.Transactions() { + // Deposit transactions are not included in batches + if tx.IsDepositTx() { + continue + } + // Add 2 for the overhead of encoding the tx bytes in a RLP list + b.rawSize += tx.Size() + 2 + } + } + return b.rawSize +} + +func (b *SizedBlock) EstimatedDABytes() uint64 { + if b.estimatedDABytes == 0 { + daSize := uint64(70) // estimated overhead of batch metadata + for _, tx := range b.Transactions() { + // Deposit transactions are not included in batches + if tx.IsDepositTx() { + continue + } + // It is safe to assume that the estimated DA size is always a uint64, + // so calling Uint64() is safe + daSize += tx.RollupCostData().EstimatedDASize().Uint64() + } + b.estimatedDABytes = daSize + } + return b.estimatedDABytes +} diff --git 
a/op-batcher/config/types.go b/op-batcher/config/types.go index adfdb22a97a13..87e75d14f7287 100644 --- a/op-batcher/config/types.go +++ b/op-batcher/config/types.go @@ -34,13 +34,13 @@ func (t ThrottleControllerType) String() string { // ThrottleControllerInfo represents throttle controller information type ThrottleControllerInfo struct { - Type string `json:"type"` - Threshold uint64 `json:"threshold"` - MaxThreshold float64 `json:"max_threshold"` - CurrentLoad uint64 `json:"current_load"` - Intensity float64 `json:"intensity"` - MaxTxSize uint64 `json:"max_tx_size"` - MaxBlockSize uint64 `json:"max_block_size"` + Type string `json:"type"` + LowerThreshold uint64 `json:"lower_threshold"` + UpperThreshold uint64 `json:"upper_threshold"` + CurrentLoad uint64 `json:"current_load"` + Intensity float64 `json:"intensity"` + MaxTxSize uint64 `json:"max_tx_size"` + MaxBlockSize uint64 `json:"max_block_size"` } // PIDConfig represents PID controller configuration for RPC @@ -88,11 +88,12 @@ func (p *PIDConfig) UnmarshalJSON(data []byte) error { } type ThrottleParams struct { - Threshold uint64 - TxSize uint64 - BlockSize uint64 - AlwaysBlockSize uint64 - ThresholdMultiplier float64 + LowerThreshold uint64 + UpperThreshold uint64 + TxSizeLowerLimit uint64 + TxSizeUpperLimit uint64 + BlockSizeLowerLimit uint64 + BlockSizeUpperLimit uint64 PIDConfig *PIDConfig ControllerType ThrottleControllerType Endpoints []string diff --git a/op-batcher/flags/flags.go b/op-batcher/flags/flags.go index af48a118aef58..86faf762f2838 100644 --- a/op-batcher/flags/flags.go +++ b/op-batcher/flags/flags.go @@ -21,14 +21,7 @@ import ( ) const ( - EnvVarPrefix = "OP_BATCHER" - DefaultPIDSampleTime = 2 * time.Second - DefaultPIDKp = 0.33 - DefaultPIDKi = 0.01 - DefaultPIDKd = 0.05 - DefaultPIDIntegralMax = 1000.0 - DefaultPIDOutputMax = 1.0 - DefaultPIDThresholdMultiplier = 2.0 + EnvVarPrefix = "OP_BATCHER" ) func prefixEnvVars(name string) []string { @@ -165,128 +158,7 @@ var ( Value: false, 
EnvVars: prefixEnvVars("WAIT_NODE_SYNC"), } - ThrottleThresholdFlag = &cli.IntFlag{ - Name: "throttle-threshold", - Usage: "The threshold on pending-blocks-bytes-current beyond which the batcher will instruct the block builder to start throttling transactions with larger DA demands. Zero disables throttling.", - Value: 1_000_000, - EnvVars: prefixEnvVars("THROTTLE_THRESHOLD"), - } - ThrottleTxSizeFlag = &cli.IntFlag{ - Name: "throttle-tx-size", - Usage: "The DA size of transactions to start throttling when we are over the throttle threshold", - Value: 5000, // less than 1% of all transactions should be affected by this limit - EnvVars: prefixEnvVars("THROTTLE_TX_SIZE"), - } - ThrottleBlockSizeFlag = &cli.IntFlag{ - Name: "throttle-block-size", - Usage: "The total DA limit to start imposing on block building when we are over the throttle threshold", - Value: 21_000, // at least 70 transactions per block of up to 300 compressed bytes each. - EnvVars: prefixEnvVars("THROTTLE_BLOCK_SIZE"), - } - ThrottleAlwaysBlockSizeFlag = &cli.IntFlag{ - Name: "throttle-always-block-size", - Usage: "The total DA limit to start imposing on block building at all times", - Value: 130_000, // should be larger than the builder's max-l2-tx-size to prevent endlessly throttling some txs - EnvVars: prefixEnvVars("THROTTLE_ALWAYS_BLOCK_SIZE"), - } - AdditionalThrottlingEndpointsFlag = &cli.StringSliceFlag{ - Name: "additional-throttling-endpoints", - Usage: "Comma-separated list of endpoints to distribute throttling configuration to (in addition to the L2 endpoints specified with --l2-eth-rpc).", - EnvVars: prefixEnvVars("ADDITIONAL_THROTTLING_ENDPOINTS"), - } - ThrottleControllerTypeFlag = &cli.StringFlag{ - Name: "throttle-controller-type", - Usage: "Type of throttle controller to use: 'step' (default), 'linear', 'quadratic' or 'pid' (EXPERIMENTAL - use with caution)", - Value: "step", - EnvVars: prefixEnvVars("THROTTLE_CONTROLLER_TYPE"), - Action: func(ctx *cli.Context, value string) error 
{ - validTypes := []string{"step", "linear", "quadratic", "pid"} - for _, validType := range validTypes { - if value == validType { - return nil - } - } - return fmt.Errorf("throttle-controller-type must be one of %v, got %s", validTypes, value) - }, - } - ThrottlePidKpFlag = &cli.Float64Flag{ - Name: "throttle-pid-kp", - Usage: "EXPERIMENTAL: PID controller proportional gain. Only relevant if --throttle-controller-type is set to 'pid'", - Value: DefaultPIDKp, - EnvVars: prefixEnvVars("THROTTLE_PID_KP"), - Action: func(ctx *cli.Context, value float64) error { - if value < 0 { - return fmt.Errorf("throttle-pid-kp must be >= 0, got %f", value) - } - return nil - }, - } - ThrottlePidKiFlag = &cli.Float64Flag{ - Name: "throttle-pid-ki", - Usage: "EXPERIMENTAL: PID controller integral gain. Only relevant if --throttle-controller-type is set to 'pid'", - Value: DefaultPIDKi, - EnvVars: prefixEnvVars("THROTTLE_PID_KI"), - Action: func(ctx *cli.Context, value float64) error { - if value < 0 { - return fmt.Errorf("throttle-pid-ki must be >= 0, got %f", value) - } - return nil - }, - } - ThrottlePidKdFlag = &cli.Float64Flag{ - Name: "throttle-pid-kd", - Usage: "EXPERIMENTAL: PID controller derivative gain. Only relevant if --throttle-controller-type is set to 'pid'", - Value: DefaultPIDKd, - EnvVars: prefixEnvVars("THROTTLE_PID_KD"), - Action: func(ctx *cli.Context, value float64) error { - if value < 0 { - return fmt.Errorf("throttle-pid-kd must be >= 0, got %f", value) - } - return nil - }, - } - ThrottlePidIntegralMaxFlag = &cli.Float64Flag{ - Name: "throttle-pid-integral-max", - Usage: "EXPERIMENTAL: PID controller maximum integral windup. 
Only relevant if --throttle-controller-type is set to 'pid'", - Value: DefaultPIDIntegralMax, - EnvVars: prefixEnvVars("THROTTLE_PID_INTEGRAL_MAX"), - Action: func(ctx *cli.Context, value float64) error { - if value <= 0 { - return fmt.Errorf("throttle-pid-integral-max must be > 0, got %f", value) - } - return nil - }, - } - ThrottlePidOutputMaxFlag = &cli.Float64Flag{ - Name: "throttle-pid-output-max", - Usage: "EXPERIMENTAL: PID controller maximum output. Only relevant if --throttle-controller-type is set to 'pid'", - Value: DefaultPIDOutputMax, - EnvVars: prefixEnvVars("THROTTLE_PID_OUTPUT_MAX"), - Action: func(ctx *cli.Context, value float64) error { - if value <= 0 || value > 1.0 { - return fmt.Errorf("throttle-pid-output-max must be between 0 and 1, got %f", value) - } - return nil - }, - } - ThrottlePidSampleTimeFlag = &cli.DurationFlag{ - Name: "throttle-pid-sample-time", - Usage: "EXPERIMENTAL: PID controller sample time interval, default is " + DefaultPIDSampleTime.String(), - Value: DefaultPIDSampleTime, - EnvVars: prefixEnvVars("THROTTLE_PID_SAMPLE_TIME"), - } - ThrottleThresholdMultiplierFlag = &cli.Float64Flag{ - Name: "throttle-threshold-multiplier", - Usage: "Multiplier for the max threshold used by linear and quadratic controllers (multiplied by base threshold)", - Value: DefaultPIDThresholdMultiplier, - EnvVars: prefixEnvVars("THROTTLE_THRESHOLD_MULTIPLIER"), - Action: func(ctx *cli.Context, value float64) error { - if value < 1 { - return fmt.Errorf("throttle-threshold-multiplier must be >= 1, got %f", value) - } - return nil - }, - } + // Legacy Flags SequencerHDPathFlag = txmgr.SequencerHDPathFlag ) @@ -315,22 +187,10 @@ var optionalFlags = []cli.Flag{ DataAvailabilityTypeFlag, ActiveSequencerCheckDurationFlag, CompressionAlgoFlag, - ThrottleThresholdFlag, - ThrottleTxSizeFlag, - ThrottleBlockSizeFlag, - ThrottleAlwaysBlockSizeFlag, - AdditionalThrottlingEndpointsFlag, - ThrottleControllerTypeFlag, - ThrottlePidKpFlag, - ThrottlePidKiFlag, - 
ThrottlePidKdFlag, - ThrottlePidIntegralMaxFlag, - ThrottlePidOutputMaxFlag, - ThrottlePidSampleTimeFlag, - ThrottleThresholdMultiplierFlag, } func init() { + optionalFlags = append(optionalFlags, ThrottleFlags...) optionalFlags = append(optionalFlags, oprpc.CLIFlags(EnvVarPrefix)...) optionalFlags = append(optionalFlags, oplog.CLIFlags(EnvVarPrefix)...) optionalFlags = append(optionalFlags, opmetrics.CLIFlags(EnvVarPrefix)...) diff --git a/op-batcher/flags/throttle_flags.go b/op-batcher/flags/throttle_flags.go new file mode 100644 index 0000000000000..5e4f5673db667 --- /dev/null +++ b/op-batcher/flags/throttle_flags.go @@ -0,0 +1,179 @@ +package flags + +import ( + "fmt" + "time" + + "github.com/urfave/cli/v2" +) + +const ( + // Block-builder side + DefaultThrottleTxSizeLowerLimit = 150 + DefaultThrottleTxSizeUpperLimit = 20_000 + DefaultThrottleBlockSizeLowerLimit = 2_000 + DefaultThrottleBlockSizeUpperLimit = 130_000 + + // Controller side + DefaultThrottleControllerType = "quadratic" + DefaultThrottleLowerThreshold = 3_200_000 // allows for 4x 6-blob-tx channels at ~131KB per blob + DefaultThrottleUpperThreshold = DefaultThrottleLowerThreshold * 4 + DefaultPIDSampleTime = 2 * time.Second + DefaultPIDKp = 0.33 + DefaultPIDKi = 0.01 + DefaultPIDKd = 0.05 + DefaultPIDIntegralMax = 1000.0 + DefaultPIDOutputMax = 1.0 +) + +var ( + // Block-builder side + AdditionalThrottlingEndpointsFlag = &cli.StringSliceFlag{ + Name: "throttle.additional-endpoints", + Usage: "Comma-separated list of endpoints to distribute throttling configuration to (in addition to the L2 endpoints specified with --l2-eth-rpc).", + EnvVars: prefixEnvVars("THROTTLE_ADDITIONAL_ENDPOINTS"), + } + + // Builder-side Tx-size limits + ThrottleTxSizeLowerLimitFlag = &cli.Uint64Flag{ + Name: "throttle.tx-size-lower-limit", + Usage: "The limit on the DA size of transactions when we are at maximum throttle intensity. 
0 means no limits will ever be applied, so consider 1 the smallest effective limit.", + Value: DefaultThrottleTxSizeLowerLimit, + EnvVars: prefixEnvVars("THROTTLE_TX_SIZE_LOWER_LIMIT"), + } + ThrottleTxSizeUpperLimitFlag = &cli.Uint64Flag{ + Name: "throttle.tx-size-upper-limit", + Usage: "The limit on the DA size of transactions when we are at 0+ throttle intensity (limit of the intensity as it approaches 0 from positive values). Not applied when throttling is inactive.", + Value: DefaultThrottleTxSizeUpperLimit, + EnvVars: prefixEnvVars("THROTTLE_TX_SIZE_UPPER_LIMIT"), + } + + // Builder-side block-size limits + ThrottleBlockSizeLowerLimitFlag = &cli.Uint64Flag{ + Name: "throttle.block-size-lower-limit", + Usage: "The limit on the DA size of blocks when we are at maximum throttle intensity (linear and quadratic controllers only). 0 means no limits will ever be applied, so consider 1 the smallest effective limit.", + Value: DefaultThrottleBlockSizeLowerLimit, + EnvVars: prefixEnvVars("THROTTLE_BLOCK_SIZE_LOWER_LIMIT"), + } + ThrottleBlockSizeUpperLimitFlag = &cli.Uint64Flag{ + Name: "throttle.block-size-upper-limit", + Usage: "The limit on the DA size of blocks when we are at 0 throttle intensity (applied when throttling is inactive)", + Value: DefaultThrottleBlockSizeUpperLimit, + EnvVars: prefixEnvVars("THROTTLE_BLOCK_SIZE_UPPER_LIMIT"), + } + + // Controller side + ThrottleControllerTypeFlag = &cli.StringFlag{ + Name: "throttle.controller-type", + Usage: "Type of throttle controller to use: 'step', 'linear', 'quadratic' (default) or 'pid' (EXPERIMENTAL - use with caution)", + Value: DefaultThrottleControllerType, + EnvVars: prefixEnvVars("THROTTLE_CONTROLLER_TYPE"), + Action: func(ctx *cli.Context, value string) error { + validTypes := []string{"step", "linear", "quadratic", "pid"} + for _, validType := range validTypes { + if value == validType { + return nil + } + } + return fmt.Errorf("throttle.controller-type must be one of %v, got %s", validTypes, value)
+ }, + } + ThrottleUsafeDABytesLowerThresholdFlag = &cli.Uint64Flag{ + Name: "throttle.unsafe-da-bytes-lower-threshold", + Usage: "The threshold on unsafe_da_bytes beyond which the batcher will start to throttle the block builder. Zero disables throttling.", + Value: DefaultThrottleLowerThreshold, + EnvVars: prefixEnvVars("THROTTLE_UNSAFE_DA_BYTES_LOWER_THRESHOLD"), + } + ThrottleUsafeDABytesUpperThresholdFlag = &cli.Uint64Flag{ + Name: "throttle.unsafe-da-bytes-upper-threshold", + Usage: "Threshold on unsafe_da_bytes at which throttling has the maximum intensity (linear and quadratic controllers only)", + Value: DefaultThrottleUpperThreshold, + EnvVars: prefixEnvVars("THROTTLE_UNSAFE_DA_BYTES_UPPER_THRESHOLD"), + } + + // Controller side (EXPERIMENTAL PID Controller only) + ThrottlePidKpFlag = &cli.Float64Flag{ + Name: "throttle.pid-kp", + Usage: "EXPERIMENTAL: PID controller proportional gain. Only relevant if --throttle-controller-type is set to 'pid'", + Value: DefaultPIDKp, + EnvVars: prefixEnvVars("THROTTLE_PID_KP"), + Action: func(ctx *cli.Context, value float64) error { + if value < 0 { + return fmt.Errorf("throttle-pid-kp must be >= 0, got %f", value) + } + return nil + }, + } + ThrottlePidKiFlag = &cli.Float64Flag{ + Name: "throttle.pid-ki", + Usage: "EXPERIMENTAL: PID controller integral gain. Only relevant if --throttle-controller-type is set to 'pid'", + Value: DefaultPIDKi, + EnvVars: prefixEnvVars("THROTTLE_PID_KI"), + Action: func(ctx *cli.Context, value float64) error { + if value < 0 { + return fmt.Errorf("throttle-pid-ki must be >= 0, got %f", value) + } + return nil + }, + } + ThrottlePidKdFlag = &cli.Float64Flag{ + Name: "throttle.pid-kd", + Usage: "EXPERIMENTAL: PID controller derivative gain. 
Only relevant if --throttle-controller-type is set to 'pid'", + Value: DefaultPIDKd, + EnvVars: prefixEnvVars("THROTTLE_PID_KD"), + Action: func(ctx *cli.Context, value float64) error { + if value < 0 { + return fmt.Errorf("throttle-pid-kd must be >= 0, got %f", value) + } + return nil + }, + } + ThrottlePidIntegralMaxFlag = &cli.Float64Flag{ + Name: "throttle.pid-integral-max", + Usage: "EXPERIMENTAL: PID controller maximum integral windup. Only relevant if --throttle-controller-type is set to 'pid'", + Value: DefaultPIDIntegralMax, + EnvVars: prefixEnvVars("THROTTLE_PID_INTEGRAL_MAX"), + Action: func(ctx *cli.Context, value float64) error { + if value <= 0 { + return fmt.Errorf("throttle-pid-integral-max must be > 0, got %f", value) + } + return nil + }, + } + ThrottlePidOutputMaxFlag = &cli.Float64Flag{ + Name: "throttle.pid-output-max", + Usage: "EXPERIMENTAL: PID controller maximum output. Only relevant if --throttle-controller-type is set to 'pid'", + Value: DefaultPIDOutputMax, + EnvVars: prefixEnvVars("THROTTLE_PID_OUTPUT_MAX"), + Action: func(ctx *cli.Context, value float64) error { + if value <= 0 || value > 1.0 { + return fmt.Errorf("throttle-pid-output-max must be between 0 and 1, got %f", value) + } + return nil + }, + } + ThrottlePidSampleTimeFlag = &cli.DurationFlag{ + Name: "throttle.pid-sample-time", + Usage: "EXPERIMENTAL: PID controller sample time interval, default is " + DefaultPIDSampleTime.String(), + Value: DefaultPIDSampleTime, + EnvVars: prefixEnvVars("THROTTLE_PID_SAMPLE_TIME"), + } +) + +var ThrottleFlags = []cli.Flag{ + AdditionalThrottlingEndpointsFlag, + ThrottleTxSizeLowerLimitFlag, + ThrottleTxSizeUpperLimitFlag, + ThrottleBlockSizeLowerLimitFlag, + ThrottleBlockSizeUpperLimitFlag, + ThrottleControllerTypeFlag, + ThrottleUsafeDABytesLowerThresholdFlag, + ThrottleUsafeDABytesUpperThresholdFlag, + // PID controller flags (only used when controller type is 'pid') + ThrottlePidKpFlag, + ThrottlePidKiFlag, + ThrottlePidKdFlag, + 
ThrottlePidIntegralMaxFlag, + ThrottlePidOutputMaxFlag, + ThrottlePidSampleTimeFlag, +} diff --git a/op-batcher/images/linear_throttling.png b/op-batcher/images/linear_throttling.png deleted file mode 100644 index b6fda1440a36a..0000000000000 Binary files a/op-batcher/images/linear_throttling.png and /dev/null differ diff --git a/op-batcher/images/quadratic_throttling.png b/op-batcher/images/quadratic_throttling.png deleted file mode 100644 index 4891aa0afddd1..0000000000000 Binary files a/op-batcher/images/quadratic_throttling.png and /dev/null differ diff --git a/op-batcher/images/step_throttling.png b/op-batcher/images/step_throttling.png deleted file mode 100644 index 3ca2a70ecc03c..0000000000000 Binary files a/op-batcher/images/step_throttling.png and /dev/null differ diff --git a/op-batcher/images/throttling.png b/op-batcher/images/throttling.png new file mode 100644 index 0000000000000..1ad8ad931e1e5 Binary files /dev/null and b/op-batcher/images/throttling.png differ diff --git a/op-batcher/metrics/metrics.go b/op-batcher/metrics/metrics.go index e48c88ae206b1..92e2e1dee8c3b 100644 --- a/op-batcher/metrics/metrics.go +++ b/op-batcher/metrics/metrics.go @@ -8,7 +8,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" @@ -39,8 +38,8 @@ type Metricer interface { RecordL2BlocksLoaded(l2ref eth.L2BlockRef) RecordChannelOpened(id derive.ChannelID, numPendingBlocks int) RecordL2BlocksAdded(l2ref eth.L2BlockRef, numBlocksAdded, numPendingBlocks, inputBytes, outputComprBytes int) - RecordL2BlockInPendingQueue(block *types.Block) - RecordL2BlockInChannel(block *types.Block) + RecordL2BlockInPendingQueue(rawSize, daSize uint64) + RecordL2BlockInChannel(rawSize, daSize uint64) RecordChannelClosed(id derive.ChannelID, numPendingBlocks int, numFrames int, inputBytes int, outputComprBytes int, 
reason error) RecordChannelFullySubmitted(id derive.ChannelID) RecordChannelTimedOut(id derive.ChannelID) @@ -48,7 +47,9 @@ type Metricer interface { RecordThrottleIntensity(intensity float64, controllerType config.ThrottleControllerType) RecordThrottleParams(maxTxSize, maxBlockSize uint64) RecordThrottleControllerType(controllerType config.ThrottleControllerType) - RecordPendingBytesVsThreshold(pendingBytes, threshold uint64, controllerType config.ThrottleControllerType) + RecordUnsafeBytesVsThreshold(unsafeBytes, threshold uint64, controllerType config.ThrottleControllerType) + RecordUnsafeDABytes(int64) + RecordPendingBlockPruned(rawSize, daSize uint64) // PID Controller specific metrics RecordThrottleControllerState(error, integral, derivative float64) @@ -91,6 +92,8 @@ type Metrics struct { pendingDABytes int64 pendingDABytesGaugeFunc prometheus.GaugeFunc + unsafeDABytesGauge prometheus.Gauge + blocksAddedCount prometheus.Gauge channelInputBytes prometheus.GaugeVec @@ -111,7 +114,7 @@ type Metrics struct { throttleMaxTxSize prometheus.Gauge throttleMaxBlockSize prometheus.Gauge throttleControllerType prometheus.GaugeVec - pendingBytesRatio prometheus.GaugeVec + unsafeBytesRatio prometheus.GaugeVec throttleHistory prometheus.Summary // PID Controller specific metrics @@ -254,10 +257,10 @@ func NewMetrics(procName string) *Metrics { Name: "throttle_controller_type", Help: "Type of throttle controller in use", }, []string{"type"}), - pendingBytesRatio: *factory.NewGaugeVec(prometheus.GaugeOpts{ + unsafeBytesRatio: *factory.NewGaugeVec(prometheus.GaugeOpts{ Namespace: ns, - Name: "pending_bytes_ratio", - Help: "Ratio of pending bytes to threshold", + Name: "unsafe_bytes_ratio", + Help: "Ratio of unsafe bytes to threshold", }, []string{"type"}), throttleHistory: factory.NewSummary(prometheus.SummaryOpts{ Namespace: ns, @@ -290,6 +293,11 @@ func NewMetrics(procName string) *Metrics { Help: "Response time of the PID controller", Buckets: prometheus.DefBuckets, }), + 
unsafeDABytesGauge: factory.NewGauge(prometheus.GaugeOpts{ + Namespace: ns, + Name: "unsafe_da_bytes", + Help: "The estimated number of unsafe DA bytes", + }), } m.pendingDABytesGaugeFunc = factory.NewGaugeFunc(prometheus.GaugeOpts{ Namespace: ns, @@ -386,15 +394,22 @@ func (m *Metrics) RecordChannelClosed(id derive.ChannelID, numPendingBlocks int, m.channelClosedReason.Set(float64(ClosedReasonToNum(reason))) } -func (m *Metrics) RecordL2BlockInPendingQueue(block *types.Block) { - daSize, rawSize := estimateBatchSize(block) +func (m *Metrics) RecordL2BlockInPendingQueue(rawSize, daSize uint64) { m.pendingBlocksBytesTotal.Add(float64(rawSize)) m.pendingBlocksBytesCurrent.Add(float64(rawSize)) atomic.AddInt64(&m.pendingDABytes, int64(daSize)) } -func (m *Metrics) RecordL2BlockInChannel(block *types.Block) { - daSize, rawSize := estimateBatchSize(block) +// This method is called when a pending block is pruned. +// It is a rare edge case where a block is loaded and pruned before it gets into a channel. +// This may happen if a previous batcher instance built a channel with that block +// which was confirmed _after_ the current batcher pulled it from the sequencer. 
+func (m *Metrics) RecordPendingBlockPruned(rawSize, daSize uint64) { + m.pendingBlocksBytesCurrent.Add(-1.0 * float64(rawSize)) + atomic.AddInt64(&m.pendingDABytes, -1*int64(daSize)) +} + +func (m *Metrics) RecordL2BlockInChannel(rawSize, daSize uint64) { m.pendingBlocksBytesCurrent.Add(-1.0 * float64(rawSize)) atomic.AddInt64(&m.pendingDABytes, -1*int64(daSize)) // Refer to RecordL2BlocksAdded to see the current + count of bytes added to a channel @@ -459,17 +474,21 @@ func (m *Metrics) RecordThrottleControllerType(controllerType config.ThrottleCon } } -func (m *Metrics) RecordPendingBytesVsThreshold(pendingBytes, threshold uint64, controllerType config.ThrottleControllerType) { - ratio := float64(pendingBytes) / float64(threshold) +func (m *Metrics) RecordUnsafeBytesVsThreshold(unsafeBytes, threshold uint64, controllerType config.ThrottleControllerType) { + ratio := float64(unsafeBytes) / float64(threshold) for _, t := range config.ThrottleControllerTypes { if t == controllerType { - m.pendingBytesRatio.WithLabelValues(string(t)).Set(ratio) + m.unsafeBytesRatio.WithLabelValues(string(t)).Set(ratio) } else { - m.pendingBytesRatio.WithLabelValues(string(t)).Set(0) + m.unsafeBytesRatio.WithLabelValues(string(t)).Set(0) } } } +func (m *Metrics) RecordUnsafeDABytes(unsafeDABytes int64) { + m.unsafeDABytesGauge.Set(float64(unsafeDABytes)) +} + // ClearAllStateMetrics clears all state metrics. // // This should cover any metric which is a Gauge and is incremented / decremented rather than "set". @@ -481,26 +500,6 @@ func (m *Metrics) ClearAllStateMetrics() { m.pendingBlocksBytesCurrent.Set(0) } -// estimateBatchSize returns the estimated size of the block in a batch both with compression ('daSize') and without -// ('rawSize'). 
-func estimateBatchSize(block *types.Block) (daSize, rawSize uint64) { - daSize = uint64(70) // estimated overhead of batch metadata - rawSize = uint64(70) - for _, tx := range block.Transactions() { - // Deposit transactions are not included in batches - if tx.IsDepositTx() { - continue - } - bigSize := tx.RollupCostData().EstimatedDASize() - if bigSize.IsUint64() { // this should always be true, but if not just ignore - daSize += bigSize.Uint64() - } - // Add 2 for the overhead of encoding the tx bytes in a RLP list - rawSize += tx.Size() + 2 - } - return -} - // RecordThrottleControllerState records the state of the PID controller func (m *Metrics) RecordThrottleControllerState(error, integral, derivative float64) { m.pidControllerError.Set(error) diff --git a/op-batcher/metrics/noop.go b/op-batcher/metrics/noop.go index dce831e0f9a80..0d062e67d3931 100644 --- a/op-batcher/metrics/noop.go +++ b/op-batcher/metrics/noop.go @@ -6,7 +6,6 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" @@ -34,8 +33,9 @@ func (*noopMetrics) RecordLatestL1Block(l1ref eth.L1BlockRef) {} func (*noopMetrics) RecordL2BlocksLoaded(eth.L2BlockRef) {} func (*noopMetrics) RecordChannelOpened(derive.ChannelID, int) {} func (*noopMetrics) RecordL2BlocksAdded(eth.L2BlockRef, int, int, int, int) {} -func (*noopMetrics) RecordL2BlockInPendingQueue(*types.Block) {} -func (*noopMetrics) RecordL2BlockInChannel(*types.Block) {} +func (*noopMetrics) RecordL2BlockInPendingQueue(uint64, uint64) {} +func (*noopMetrics) RecordL2BlockInChannel(uint64, uint64) {} +func (*noopMetrics) RecordPendingBlockPruned(uint64, uint64) {} func (*noopMetrics) RecordChannelClosed(derive.ChannelID, int, int, int, int, error) {} @@ -47,9 +47,11 @@ func (*noopMetrics) RecordThrottleIntensity(intensity float64, controllerType co } func (*noopMetrics) RecordThrottleParams(maxTxSize, 
maxBlockSize uint64) {} func (*noopMetrics) RecordThrottleControllerType(controllerType config.ThrottleControllerType) {} -func (*noopMetrics) RecordPendingBytesVsThreshold(pendingBytes, threshold uint64, controllerType config.ThrottleControllerType) { +func (*noopMetrics) RecordUnsafeBytesVsThreshold(pendingBytes, threshold uint64, controllerType config.ThrottleControllerType) { } +func (*noopMetrics) RecordUnsafeDABytes(int64) {} + // PID Controller specific metrics func (*noopMetrics) RecordThrottleControllerState(error, integral, derivative float64) {} func (*noopMetrics) RecordThrottleResponseTime(duration time.Duration) {} diff --git a/op-batcher/metrics/test.go b/op-batcher/metrics/test.go index dc4c759ab5b3e..1df3676605d9f 100644 --- a/op-batcher/metrics/test.go +++ b/op-batcher/metrics/test.go @@ -1,9 +1,5 @@ package metrics -import ( - "github.com/ethereum/go-ethereum/core/types" -) - type TestMetrics struct { noopMetrics PendingBlocksBytesCurrent float64 @@ -13,13 +9,11 @@ type TestMetrics struct { var _ Metricer = new(TestMetrics) -func (m *TestMetrics) RecordL2BlockInPendingQueue(block *types.Block) { - daSize, rawSize := estimateBatchSize(block) +func (m *TestMetrics) RecordL2BlockInPendingQueue(rawSize, daSize uint64) { m.PendingBlocksBytesCurrent += float64(rawSize) m.pendingDABytes += float64(daSize) } -func (m *TestMetrics) RecordL2BlockInChannel(block *types.Block) { - daSize, rawSize := estimateBatchSize(block) +func (m *TestMetrics) RecordL2BlockInChannel(rawSize, daSize uint64) { m.PendingBlocksBytesCurrent -= float64(rawSize) m.pendingDABytes -= float64(daSize) } @@ -34,3 +28,8 @@ func (m *TestMetrics) ClearAllStateMetrics() { m.ChannelQueueLength = 0 m.pendingDABytes = 0 } + +func (m *TestMetrics) RecordPendingBlockPruned(rawSize, daSize uint64) { + m.PendingBlocksBytesCurrent -= float64(rawSize) + m.pendingDABytes -= float64(daSize) +} diff --git a/op-batcher/rpc/api.go b/op-batcher/rpc/api.go index 0e8beab00295f..eeb113373c28f 100644 --- 
a/op-batcher/rpc/api.go +++ b/op-batcher/rpc/api.go @@ -15,6 +15,7 @@ import ( type BatcherDriver interface { StartBatchSubmitting() error StopBatchSubmitting(ctx context.Context) error + Flush(ctx context.Context) error SetThrottleController(controllerType config.ThrottleControllerType, pidConfig *config.PIDConfig) error GetThrottleControllerInfo() (config.ThrottleControllerInfo, error) ResetThrottleController() error @@ -87,3 +88,7 @@ func (a *adminAPI) GetThrottleController(_ context.Context) (config.ThrottleCont func (a *adminAPI) ResetThrottleController(_ context.Context) error { return a.b.ResetThrottleController() } + +func (a *adminAPI) FlushBatcher(ctx context.Context) error { + return a.b.Flush(ctx) +} diff --git a/op-batcher/throttling.md b/op-batcher/throttling.md index ed293c70c44b4..bd4bacb3946d5 100644 --- a/op-batcher/throttling.md +++ b/op-batcher/throttling.md @@ -15,10 +15,22 @@ The throttling system prevents these issues by instructing sequencers to limit b ## Throttling Controller Types -The batcher supports four throttling strategies, each with different response characteristics: +The batcher supports four throttling strategies, each with different response characteristics. The strategies can be understood in this +diagram: +![Throttling Strategies](./images/throttling.png) + +* Each strategy responds to the `unsafe_da_bytes` metric and corresponding thresholds `throttle.unsafe-da-bytes-lower/upper-threshold`, and results a throttling "intensity" between 0 and 1. + +* This intensity is then mapped to a maximum tx size and maximum block size to control the `miner_setMaxDASize(maxTxSize, maxBlockSize)` API calls made to block builders, depending on the configuration variables shown in the diagram above. 
+ +* When the throttling intensity is zero (the `unsafe_da_bytes` is less than `unsafe-da-bytes-lower-threshold`), blocks will continue to be limited at `throttle.block-size-upper-limit`, whereas transactions are not throttled at all (by using `maxTxSize=0`). + +> NOTE +> Be aware that using `0` for either +> `throttle.block-size-lower-limit` and `throttle.tx-size-lower-limit` +> results in no throttling limits being applied (for blocks and transactions respectively). ### Step Controller (Default) -![Step Controller Response](./images/step_throttling.png) **Behavior**: Binary on/off throttling - **Below threshold**: No throttling applied @@ -26,8 +38,10 @@ The batcher supports four throttling strategies, each with different response ch - **Use case**: Simple, predictable throttling behavior - **Best for**: Environments requiring clear, binary throttling states +> [!WARNING] +> If selecting the step controller, you should **not** rely on default throttling parameters as this could cause too much throttling to be applied too quickly. + ### Linear Controller -![Linear Controller Response](./images/linear_throttling.png) **Behavior**: Linear scaling throttling intensity - **Response curve**: Gradual increase from threshold to maximum threshold @@ -36,7 +50,6 @@ The batcher supports four throttling strategies, each with different response ch - **Best for**: Steady load patterns with predictable growth ### Quadratic Controller -![Quadratic Controller Response](./images/quadratic_throttling.png) **Behavior**: Quadratic scaling throttling intensity - **Low overload**: Gentle throttling response @@ -61,43 +74,6 @@ PID Controller is a control mechanism that automatically adjusts the batcher's t > > The PID controller is experimental and should only be used by users with deep understanding of control theory. Improper configuration can lead to system instability, oscillations, or poor performance. Use at your own risk and only with extensive testing. 
-## Configuration - -### CLI Configuration - -Configure throttling at startup using command-line flags: - -```bash -# Basic throttling parameters ---throttle-threshold=1000000 # Bytes threshold for throttling activation ---throttle-tx-size=128000 # Max transaction size when throttling ---throttle-block-size=2000000 # Max block size when throttling ---throttle-always-block-size=500000 # Always-applied block size limit - -# Controller type and multiplier ---throttle-controller-type=quadratic # Controller type: step, linear, quadratic, pid ---throttle-threshold-multiplier=2.5 # Multiplier for quadratic controller - -# PID-specific parameters (required when using PID controller) ---throttle-pid-kp=0.3 # Proportional gain ---throttle-pid-ki=0.15 # Integral gain ---throttle-pid-kd=0.08 # Derivative gain ---throttle-pid-integral-max=50.0 # Maximum integral accumulation ---throttle-pid-output-max=1.0 # Maximum controller output ---throttle-pid-sample-time=5ms # Controller update frequency - -# Additional endpoints to throttle (e.g., builders in rollup-boost) ---additional-throttling-endpoints=http://builder1:8545,http://builder2:8545 -``` - -### Environment Variables - -```bash -export OP_BATCHER_THROTTLE_THRESHOLD=1000000 -export OP_BATCHER_THROTTLE_CONTROLLER_TYPE=quadratic -export OP_BATCHER_THROTTLE_THRESHOLD_MULTIPLIER=2.5 -``` - ## Runtime Management via RPC The batcher exposes admin RPC endpoints for dynamic throttling control without restarts: diff --git a/op-chain-ops/addresses/contracts.go b/op-chain-ops/addresses/contracts.go index d526e28751156..c531fa27be20d 100644 --- a/op-chain-ops/addresses/contracts.go +++ b/op-chain-ops/addresses/contracts.go @@ -36,6 +36,7 @@ type ImplementationsContracts struct { OpcmStandardValidatorImpl common.Address DelayedWethImpl common.Address OptimismPortalImpl common.Address + OptimismPortalInteropImpl common.Address EthLockboxImpl common.Address PreimageOracleImpl common.Address MipsImpl common.Address diff --git 
a/op-chain-ops/cmd/check-ecotone/main.go b/op-chain-ops/cmd/check-ecotone/main.go index f0c882387534e..5e52cc70f798b 100644 --- a/op-chain-ops/cmd/check-ecotone/main.go +++ b/op-chain-ops/cmd/check-ecotone/main.go @@ -176,7 +176,7 @@ func makeCommandAction(fn CheckAction) func(c *cli.Context) error { if err != nil { return fmt.Errorf("failed to dial L2 RPC: %w", err) } - rollupCl, err := dial.DialRollupClientWithTimeout(c.Context, time.Second*20, logger, c.String(EndpointRollup.Name)) + rollupCl, err := dial.DialRollupClientWithTimeout(c.Context, logger, c.String(EndpointRollup.Name)) if err != nil { return fmt.Errorf("failed to dial rollup node RPC: %w", err) } @@ -500,7 +500,7 @@ func checkBlobTxDenial(ctx context.Context, env *actionEnv) error { for i := 0; i < 4096; i++ { blob[32*i] &= 0b0011_1111 } - sidecar, blobHashes, err := txmgr.MakeSidecar([]*eth.Blob{&blob}) + sidecar, blobHashes, err := txmgr.MakeSidecar([]*eth.Blob{&blob}, false) if err != nil { return fmt.Errorf("failed to make sidecar: %w", err) } @@ -512,7 +512,7 @@ func checkBlobTxDenial(ctx context.Context, env *actionEnv) error { return fmt.Errorf("the L1 block %s (time %d) is not ecotone yet", latestHeader.Hash(), latestHeader.Time) } - blobBaseFee := eth.CalcBlobFeeDefault(latestHeader) + blobBaseFee := eth.CalcBlobFeeCancun(*latestHeader.ExcessBlobGas) blobFeeCap := new(uint256.Int).Mul(uint256.NewInt(2), uint256.MustFromBig(blobBaseFee)) if blobFeeCap.Lt(uint256.NewInt(params.GWei)) { // ensure we meet 1 gwei geth tx-pool minimum blobFeeCap = uint256.NewInt(params.GWei) diff --git a/op-chain-ops/cmd/check-prestate/main.go b/op-chain-ops/cmd/check-prestate/main.go index a08c8a3d840e7..a3a5864ddf167 100644 --- a/op-chain-ops/cmd/check-prestate/main.go +++ b/op-chain-ops/cmd/check-prestate/main.go @@ -5,14 +5,13 @@ import ( "encoding/json" "flag" "fmt" - "io" - "net/http" "os" - "os/exec" - "path/filepath" "strings" "github.com/BurntSushi/toml" + 
"github.com/ethereum-optimism/optimism/op-chain-ops/cmd/check-prestate/prestate" + "github.com/ethereum-optimism/optimism/op-chain-ops/cmd/check-prestate/registry" + "github.com/ethereum-optimism/optimism/op-chain-ops/cmd/check-prestate/types" "github.com/ethereum-optimism/optimism/op-program/prestates" oplog "github.com/ethereum-optimism/optimism/op-service/log" "github.com/ethereum/go-ethereum/common" @@ -20,45 +19,14 @@ import ( "github.com/ethereum/go-ethereum/superchain" "github.com/mattn/go-isatty" "golang.org/x/exp/maps" - "golang.org/x/mod/modfile" ) -const ( - monorepoGoModAtTag = "https://github.com/ethereum-optimism/optimism/raw/refs/tags/%s/go.mod" - superchainRegistryCommitAtRef = "https://github.com/ethereum-optimism/op-geth/raw/%s/superchain-registry-commit.txt" - superchainConfigsZipAtTag = "https://github.com/ethereum-optimism/op-geth/raw/refs/tags/%s/superchain/superchain-configs.zip" - syncSuperchainScript = "https://github.com/ethereum-optimism/op-geth/raw/refs/heads/optimism/sync-superchain.sh" -) - -type PrestateInfo struct { - Hash common.Hash `json:"hash"` - Version string `json:"version"` - Type string `json:"type"` - - OpProgram CommitInfo `json:"op-program"` - OpGeth CommitInfo `json:"op-geth"` - SuperchainRegistry CommitInfo `json:"superchain-registry"` - - UpToDateChains []string `json:"up-to-date-chains"` - OutdatedChains []OutdatedChain `json:"outdated-chains"` - MissingChains []string `json:"missing-chains"` -} - -type OutdatedChain struct { - Name string `json:"name"` - Diff *Diff `json:"diff,omitempty"` -} - -type CommitInfo struct { - Commit string `json:"commit"` - DiffUrl string `json:"diff-url"` - DiffCmd string `json:"diff-cmd"` -} - -type Diff struct { - Msg string `json:"message"` - Prestate any `json:"prestate"` - Latest any `json:"latest"` +type FPProgramType interface { + FindVersions(log log.Logger, prestateVersion string) ( + elCommitInfo types.CommitInfo, + fppCommitInfo types.CommitInfo, + superChainRegistryCommit 
string, + prestateConfigs *superchain.ChainConfigLoader) } func main() { @@ -77,6 +45,9 @@ func main() { flag.StringVar(&prestateHashStr, "prestate-hash", "", "Specify the absolute prestate hash to verify") flag.StringVar(&chainsStr, "chains", "", "List of chains to consider in the report. Comma separated. Default: all chains in the superchain-registry") + var versionsOverrideFile string + flag.StringVar(&versionsOverrideFile, "versions-file", "", "Override the prestate versions TOML file") + // Parse the command-line arguments flag.Parse() if prestateHashStr == "" { @@ -101,7 +72,7 @@ func main() { log.Crit("--prestate-hash is invalid") } - prestateReleases, err := prestates.LoadReleases("") + prestateReleases, err := prestates.LoadReleases(versionsOverrideFile) if err != nil { log.Crit("Failed to load prestate releases list", "err", err) } @@ -120,50 +91,32 @@ func main() { if prestateVersion == "" { log.Crit("Failed to find a prestate release with hash", "hash", prestateHash) } - prestateTag := fmt.Sprintf("op-program/v%s", prestateVersion) - log.Info("Found prestate", "version", prestateVersion, "type", prestateType, "tag", prestateTag) + log.Info("Found prestate", "version", prestateVersion, "type", prestateType) - modFile, err := fetchMonorepoGoMod(prestateTag) - if err != nil { - log.Crit("Failed to fetch go mod", "err", err) - } - var gethVersion string - for _, replace := range modFile.Replace { - if replace.Old.Path == "github.com/ethereum/go-ethereum" { - gethVersion = replace.New.Version - break - } + var prestateImpl FPProgramType + switch prestateType { + case "cannon32", "cannon64", "interop": + prestateImpl = prestate.NewOPProgramPrestate() + case "cannon-kona": + prestateImpl = prestate.NewKonaPrestate() + default: + log.Crit("Invalid prestate type", "type", prestateType) } - if gethVersion == "" { - log.Crit("Failed to find op-geth replace in go.mod") - } - log.Info("Found op-geth version", "version", gethVersion) - - registryCommitBytes, err := 
fetch(fmt.Sprintf(superchainRegistryCommitAtRef, gethVersion)) + elCommitInfo, fppCommitInfo, commit, prestateConfigs := prestateImpl.FindVersions(log, prestateVersion) if err != nil { - log.Crit("Failed to fetch superchain registry commit info", "err", err) + log.Crit("Failed to load configuration for prestate info", "err", err) } - commit := strings.TrimSpace(string(registryCommitBytes)) - log.Info("Found superchain registry commit info", "commit", commit) - prestateConfigData, err := fetch(fmt.Sprintf(superchainConfigsZipAtTag, gethVersion)) - if err != nil { - log.Crit("Failed to fetch prestate's superchain registry config zip", "err", err) - } - prestateConfigs, err := superchain.NewChainConfigLoader(prestateConfigData) - if err != nil { - log.Crit("Failed to parse prestate's superchain registry config zip", "err", err) - } prestateNames := prestateConfigs.ChainNames() - latestConfigs, err := latestSuperchainConfigs() + latestConfigs, err := registry.LatestSuperchainConfigs() if err != nil { log.Crit("Failed to get latest superchain configs", "err", err) } knownChains := make(map[string]bool) - var supportedChains []string - outdatedChains := make(map[string]OutdatedChain) + supportedChains := make([]string, 0) // Not null for json serialization + outdatedChains := make(map[string]types.OutdatedChain) for _, name := range prestateNames { if !chainFilter(name) { continue @@ -174,7 +127,7 @@ func main() { log.Crit("Failed to check config", "chain", name, "err", err) } if diff != nil { - outdatedChains[name] = OutdatedChain{ + outdatedChains[name] = types.OutdatedChain{ Name: name, Diff: diff, } @@ -197,12 +150,12 @@ func main() { } } - report := PrestateInfo{ + report := types.PrestateInfo{ Hash: prestateHash, Version: prestateVersion, Type: prestateType, - OpProgram: commitInfo("optimism", prestateTag, "develop", ""), - OpGeth: commitInfo("op-geth", gethVersion, "optimism", ""), + FppProgram: fppCommitInfo, + ExecutionClient: elCommitInfo, SuperchainRegistry: 
commitInfo("superchain-registry", commit, "main", "superchain"), UpToDateChains: supportedChains, OutdatedChains: maps.Values(outdatedChains), @@ -216,7 +169,7 @@ func main() { } } -func checkConfig(network string, actual *superchain.ChainConfigLoader, expected *superchain.ChainConfigLoader) (*Diff, error) { +func checkConfig(network string, actual *superchain.ChainConfigLoader, expected *superchain.ChainConfigLoader) (*types.Diff, error) { actualChainID, err := actual.ChainIDByName(network) if err != nil { return nil, fmt.Errorf("failed to get actual chain ID for %v: %w", network, err) @@ -226,7 +179,7 @@ func checkConfig(network string, actual *superchain.ChainConfigLoader, expected return nil, fmt.Errorf("failed to get expected chain ID for %v: %w", network, err) } if actualChainID != expectedChainID { - return &Diff{ + return &types.Diff{ Msg: "Chain ID mismatch", Prestate: actualChainID, Latest: expectedChainID, @@ -264,7 +217,7 @@ func checkConfig(network string, actual *superchain.ChainConfigLoader, expected return nil, fmt.Errorf("failed to get genesis for expected chain %v: %w", network, err) } if !bytes.Equal(actualGenesis, expectedGenesis) { - return &Diff{ + return &types.Diff{ Msg: "Genesis mismatch", Prestate: string(actualGenesis), Latest: string(expectedGenesis), @@ -273,7 +226,7 @@ func checkConfig(network string, actual *superchain.ChainConfigLoader, expected return nil, nil } -func checkChainConfig(actual *superchain.ChainConfig, expected *superchain.ChainConfig) (*Diff, error) { +func checkChainConfig(actual *superchain.ChainConfig, expected *superchain.ChainConfig) (*types.Diff, error) { actualStr, err := toml.Marshal(actual) if err != nil { return nil, fmt.Errorf("failed to marshal actual chain config: %w", err) @@ -283,7 +236,7 @@ func checkChainConfig(actual *superchain.ChainConfig, expected *superchain.Chain return nil, fmt.Errorf("failed to marshal expected chain config: %w", err) } if !bytes.Equal(actualStr, expectedStr) { - return &Diff{ 
+ return &types.Diff{ Msg: "Chain config mismatch", Prestate: actual, Latest: expected, @@ -291,70 +244,10 @@ func checkChainConfig(actual *superchain.ChainConfig, expected *superchain.Chain } return nil, nil } - -// latestSuperchainConfigs loads the latest config from the superchain-registry main branch using the -// sync-superchain.sh script from op-geth to create a zip of configs that can be read by op-geth's ChainConfigLoader. -func latestSuperchainConfigs() (*superchain.ChainConfigLoader, error) { - // Download the op-geth script to build the superchain config - script, err := fetch(syncSuperchainScript) - if err != nil { - return nil, fmt.Errorf("failed to fetch sync-superchain.sh script: %w", err) - } - dir, err := os.MkdirTemp("", "checkprestate") - if err != nil { - return nil, fmt.Errorf("failed to create temp dir: %w", err) - } - defer os.RemoveAll(dir) - if err := os.Mkdir(filepath.Join(dir, "superchain"), 0o700); err != nil { - return nil, fmt.Errorf("failed to create superchain dir: %w", err) - } - scriptPath := filepath.Join(dir, "sync-superchain.sh") - if err := os.WriteFile(scriptPath, script, 0o700); err != nil { - return nil, fmt.Errorf("failed to write sync-superchain.sh: %w", err) - } - if err := os.WriteFile(filepath.Join(dir, "superchain-registry-commit.txt"), []byte("main"), 0o600); err != nil { - return nil, fmt.Errorf("failed to write superchain-registry-commit.txt: %w", err) - } - cmd := exec.Command(scriptPath) - cmd.Stdout = os.Stderr - cmd.Stderr = os.Stderr - cmd.Dir = dir - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("failed to build superchain config zip: %w", err) - } - configBytes, err := os.ReadFile(filepath.Join(dir, "superchain/superchain-configs.zip")) - if err != nil { - return nil, fmt.Errorf("failed to read generated superchain-configs.zip: %w", err) - } - return superchain.NewChainConfigLoader(configBytes) -} - -func commitInfo(repository string, commit string, mainBranch string, dir string) CommitInfo { - 
return CommitInfo{ +func commitInfo(repository string, commit string, mainBranch string, dir string) types.CommitInfo { + return types.CommitInfo{ Commit: commit, DiffUrl: fmt.Sprintf("https://github.com/ethereum-optimism/%s/compare/%s...%s", repository, commit, mainBranch), DiffCmd: fmt.Sprintf("git fetch && git diff %s...origin/%s %s", commit, mainBranch, dir), } } - -func fetchMonorepoGoMod(opProgramTag string) (*modfile.File, error) { - goModUrl := fmt.Sprintf(monorepoGoModAtTag, opProgramTag) - goMod, err := fetch(goModUrl) - if err != nil { - return nil, fmt.Errorf("failed to fetch go.mod: %w", err) - } - - return modfile.Parse("go.mod", goMod, nil) -} - -func fetch(url string) ([]byte, error) { - resp, err := http.Get(url) - if err != nil { - return nil, fmt.Errorf("failed to fetch %v: %w", url, err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("failed to fetch %v: %s", url, resp.Status) - } - return io.ReadAll(resp.Body) -} diff --git a/op-chain-ops/cmd/check-prestate/prestate/kona.go b/op-chain-ops/cmd/check-prestate/prestate/kona.go new file mode 100644 index 0000000000000..e9ba0cb30e7fa --- /dev/null +++ b/op-chain-ops/cmd/check-prestate/prestate/kona.go @@ -0,0 +1,87 @@ +package prestate + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/ethereum-optimism/optimism/op-chain-ops/cmd/check-prestate/registry" + "github.com/ethereum-optimism/optimism/op-chain-ops/cmd/check-prestate/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/superchain" +) + +type KonaPrestate struct { +} + +func NewKonaPrestate() *KonaPrestate { + return &KonaPrestate{} +} + +func (p *KonaPrestate) FindVersions(log log.Logger, prestateVersion string) ( + elCommitInfo types.CommitInfo, + fppCommitInfo types.CommitInfo, + superChainRegistryCommit string, + prestateConfigs *superchain.ChainConfigLoader) { + + prestateTag := fmt.Sprintf("kona-client/v%s", prestateVersion) + 
log.Info("Found prestate tag", "tag", prestateTag) + fppCommitInfo = types.NewCommitInfo("op-rs", "kona", prestateTag, "main", "") + + superChainRegistryCommit, err := fetchSuperchainRegistryCommit(prestateTag) + if err != nil { + log.Crit("Failed to fetch superchain registry commit", "err", err) + } + + // Kona doesn't directly depend on op-reth but uses various crates from it. + // Skip attempting to report a specific op-reth version for now. + elCommitInfo = types.CommitInfo{} + + // kona has its own build process to convert superchain-registry config into a custom JSON format it uses + // Rather than re-implement that custom JSON format and work out how to convert it to the go format + // (which could be brittle), we use the op-geth sync process to convert the superchain registry at the same commit + // to the go format directly. This is unfortunately also potentially brittle since we have to use the latest + // sync script from op-geth rather than a fixed version but seems like the lowest risk option. + configs, err := registry.SuperchainConfigsForCommit(superChainRegistryCommit) + if err != nil { + log.Crit("Failed to fetch chain configs for prestate", "err", err) + } + prestateConfigs = configs + return +} + +func fetchSuperchainRegistryCommit(ref string) (string, error) { + endpoint := "https://api.github.com/repos/op-rs/kona/contents/crates/protocol/registry/superchain-registry?ref=" + + url.QueryEscape(ref) + + req, err := http.NewRequest(http.MethodGet, endpoint, nil) + if err != nil { + return "", fmt.Errorf("build request: %w", err) + } + req.Header.Set("Accept", "application/vnd.github+json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return "", fmt.Errorf("http request: %w", err) + } + defer resp.Body.Close() + + // Parse error payloads from GitHub if status != 200. 
+ if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("failed to fetch superchain-registry version, http status: %s", resp.Status) + } + + // Success path: expect a single "submodule" content object with "sha". + var content struct { + Type string `json:"type"` // should be "submodule" + SHA string `json:"sha"` + } + if err := json.NewDecoder(resp.Body).Decode(&content); err != nil { + return "", fmt.Errorf("decode response: %w", err) + } + if content.Type != "submodule" { + return "", fmt.Errorf("expected a submodule got type %q", content.Type) + } + return content.SHA, nil +} diff --git a/op-chain-ops/cmd/check-prestate/prestate/opprogram.go b/op-chain-ops/cmd/check-prestate/prestate/opprogram.go new file mode 100644 index 0000000000000..5360851c2e70d --- /dev/null +++ b/op-chain-ops/cmd/check-prestate/prestate/opprogram.go @@ -0,0 +1,86 @@ +package prestate + +import ( + "fmt" + "strings" + + "github.com/ethereum-optimism/optimism/op-chain-ops/cmd/check-prestate/types" + "github.com/ethereum-optimism/optimism/op-chain-ops/cmd/check-prestate/util" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/superchain" + "golang.org/x/mod/modfile" +) + +const ( + monorepoGoModAtTag = "https://github.com/ethereum-optimism/optimism/raw/refs/tags/%s/go.mod" + superchainRegistryCommitAtRef = "https://github.com/ethereum-optimism/op-geth/raw/%s/superchain-registry-commit.txt" + superchainConfigsZipAtTag = "https://github.com/ethereum-optimism/op-geth/raw/refs/tags/%s/superchain/superchain-configs.zip" +) + +type OPProgramPrestate struct { +} + +func NewOPProgramPrestate() *OPProgramPrestate { + return &OPProgramPrestate{} +} + +func (p *OPProgramPrestate) FindVersions(log log.Logger, prestateVersion string) ( + elCommitInfo types.CommitInfo, + fppCommitInfo types.CommitInfo, + superChainRegistryCommit string, + prestateConfigs *superchain.ChainConfigLoader, +) { + prestateTag := fmt.Sprintf("op-program/v%s", prestateVersion) + log.Info("Found 
prestate tag", "tag", prestateTag) + fppCommitInfo = types.NewCommitInfo("ethereum-optimism", "optimism", prestateTag, "develop", "") + + modFile, err := fetchMonorepoGoMod(prestateTag) + if err != nil { + log.Crit("Failed to fetch go mod", "err", err) + } + elVersion := p.findOpGethVersion(log, modFile) + elCommitInfo = types.NewCommitInfo("ethereum-optimism", "op-geth", elVersion, "optimism", "") + + registryCommitBytes, err := util.Fetch(fmt.Sprintf(superchainRegistryCommitAtRef, elVersion)) + if err != nil { + log.Crit("Failed to fetch superchain registry commit info", "err", err) + } + superChainRegistryCommit = strings.TrimSpace(string(registryCommitBytes)) + log.Info("Found superchain registry commit info", "commit", superChainRegistryCommit) + + prestateConfigData, err := util.Fetch(fmt.Sprintf(superchainConfigsZipAtTag, elVersion)) + if err != nil { + log.Crit("Failed to fetch prestate's superchain registry config zip", "err", err) + } + configLoader, err := superchain.NewChainConfigLoader(prestateConfigData) + if err != nil { + log.Crit("Failed to parse prestate's superchain registry config zip", "err", err) + } + prestateConfigs = configLoader + return +} + +func (p *OPProgramPrestate) findOpGethVersion(log log.Logger, modFile *modfile.File) string { + var elVersion string + for _, replace := range modFile.Replace { + if replace.Old.Path == "github.com/ethereum/go-ethereum" { + elVersion = replace.New.Version + break + } + } + if elVersion == "" { + log.Crit("Failed to find op-geth replace in go.mod") + } + log.Info("Found op-geth version", "version", elVersion) + return elVersion +} + +func fetchMonorepoGoMod(opProgramTag string) (*modfile.File, error) { + goModUrl := fmt.Sprintf(monorepoGoModAtTag, opProgramTag) + goMod, err := util.Fetch(goModUrl) + if err != nil { + return nil, fmt.Errorf("failed to fetch go.mod: %w", err) + } + + return modfile.Parse("go.mod", goMod, nil) +} diff --git a/op-chain-ops/cmd/check-prestate/registry/loader.go 
b/op-chain-ops/cmd/check-prestate/registry/loader.go new file mode 100644 index 0000000000000..ae92550d9a8d2 --- /dev/null +++ b/op-chain-ops/cmd/check-prestate/registry/loader.go @@ -0,0 +1,56 @@ +package registry + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + + "github.com/ethereum-optimism/optimism/op-chain-ops/cmd/check-prestate/util" + "github.com/ethereum/go-ethereum/superchain" +) + +const ( + syncSuperchainScript = "https://raw.githubusercontent.com/ethereum-optimism/op-geth/optimism/sync-superchain.sh" +) + +// LatestSuperchainConfigs loads the latest config from the superchain-registry main branch using the +// sync-superchain.sh script from op-geth to create a zip of configs that can be read by op-geth's ChainConfigLoader. +func LatestSuperchainConfigs() (*superchain.ChainConfigLoader, error) { + return SuperchainConfigsForCommit("main") +} + +func SuperchainConfigsForCommit(registryCommit string) (*superchain.ChainConfigLoader, error) { + // Download the op-geth script to build the superchain config + script, err := util.Fetch(syncSuperchainScript) + if err != nil { + return nil, fmt.Errorf("failed to fetch sync-superchain.sh script: %w", err) + } + dir, err := os.MkdirTemp("", "checkprestate") + if err != nil { + return nil, fmt.Errorf("failed to create temp dir: %w", err) + } + defer os.RemoveAll(dir) + if err := os.Mkdir(filepath.Join(dir, "superchain"), 0o700); err != nil { + return nil, fmt.Errorf("failed to create superchain dir: %w", err) + } + scriptPath := filepath.Join(dir, "sync-superchain.sh") + if err := os.WriteFile(scriptPath, script, 0o700); err != nil { + return nil, fmt.Errorf("failed to write sync-superchain.sh: %w", err) + } + if err := os.WriteFile(filepath.Join(dir, "superchain-registry-commit.txt"), []byte(registryCommit), 0o600); err != nil { + return nil, fmt.Errorf("failed to write superchain-registry-commit.txt: %w", err) + } + cmd := exec.Command(scriptPath) + cmd.Stdout = os.Stderr + cmd.Stderr = os.Stderr + 
cmd.Dir = dir + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("failed to build superchain config zip: %w", err) + } + configBytes, err := os.ReadFile(filepath.Join(dir, "superchain/superchain-configs.zip")) + if err != nil { + return nil, fmt.Errorf("failed to read generated superchain-configs.zip: %w", err) + } + return superchain.NewChainConfigLoader(configBytes) +} diff --git a/op-chain-ops/cmd/check-prestate/types/types.go b/op-chain-ops/cmd/check-prestate/types/types.go new file mode 100644 index 0000000000000..3f80784df3fe0 --- /dev/null +++ b/op-chain-ops/cmd/check-prestate/types/types.go @@ -0,0 +1,46 @@ +package types + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common" +) + +type PrestateInfo struct { + Hash common.Hash `json:"hash"` + Version string `json:"version"` + Type string `json:"type"` + + FppProgram CommitInfo `json:"fpp-program"` + ExecutionClient CommitInfo `json:"execution-client"` + SuperchainRegistry CommitInfo `json:"superchain-registry"` + + UpToDateChains []string `json:"up-to-date-chains"` + OutdatedChains []OutdatedChain `json:"outdated-chains"` + MissingChains []string `json:"missing-chains"` +} + +type OutdatedChain struct { + Name string `json:"name"` + Diff *Diff `json:"diff,omitempty"` +} + +type CommitInfo struct { + Commit string `json:"commit"` + DiffUrl string `json:"diff-url"` + DiffCmd string `json:"diff-cmd"` +} + +func NewCommitInfo(org string, repository string, commit string, mainBranch string, dir string) CommitInfo { + return CommitInfo{ + Commit: commit, + DiffUrl: fmt.Sprintf("https://github.com/%s/%s/compare/%s...%s", org, repository, commit, mainBranch), + DiffCmd: fmt.Sprintf("git fetch && git diff %s...origin/%s %s", commit, mainBranch, dir), + } +} + +type Diff struct { + Msg string `json:"message"` + Prestate any `json:"prestate"` + Latest any `json:"latest"` +} diff --git a/op-chain-ops/cmd/check-prestate/util/fetch.go b/op-chain-ops/cmd/check-prestate/util/fetch.go new file mode 
100644 index 0000000000000..c5935d85a7651 --- /dev/null +++ b/op-chain-ops/cmd/check-prestate/util/fetch.go @@ -0,0 +1,19 @@ +package util + +import ( + "fmt" + "io" + "net/http" +) + +func Fetch(url string) ([]byte, error) { + resp, err := http.Get(url) + if err != nil { + return nil, fmt.Errorf("failed to fetch %v: %w", url, err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("failed to fetch %v: %s", url, resp.Status) + } + return io.ReadAll(resp.Body) +} diff --git a/op-chain-ops/cmd/deposit-hash/main.go b/op-chain-ops/cmd/deposit-hash/main.go index 9166a0667c090..8cc37ee8e1860 100644 --- a/op-chain-ops/cmd/deposit-hash/main.go +++ b/op-chain-ops/cmd/deposit-hash/main.go @@ -4,6 +4,7 @@ import ( "context" "flag" "fmt" + "time" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum/go-ethereum/common" @@ -18,27 +19,45 @@ func main() { flag.StringVar(&txHash, "tx", "", "Deposit transaction hash on L1") flag.Parse() + // Validate required parameters + if rpcURL == "" { + log.Crit("RPC URL is required. Use --rpc flag to specify L1 RPC URL") + } + if txHash == "" { + log.Crit("Transaction hash is required. 
Use --tx flag to specify deposit transaction hash") + } + depositLogTopic := common.HexToHash("0xb3813568d9991fc951961fcb4c784893574240a28925604d09fc577c55bb7c32") ethClient, err := ethclient.Dial(rpcURL) if err != nil { - log.Crit("Error creating RPC", "err", err) + log.Crit("Error creating RPC client", "rpc", rpcURL, "err", err) } - l1Receipt, err := ethClient.TransactionReceipt(context.TODO(), common.HexToHash(txHash)) + // Use proper context with timeout instead of context.TODO() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + l1Receipt, err := ethClient.TransactionReceipt(ctx, common.HexToHash(txHash)) if err != nil { - log.Crit("Error fetching transaction", "err", err) + log.Crit("Error fetching transaction receipt", "txHash", txHash, "err", err) } + var found bool for _, ethLog := range l1Receipt.Logs { if ethLog.Topics[0].String() == depositLogTopic.String() { + found = true reconstructedDep, err := derive.UnmarshalDepositLogEvent(ethLog) if err != nil { - log.Crit("Failed to parse deposit event ", "err", err) + log.Crit("Failed to parse deposit event", "err", err) } tx := types.NewTx(reconstructedDep) fmt.Println("L2 Tx Hash", tx.Hash().String()) } } + + if !found { + log.Crit("No deposit event found in transaction", "txHash", txHash) + } } diff --git a/op-chain-ops/cmd/op-run-block/main.go b/op-chain-ops/cmd/op-run-block/main.go index 5738fb7ddff23..04195fc142a00 100644 --- a/op-chain-ops/cmd/op-run-block/main.go +++ b/op-chain-ops/cmd/op-run-block/main.go @@ -158,7 +158,7 @@ func mainAction(c *cli.Context) error { if err != nil { return fmt.Errorf("failed to prepare witness data collector: %w", err) } - state.StartPrefetcher("debug", witness) + state.StartPrefetcher("debug", witness, nil) defer func() { // Even if the EVM fails, try to export witness data for the state-transition up to the error. 
witnessDump := witness.ToExecutionWitness() out, err := json.MarshalIndent(witnessDump, "", " ") @@ -308,6 +308,7 @@ func Process(logger log.Logger, config *params.ChainConfig, header = block.CreateGethHeader() blockHash = block.Hash blockNumber = new(big.Int).SetUint64(uint64(block.Number)) + blockTime = uint64(block.Time) allLogs []*types.Log gp = new(core.GasPool).AddGas(uint64(block.GasLimit)) ) @@ -342,7 +343,7 @@ func Process(logger log.Logger, config *params.ChainConfig, } statedb.SetTxContext(tx.Hash(), i) - receipt, err := core.ApplyTransactionWithEVM(msg, gp, statedb, blockNumber, blockHash, tx, usedGas, vmenv) + receipt, err := core.ApplyTransactionWithEVM(msg, gp, statedb, blockNumber, blockHash, blockTime, tx, usedGas, vmenv) if err != nil { return nil, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) } diff --git a/op-chain-ops/cmd/withdrawal/util.go b/op-chain-ops/cmd/withdrawal/util.go index 21182110369eb..e54a3a26b348a 100644 --- a/op-chain-ops/cmd/withdrawal/util.go +++ b/op-chain-ops/cmd/withdrawal/util.go @@ -84,9 +84,9 @@ func loadDepsetConfig(ctx *cli.Context, depSetFlag string) (depset.DependencySet } func createSupervisorClient(ctx *cli.Context, supervisorFlag string) (*sources.SupervisorClient, error) { - rpcCl, err := dial.DialRPCClientWithTimeout(ctx.Context, 1*time.Minute, log.Root(), ctx.String(supervisorFlag)) + cl, err := dial.DialSupervisorClientWithTimeout(ctx.Context, log.Root(), ctx.String(supervisorFlag)) if err != nil { - return nil, fmt.Errorf("failed to dial rollup client: %w", err) + return nil, fmt.Errorf("failed to dial supervisor: %w", err) } - return sources.NewSupervisorClient(client.NewBaseRPCClient(rpcCl)), nil + return cl, nil } diff --git a/op-chain-ops/devkeys/hd.go b/op-chain-ops/devkeys/hd.go index bf7349cfb932e..64db2df2d3088 100644 --- a/op-chain-ops/devkeys/hd.go +++ b/op-chain-ops/devkeys/hd.go @@ -4,8 +4,8 @@ import ( "crypto/ecdsa" "fmt" + "github.com/base/go-bip39" hdwallet 
"github.com/ethereum-optimism/go-ethereum-hdwallet" - "github.com/tyler-smith/go-bip39" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" diff --git a/op-chain-ops/foundry/allocs.go b/op-chain-ops/foundry/allocs.go index 715524a35d495..f205055a3d35a 100644 --- a/op-chain-ops/foundry/allocs.go +++ b/op-chain-ops/foundry/allocs.go @@ -113,11 +113,10 @@ func (d *ForgeAllocs) UnmarshalJSON(b []byte) error { for addr, acc := range allocs { acc := acc d.Accounts[addr] = types.Account{ - Code: acc.Code, - Storage: acc.Storage, - Balance: (*uint256.Int)(&acc.Balance).ToBig(), - Nonce: (uint64)(acc.Nonce), - PrivateKey: nil, + Code: acc.Code, + Storage: acc.Storage, + Balance: (*uint256.Int)(&acc.Balance).ToBig(), + Nonce: (uint64)(acc.Nonce), } } return nil diff --git a/op-chain-ops/foundry/sourcefs.go b/op-chain-ops/foundry/sourcefs.go index 964794e5fd650..eecf787e12363 100644 --- a/op-chain-ops/foundry/sourcefs.go +++ b/op-chain-ops/foundry/sourcefs.go @@ -21,6 +21,11 @@ import ( // - `/` a root dir, relative to where the source files are located (as per the compilationTarget metadata in an artifact). type SourceMapFS struct { fs fs.FS + + // optionally, the source-map FS can utilize the build-data of a specific compiler-profile. + // If left empty, assume there is a single compiler profile in the solidity-files-cache, and use that. + // The profile can be changed with SetProfile. Forge uses "default" as default profile name. + profile string } // NewSourceMapFS creates a new SourceMapFS. @@ -31,6 +36,12 @@ func NewSourceMapFS(fs fs.FS) *SourceMapFS { return &SourceMapFS{fs: fs} } +// SetCompilerProfile changes the compiler-profile that is looked +// for when reversing build-info of artifacts. +func (s *SourceMapFS) SetCompilerProfile(profile string) { + s.profile = profile +} + // ForgeBuild represents the JSON content of a forge-build entry in the `artifacts/build-info` output. 
type ForgeBuild struct { ID string `json:"id"` // ID of the build itself @@ -59,8 +70,8 @@ type ForgeBuildEntry struct { // ForgeBuildInfo represents a JSON entry that enumerates the latest builds per contract per compiler version. type ForgeBuildInfo struct { - // contract name -> solidity version -> build entry - Artifacts map[string]map[string]ForgeBuildEntry `json:"artifacts"` + // contract name -> solidity version -> profile -> build entry + Artifacts map[string]map[string]map[string]ForgeBuildEntry `json:"artifacts"` } // ForgeBuildCache rep @@ -87,7 +98,7 @@ func (s *SourceMapFS) readBuildCache() (*ForgeBuildCache, error) { // ReadSourceIDs reads the source-identifier to source file-path mapping that is needed to translate a source-map // of the given contract, the given compiler version, and within the given source file path. -func (s *SourceMapFS) ReadSourceIDs(path string, contract string, compilerVersion string) (map[srcmap.SourceID]string, error) { +func (s *SourceMapFS) ReadSourceIDs(path string, contract string, compilerVersion string, profile string) (map[srcmap.SourceID]string, error) { buildCache, err := s.readBuildCache() if err != nil { return nil, err @@ -100,13 +111,12 @@ func (s *SourceMapFS) ReadSourceIDs(path string, contract string, compilerVersio if !ok { return nil, fmt.Errorf("contract not found in artifact: %q", contract) } - var buildEntry ForgeBuildEntry + var byProfile map[string]ForgeBuildEntry if compilerVersion != "" { - entry, ok := byCompilerVersion[compilerVersion] + byProfile, ok = byCompilerVersion[compilerVersion] if !ok { return nil, fmt.Errorf("no known build for compiler version: %q", compilerVersion) } - buildEntry = entry } else { if len(byCompilerVersion) == 0 { return nil, errors.New("no known build, unspecified compiler version") @@ -114,8 +124,27 @@ func (s *SourceMapFS) ReadSourceIDs(path string, contract string, compilerVersio if len(byCompilerVersion) > 1 { return nil, fmt.Errorf("no compiler version specified, 
and more than one option: %s", strings.Join(maps.Keys(byCompilerVersion), ", ")) } - for _, entry := range byCompilerVersion { - buildEntry = entry + // select the only remaining entry + for _, v := range byCompilerVersion { + byProfile = v + } + } + var buildEntry ForgeBuildEntry + if profile != "" { + buildEntry, ok = byProfile[profile] + if !ok { + return nil, fmt.Errorf("no known build for profile: %q", profile) + } + } else { + if len(byProfile) == 0 { + return nil, errors.New("no known build, unspecified profile") + } + if len(byProfile) > 1 { + return nil, fmt.Errorf("no profile specified, and more than one option: %s", strings.Join(maps.Keys(byProfile), ", ")) + } + // select the only remaining entry + for _, v := range byProfile { + buildEntry = v } } build, err := s.readBuild(filepath.ToSlash(buildCache.Paths.BuildInfos), buildEntry.BuildID) @@ -139,7 +168,13 @@ func (s *SourceMapFS) SourceMap(artifact *Artifact, contract string) (*srcmap.So } // The commit suffix is ignored, the core semver part is what is used in the resolution of builds. basicCompilerVersion := strings.SplitN(artifact.Metadata.Compiler.Version, "+", 2)[0] - ids, err := s.ReadSourceIDs(srcPath, contract, basicCompilerVersion) + // Unfortunately, the "metadata" of an artifact does not store which compiler-profile it used. + // It's only part of the artifact name, which we don't have here. + // E.g. `Arithmetic.0.8.15.dispute.json` for "dispute" profile, + // and `Arithmetic.0.8.15.json` for the default profile. + // We allow the user to specify the profile to use here, with SourceMapFS.SetCompilerProfile. 
+ profile := s.profile + ids, err := s.ReadSourceIDs(srcPath, contract, basicCompilerVersion, profile) if err != nil { return nil, fmt.Errorf("failed to read source IDs of %q: %w", srcPath, err) } diff --git a/op-chain-ops/foundry/testdata/srcmaps/artifacts/build-info/c79aa2c3b4578aee2dd8f02d20b1aeb6.json b/op-chain-ops/foundry/testdata/srcmaps/artifacts/build-info/1bb28cee9518b06fd6ee7bb37b5854cb.json similarity index 54% rename from op-chain-ops/foundry/testdata/srcmaps/artifacts/build-info/c79aa2c3b4578aee2dd8f02d20b1aeb6.json rename to op-chain-ops/foundry/testdata/srcmaps/artifacts/build-info/1bb28cee9518b06fd6ee7bb37b5854cb.json index 59cd6663b18f0..94fd65fd66d5f 100644 --- a/op-chain-ops/foundry/testdata/srcmaps/artifacts/build-info/c79aa2c3b4578aee2dd8f02d20b1aeb6.json +++ b/op-chain-ops/foundry/testdata/srcmaps/artifacts/build-info/1bb28cee9518b06fd6ee7bb37b5854cb.json @@ -1 +1 @@ -{"id":"c79aa2c3b4578aee2dd8f02d20b1aeb6","source_id_to_path":{"0":"src/SimpleStorage.sol","1":"src/StorageLibrary.sol"},"language":"Solidity"} \ No newline at end of file +{"id":"1bb28cee9518b06fd6ee7bb37b5854cb","source_id_to_path":{"0":"src/SimpleStorage.sol","1":"src/StorageLibrary.sol"},"language":"Solidity"} \ No newline at end of file diff --git a/op-chain-ops/foundry/testdata/srcmaps/cache/solidity-files-cache.json b/op-chain-ops/foundry/testdata/srcmaps/cache/solidity-files-cache.json index 47bdef8c69654..9f9317c7eb178 100644 --- a/op-chain-ops/foundry/testdata/srcmaps/cache/solidity-files-cache.json +++ b/op-chain-ops/foundry/testdata/srcmaps/cache/solidity-files-cache.json @@ -1 +1 @@ 
-{"_format":"","paths":{"artifacts":"test-artifacts","build_infos":"artifacts/build-info","sources":"src","tests":"test","scripts":"scripts","libraries":["lib","node_modules"]},"files":{"src/SimpleStorage.sol":{"lastModificationDate":1724351550959,"contentHash":"25499c2e202ada22ebd26f8e886cc2e1","sourceName":"src/SimpleStorage.sol","compilerSettings":{"solc":{"optimizer":{"enabled":true,"runs":999999},"metadata":{"useLiteralContent":false,"bytecodeHash":"none","appendCBOR":true},"outputSelection":{"*":{"":["ast"],"*":["abi","evm.bytecode","evm.deployedBytecode","evm.methodIdentifiers","metadata","storageLayout","devdoc","userdoc"]}},"evmVersion":"cancun","viaIR":false,"libraries":{}},"vyper":{"evmVersion":"cancun","outputSelection":{"*":{"*":["abi","evm.bytecode","evm.deployedBytecode"]}}}},"imports":["src/StorageLibrary.sol"],"versionRequirement":"=0.8.15","artifacts":{"SimpleStorage":{"0.8.15":{"path":"SimpleStorage.sol/SimpleStorage.json","build_id":"c79aa2c3b4578aee2dd8f02d20b1aeb6"}}},"seenByCompiler":true},"src/StorageLibrary.sol":{"lastModificationDate":1724351550967,"contentHash":"61545ea51326b6aa0e3bafaf3116b0a8","sourceName":"src/StorageLibrary.sol","compilerSettings":{"solc":{"optimizer":{"enabled":true,"runs":999999},"metadata":{"useLiteralContent":false,"bytecodeHash":"none","appendCBOR":true},"outputSelection":{"*":{"":["ast"],"*":["abi","evm.bytecode","evm.deployedBytecode","evm.methodIdentifiers","metadata","storageLayout","devdoc","userdoc"]}},"evmVersion":"cancun","viaIR":false,"libraries":{}},"vyper":{"evmVersion":"cancun","outputSelection":{"*":{"*":["abi","evm.bytecode","evm.deployedBytecode"]}}}},"imports":[],"versionRequirement":"=0.8.15","artifacts":{"StorageLibrary":{"0.8.15":{"path":"StorageLibrary.sol/StorageLibrary.json","build_id":"c79aa2c3b4578aee2dd8f02d20b1aeb6"}}},"seenByCompiler":true}},"builds":["c79aa2c3b4578aee2dd8f02d20b1aeb6"]} \ No newline at end of file 
+{"_format":"","paths":{"artifacts":"test-artifacts","build_infos":"artifacts/build-info","sources":"src","tests":"test","scripts":"scripts","libraries":["lib","node_modules"]},"files":{"src/SimpleStorage.sol":{"lastModificationDate":1747615251310,"contentHash":"25499c2e202ada22ebd26f8e886cc2e1","interfaceReprHash":null,"sourceName":"src/SimpleStorage.sol","imports":["src/StorageLibrary.sol"],"versionRequirement":"=0.8.15","artifacts":{"SimpleStorage":{"0.8.15":{"default":{"path":"SimpleStorage.sol/SimpleStorage.json","build_id":"1bb28cee9518b06fd6ee7bb37b5854cb"}}}},"seenByCompiler":true},"src/StorageLibrary.sol":{"lastModificationDate":1747615251310,"contentHash":"61545ea51326b6aa0e3bafaf3116b0a8","interfaceReprHash":null,"sourceName":"src/StorageLibrary.sol","imports":[],"versionRequirement":"=0.8.15","artifacts":{"StorageLibrary":{"0.8.15":{"default":{"path":"StorageLibrary.sol/StorageLibrary.json","build_id":"1bb28cee9518b06fd6ee7bb37b5854cb"}}}},"seenByCompiler":true}},"builds":["1bb28cee9518b06fd6ee7bb37b5854cb"],"profiles":{"default":{"solc":{"optimizer":{"enabled":true,"runs":999999},"metadata":{"useLiteralContent":false,"bytecodeHash":"none","appendCBOR":true},"outputSelection":{"*":{"":["ast"],"*":["abi","evm.bytecode.object","evm.bytecode.sourceMap","evm.bytecode.linkReferences","evm.deployedBytecode.object","evm.deployedBytecode.sourceMap","evm.deployedBytecode.linkReferences","evm.deployedBytecode.immutableReferences","evm.methodIdentifiers","metadata","storageLayout","devdoc","userdoc"]}},"evmVersion":"cancun","viaIR":false,"libraries":{}},"vyper":{"evmVersion":"cancun","outputSelection":{"*":{"*":["abi","evm.bytecode","evm.deployedBytecode"]}}}}},"preprocessed":false,"mocks":[]} \ No newline at end of file diff --git a/op-chain-ops/genesis/config.go b/op-chain-ops/genesis/config.go index 9d4a905bc20b6..10e7daa1d11fa 100644 --- a/op-chain-ops/genesis/config.go +++ b/op-chain-ops/genesis/config.go @@ -378,9 +378,22 @@ type UpgradeScheduleDeployConfig 
struct { // When Prague activates. Relative to L1 genesis. L1PragueTimeOffset *hexutil.Uint64 `json:"l1PragueTimeOffset,omitempty"` + // QKC changes: // L2GenesisBlobTimeOffset is the number of seconds after genesis block that the L2Blob hard fork activates. // Set it to 0 to activate at genesis. Nil to disable L2Blob. L2GenesisBlobTimeOffset *hexutil.Uint64 `json:"l2GenesisBlobTimeOffset,omitempty"` + // When Osaka activates. Relative to L1 genesis. + L1OsakaTimeOffset *hexutil.Uint64 `json:"l1OsakaTimeOffset,omitempty"` + // When BPO1 activates. Relative to L1 genesis. + L1BPO1TimeOffset *hexutil.Uint64 `json:"l1BPO1TimeOffset,omitempty"` + // When BPO2 activates. Relative to L1 genesis. + L1BPO2TimeOffset *hexutil.Uint64 `json:"l1BPO2TimeOffset,omitempty"` + // When BPO3 activates. Relative to L1 genesis. + L1BPO3TimeOffset *hexutil.Uint64 `json:"l1BPO3TimeOffset,omitempty"` + // When BPO4 activates. Relative to L1 genesis. + L1BPO4TimeOffset *hexutil.Uint64 `json:"l1BPO4TimeOffset,omitempty"` + // Blob schedule config. 
+ L1BlobScheduleConfig *params.BlobScheduleConfig `json:"l1BlobScheduleConfig,omitempty"` } var _ ConfigChecker = (*UpgradeScheduleDeployConfig)(nil) @@ -415,10 +428,10 @@ func (d *UpgradeScheduleDeployConfig) ForkTimeOffset(fork rollup.ForkName) *uint return (*uint64)(d.L2GenesisHoloceneTimeOffset) case rollup.Isthmus: return (*uint64)(d.L2GenesisIsthmusTimeOffset) - case rollup.Interop: - return (*uint64)(d.L2GenesisInteropTimeOffset) case rollup.Jovian: return (*uint64)(d.L2GenesisJovianTimeOffset) + case rollup.Interop: + return (*uint64)(d.L2GenesisInteropTimeOffset) default: panic(fmt.Sprintf("unknown fork: %s", fork)) } @@ -442,10 +455,10 @@ func (d *UpgradeScheduleDeployConfig) SetForkTimeOffset(fork rollup.ForkName, of d.L2GenesisHoloceneTimeOffset = (*hexutil.Uint64)(offset) case rollup.Isthmus: d.L2GenesisIsthmusTimeOffset = (*hexutil.Uint64)(offset) - case rollup.Interop: - d.L2GenesisInteropTimeOffset = (*hexutil.Uint64)(offset) case rollup.Jovian: d.L2GenesisJovianTimeOffset = (*hexutil.Uint64)(offset) + case rollup.Interop: + d.L2GenesisInteropTimeOffset = (*hexutil.Uint64)(offset) default: panic(fmt.Sprintf("unknown fork: %s", fork)) } @@ -557,8 +570,8 @@ func (d *UpgradeScheduleDeployConfig) forks() []Fork { {L2GenesisTimeOffset: d.L2GenesisGraniteTimeOffset, Name: string(L2AllocsGranite)}, {L2GenesisTimeOffset: d.L2GenesisHoloceneTimeOffset, Name: string(L2AllocsHolocene)}, {L2GenesisTimeOffset: d.L2GenesisIsthmusTimeOffset, Name: string(L2AllocsIsthmus)}, - {L2GenesisTimeOffset: d.L2GenesisInteropTimeOffset, Name: string(L2AllocsInterop)}, {L2GenesisTimeOffset: d.L2GenesisJovianTimeOffset, Name: string(L2AllocsJovian)}, + {L2GenesisTimeOffset: d.L2GenesisInteropTimeOffset, Name: string(L2AllocsInterop)}, } } @@ -1210,6 +1223,7 @@ type L1Deployments struct { OptimismMintableERC20Factory common.Address `json:"OptimismMintableERC20Factory"` OptimismMintableERC20FactoryProxy common.Address `json:"OptimismMintableERC20FactoryProxy"` OptimismPortal 
common.Address `json:"OptimismPortal"` + OptimismPortalInterop common.Address `json:"OptimismPortalInterop"` OptimismPortalProxy common.Address `json:"OptimismPortalProxy"` ETHLockbox common.Address `json:"ETHLockbox"` ETHLockboxProxy common.Address `json:"ETHLockboxProxy"` @@ -1237,6 +1251,7 @@ func CreateL1DeploymentsFromContracts(contracts *addresses.L1Contracts) *L1Deplo OptimismMintableERC20Factory: contracts.OptimismMintableErc20FactoryImpl, OptimismMintableERC20FactoryProxy: contracts.OptimismMintableErc20FactoryProxy, OptimismPortal: contracts.OptimismPortalImpl, + OptimismPortalInterop: contracts.OptimismPortalInteropImpl, OptimismPortalProxy: contracts.OptimismPortalProxy, ETHLockbox: contracts.EthLockboxImpl, ETHLockboxProxy: contracts.EthLockboxProxy, diff --git a/op-chain-ops/genesis/config_test.go b/op-chain-ops/genesis/config_test.go index ad2308cc89c11..66ac9139c0119 100644 --- a/op-chain-ops/genesis/config_test.go +++ b/op-chain-ops/genesis/config_test.go @@ -191,10 +191,6 @@ func TestUpgradeScheduleDeployConfig_ActivateForkAtOffset(t *testing.T) { func TestUpgradeScheduleDeployConfig_SolidityForkNumber(t *testing.T) { // Iterate over all of them in case more are added for i, fork := range scheduleableForks[2:] { - if fork == "interop" { - continue - } - var d UpgradeScheduleDeployConfig d.ActivateForkAtOffset(fork, 0) require.EqualValues(t, i+1, d.SolidityForkNumber(uint64(42))) @@ -211,8 +207,8 @@ func TestUpgradeScheduleDeployConfig_SolidityForkNumber(t *testing.T) { {rollup.Granite, 4}, {rollup.Holocene, 5}, {rollup.Isthmus, 6}, - {rollup.Interop, 7}, - {rollup.Jovian, 8}, + {rollup.Jovian, 7}, + {rollup.Interop, 8}, } for _, tt := range tests { var d UpgradeScheduleDeployConfig diff --git a/op-chain-ops/genesis/genesis.go b/op-chain-ops/genesis/genesis.go index 622f98926278c..94f02286d53ac 100644 --- a/op-chain-ops/genesis/genesis.go +++ b/op-chain-ops/genesis/genesis.go @@ -22,6 +22,9 @@ const defaultGasLimit = 30_000_000 // HoloceneExtraData 
represents the default extra data for Holocene-genesis chains. var HoloceneExtraData = eip1559.EncodeHoloceneExtraData(250, 6) +// MinBaseFeeExtraData represents the default extra data for Jovian-genesis chains. +var MinBaseFeeExtraData = eip1559.EncodeMinBaseFeeExtraData(250, 6, 0) + // NewL2Genesis will create a new L2 genesis func NewL2Genesis(config *DeployConfig, l1StartHeader *eth.BlockRef) (*core.Genesis, error) { if config.L2ChainID == 0 { @@ -131,6 +134,9 @@ func NewL2Genesis(config *DeployConfig, l1StartHeader *eth.BlockRef) (*core.Gene if optimismChainConfig.IsIsthmus(genesis.Timestamp) { genesis.Alloc[params.HistoryStorageAddress] = types.Account{Nonce: 1, Code: params.HistoryStorageCode, Balance: common.Big0} } + if optimismChainConfig.IsMinBaseFee(genesis.Timestamp) { + genesis.ExtraData = MinBaseFeeExtraData + } return genesis, nil } @@ -144,6 +150,12 @@ func NewL1Genesis(config *DeployConfig) (*core.Genesis, error) { DevL1DeployConfig: config.DevL1DeployConfig, L1ChainID: eth.ChainIDFromUInt64(config.L1ChainID), L1PragueTimeOffset: (*uint64)(config.L1PragueTimeOffset), + L1OsakaTimeOffset: (*uint64)(config.L1OsakaTimeOffset), + L1BPO1TimeOffset: (*uint64)(config.L1BPO1TimeOffset), + L1BPO2TimeOffset: (*uint64)(config.L1BPO2TimeOffset), + L1BPO3TimeOffset: (*uint64)(config.L1BPO3TimeOffset), + L1BPO4TimeOffset: (*uint64)(config.L1BPO4TimeOffset), + BlobScheduleConfig: config.L1BlobScheduleConfig, }) } @@ -153,6 +165,18 @@ type DevL1DeployConfigMinimal struct { L1ChainID eth.ChainID // When Prague activates. Relative to L1 genesis. L1PragueTimeOffset *uint64 + // When Osaka activates. Relative to L1 genesis. + L1OsakaTimeOffset *uint64 + // When BPO1 activates. Relative to L1 genesis. + L1BPO1TimeOffset *uint64 + // When BPO2 activates. Relative to L1 genesis. + L1BPO2TimeOffset *uint64 + // When BPO3 activates. Relative to L1 genesis. + L1BPO3TimeOffset *uint64 + // When BPO4 activates. Relative to L1 genesis. 
+ L1BPO4TimeOffset *uint64 + // Blob schedule config. + BlobScheduleConfig *params.BlobScheduleConfig } // NewL1GenesisMinimal creates a L1 dev genesis template. @@ -208,6 +232,29 @@ func NewL1GenesisMinimal(config *DevL1DeployConfigMinimal) (*core.Genesis, error pragueTime := uint64(timestamp) + uint64(*config.L1PragueTimeOffset) chainConfig.PragueTime = &pragueTime } + if config.L1OsakaTimeOffset != nil { + osakaTime := uint64(timestamp) + uint64(*config.L1OsakaTimeOffset) + chainConfig.OsakaTime = &osakaTime + } + if config.L1BPO1TimeOffset != nil { + bpo1Time := uint64(timestamp) + uint64(*config.L1BPO1TimeOffset) + chainConfig.BPO1Time = &bpo1Time + } + if config.L1BPO2TimeOffset != nil { + bpo2Time := uint64(timestamp) + uint64(*config.L1BPO2TimeOffset) + chainConfig.BPO2Time = &bpo2Time + } + if config.L1BPO3TimeOffset != nil { + bpo3Time := uint64(timestamp) + uint64(*config.L1BPO3TimeOffset) + chainConfig.BPO3Time = &bpo3Time + } + if config.L1BPO4TimeOffset != nil { + bpo4Time := uint64(timestamp) + uint64(*config.L1BPO4TimeOffset) + chainConfig.BPO4Time = &bpo4Time + } + if config.BlobScheduleConfig != nil { + chainConfig.BlobScheduleConfig = config.BlobScheduleConfig + } // Note: excess-blob-gas, blob-gas-used, withdrawals-hash, requests-hash are set to reasonable defaults for L1 by the ToBlock() function return &core.Genesis{ Config: &chainConfig, diff --git a/op-chain-ops/interopgen/configs.go b/op-chain-ops/interopgen/configs.go index a9bd25a316e50..5182a41fb1470 100644 --- a/op-chain-ops/interopgen/configs.go +++ b/op-chain-ops/interopgen/configs.go @@ -35,8 +35,6 @@ type SuperFaultProofConfig struct { } type OPCMImplementationsConfig struct { - L1ContractsRelease string - FaultProof SuperFaultProofConfig } diff --git a/op-chain-ops/interopgen/deploy.go b/op-chain-ops/interopgen/deploy.go index 87736f6cbb236..987e6683ae3b4 100644 --- a/op-chain-ops/interopgen/deploy.go +++ b/op-chain-ops/interopgen/deploy.go @@ -18,6 +18,7 @@ import ( 
"github.com/ethereum-optimism/optimism/op-chain-ops/genesis" "github.com/ethereum-optimism/optimism/op-chain-ops/genesis/beacondeposit" "github.com/ethereum-optimism/optimism/op-chain-ops/script" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/manage" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/opcm" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -38,9 +39,6 @@ func Deploy(logger log.Logger, fa *foundry.ArtifactsFS, srcFS *foundry.SourceMap if !cfg.L1.ChainID.IsUint64() || cfg.L1.ChainID.Uint64() != l2Cfg.L1ChainID { return nil, nil, fmt.Errorf("chain L2 %s declared different L1 chain ID %d in config than global %d", id, l2Cfg.L1ChainID, cfg.L1.ChainID) } - if l2Cfg.L2GenesisJovianTimeOffset != nil { - return nil, nil, fmt.Errorf("jovian is not compatible with interop, but got fork offset %d", *l2Cfg.L2GenesisJovianTimeOffset) - } } deployments := &WorldDeployment{ @@ -156,7 +154,7 @@ func CreateL2(logger log.Logger, fa *foundry.ArtifactsFS, srcFS *foundry.SourceM } l2Host := script.NewHost(logger.New("role", "l2", "chain", l2Cfg.L2ChainID), fa, srcFS, l2Context) l2Host.SetEnvVar("OUTPUT_MODE", "none") // we don't use the cheatcode, but capture the state outside of EVM execution - l2Host.SetEnvVar("FORK", "holocene") // latest fork + l2Host.SetEnvVar("FORK", "jovian") // latest fork return l2Host } @@ -194,11 +192,15 @@ func DeploySuperchainToL1(l1Host *script.Host, opcmScripts *opcm.Scripts, superC ProofMaturityDelaySeconds: superCfg.Implementations.FaultProof.ProofMaturityDelaySeconds, DisputeGameFinalityDelaySeconds: superCfg.Implementations.FaultProof.DisputeGameFinalityDelaySeconds, MipsVersion: superCfg.Implementations.FaultProof.MipsVersion, - L1ContractsRelease: superCfg.Implementations.L1ContractsRelease, + DevFeatureBitmap: deployer.OptimismPortalInteropDevFlag, + FaultGameV2MaxGameDepth: big.NewInt(73), + FaultGameV2SplitDepth: 
big.NewInt(30), + FaultGameV2ClockExtension: big.NewInt(10800), + FaultGameV2MaxClockDuration: big.NewInt(302400), SuperchainProxyAdmin: superDeployment.SuperchainProxyAdmin, SuperchainConfigProxy: superDeployment.SuperchainConfigProxy, ProtocolVersionsProxy: superDeployment.ProtocolVersionsProxy, - UpgradeController: superCfg.ProxyAdminOwner, + L1ProxyAdminOwner: superCfg.ProxyAdminOwner, Challenger: superCfg.Challenger, }) if err != nil { @@ -224,25 +226,33 @@ func DeployL2ToL1(l1Host *script.Host, superCfg *SuperchainConfig, superDeployme l1Host.SetTxOrigin(cfg.Deployer) - output, err := opcm.DeployOPChain(l1Host, opcm.DeployOPChainInput{ - OpChainProxyAdminOwner: superCfg.ProxyAdminOwner, - SystemConfigOwner: cfg.SystemConfigOwner, - Batcher: cfg.BatchSenderAddress, - UnsafeBlockSigner: cfg.P2PSequencerAddress, - Proposer: cfg.Proposer, - Challenger: cfg.Challenger, - BasefeeScalar: cfg.GasPriceOracleBaseFeeScalar, - BlobBaseFeeScalar: cfg.GasPriceOracleBlobBaseFeeScalar, - L2ChainId: new(big.Int).SetUint64(cfg.L2ChainID), - Opcm: superDeployment.Opcm, - SaltMixer: cfg.SaltMixer, - GasLimit: cfg.GasLimit, - DisputeGameType: cfg.DisputeGameType, - DisputeAbsolutePrestate: cfg.DisputeAbsolutePrestate, - DisputeMaxGameDepth: cfg.DisputeMaxGameDepth, - DisputeSplitDepth: cfg.DisputeSplitDepth, - DisputeClockExtension: cfg.DisputeClockExtension, - DisputeMaxClockDuration: cfg.DisputeMaxClockDuration, + deployOPChainScript, err := opcm.NewDeployOPChainScript(l1Host) + if err != nil { + return nil, fmt.Errorf("failed to load DeployOPChain script: %w", err) + } + + output, err := deployOPChainScript.Run(opcm.DeployOPChainInput{ + OpChainProxyAdminOwner: superCfg.ProxyAdminOwner, + SystemConfigOwner: cfg.SystemConfigOwner, + Batcher: cfg.BatchSenderAddress, + UnsafeBlockSigner: cfg.P2PSequencerAddress, + Proposer: cfg.Proposer, + Challenger: cfg.Challenger, + BasefeeScalar: cfg.GasPriceOracleBaseFeeScalar, + BlobBaseFeeScalar: cfg.GasPriceOracleBlobBaseFeeScalar, + 
L2ChainId: new(big.Int).SetUint64(cfg.L2ChainID), + Opcm: superDeployment.Opcm, + SaltMixer: cfg.SaltMixer, + GasLimit: cfg.GasLimit, + DisputeGameType: cfg.DisputeGameType, + DisputeAbsolutePrestate: cfg.DisputeAbsolutePrestate, + DisputeMaxGameDepth: new(big.Int).SetUint64(cfg.DisputeMaxGameDepth), + DisputeSplitDepth: new(big.Int).SetUint64(cfg.DisputeSplitDepth), + DisputeClockExtension: cfg.DisputeClockExtension, + DisputeMaxClockDuration: cfg.DisputeMaxClockDuration, + AllowCustomDisputeParameters: true, + OperatorFeeScalar: cfg.GasPriceOracleOperatorFeeScalar, + OperatorFeeConstant: cfg.GasPriceOracleOperatorFeeConstant, }) if err != nil { return nil, fmt.Errorf("failed to deploy L2 OP chain: %w", err) @@ -250,7 +260,7 @@ func DeployL2ToL1(l1Host *script.Host, superCfg *SuperchainConfig, superDeployme // Collect deployment addresses return &L2Deployment{ - L2OpchainDeployment: L2OpchainDeployment(output), + L2OpchainDeployment: NewL2OPChainDeploymentFromDeployOPChainOutput(output), }, nil } diff --git a/op-chain-ops/interopgen/deployments.go b/op-chain-ops/interopgen/deployments.go index d9de49646b8a3..e1d6ee308a38f 100644 --- a/op-chain-ops/interopgen/deployments.go +++ b/op-chain-ops/interopgen/deployments.go @@ -1,6 +1,7 @@ package interopgen import ( + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/opcm" "github.com/ethereum/go-ethereum/common" ) @@ -18,6 +19,7 @@ type Implementations struct { OpcmStandardValidator common.Address `json:"OPCMStandardValidator"` DelayedWETHImpl common.Address `json:"DelayedWETHImpl"` OptimismPortalImpl common.Address `json:"OptimismPortalImpl"` + OptimismPortalInteropImpl common.Address `json:"OptimismPortalInteropImpl"` ETHLockboxImpl common.Address `json:"ETHLockboxImpl"` PreimageOracleSingleton common.Address `json:"PreimageOracleSingleton"` MipsSingleton common.Address `json:"MipsSingleton"` @@ -30,6 +32,8 @@ type Implementations struct { AnchorStateRegistryImpl common.Address 
`json:"AnchorStateRegistryImpl"` SuperchainConfigImpl common.Address `json:"SuperchainConfigImpl"` ProtocolVersionsImpl common.Address `json:"ProtocolVersionsImpl"` + FaultDisputeGameV2Impl common.Address `json:"FaultDisputeGameV2Impl"` + PermissionedDisputeGameV2Impl common.Address `json:"PermissionedDisputeGameV2Impl"` } type SuperchainDeployment struct { @@ -63,6 +67,27 @@ type L2OpchainDeployment struct { DelayedWETHPermissionlessGameProxy common.Address `json:"DelayedWETHPermissionlessGameProxy"` } +func NewL2OPChainDeploymentFromDeployOPChainOutput(output opcm.DeployOPChainOutput) L2OpchainDeployment { + return L2OpchainDeployment{ + OpChainProxyAdmin: output.OpChainProxyAdmin, + AddressManager: output.AddressManager, + L1ERC721BridgeProxy: output.L1ERC721BridgeProxy, + SystemConfigProxy: output.SystemConfigProxy, + OptimismMintableERC20FactoryProxy: output.OptimismMintableERC20FactoryProxy, + L1StandardBridgeProxy: output.L1StandardBridgeProxy, + L1CrossDomainMessengerProxy: output.L1CrossDomainMessengerProxy, + // Fault proof contracts below. 
+ OptimismPortalProxy: output.OptimismPortalProxy, + ETHLockboxProxy: output.EthLockboxProxy, + DisputeGameFactoryProxy: output.DisputeGameFactoryProxy, + AnchorStateRegistryProxy: output.AnchorStateRegistryProxy, + FaultDisputeGame: output.FaultDisputeGame, + PermissionedDisputeGame: output.PermissionedDisputeGame, + DelayedWETHPermissionedGameProxy: output.DelayedWETHPermissionedGameProxy, + DelayedWETHPermissionlessGameProxy: output.DelayedWETHPermissionlessGameProxy, + } +} + type L2Deployment struct { L2OpchainDeployment diff --git a/op-chain-ops/interopgen/recipe.go b/op-chain-ops/interopgen/recipe.go index 0703544e66d94..2d40c4451c94d 100644 --- a/op-chain-ops/interopgen/recipe.go +++ b/op-chain-ops/interopgen/recipe.go @@ -77,7 +77,6 @@ func (recipe *InteropDevRecipe) Build(addrs devkeys.Addresses) (*WorldConfig, er Challenger: challenger, Deployer: superchainDeployer, Implementations: OPCMImplementationsConfig{ - L1ContractsRelease: "dev", FaultProof: SuperFaultProofConfig{ WithdrawalDelaySeconds: big.NewInt(302400), MinProposalSizeBytes: big.NewInt(10000), @@ -264,8 +263,8 @@ func (r *InteropDevL2Recipe) build(l1ChainID uint64, addrs devkeys.Addresses) (* L2GenesisGraniteTimeOffset: new(hexutil.Uint64), L2GenesisHoloceneTimeOffset: new(hexutil.Uint64), L2GenesisIsthmusTimeOffset: new(hexutil.Uint64), + L2GenesisJovianTimeOffset: new(hexutil.Uint64), L2GenesisInteropTimeOffset: (*hexutil.Uint64)(&r.InteropOffset), - L2GenesisJovianTimeOffset: nil, L1CancunTimeOffset: new(hexutil.Uint64), L1PragueTimeOffset: new(hexutil.Uint64), }, diff --git a/op-chain-ops/script/cheatcodes.go b/op-chain-ops/script/cheatcodes.go index 46e2bc4e314c4..e9c2609080dff 100644 --- a/op-chain-ops/script/cheatcodes.go +++ b/op-chain-ops/script/cheatcodes.go @@ -43,3 +43,7 @@ func (c *AccessControlledPrecompile) Run(input []byte) ([]byte, error) { } return c.inner.Run(input) } + +func (c *AccessControlledPrecompile) Name() string { + return c.inner.Name() +} diff --git 
a/op-chain-ops/script/forking/db.go b/op-chain-ops/script/forking/db.go index a2edf36b28f41..d210e5c52565f 100644 --- a/op-chain-ops/script/forking/db.go +++ b/op-chain-ops/script/forking/db.go @@ -108,12 +108,7 @@ func (f *ForkDB) TrieDB() *triedb.Database { Preimages: false, IsVerkle: false, HashDB: nil, - PathDB: &pathdb.Config{ - StateHistory: 0, - CleanCacheSize: 0, - WriteBufferSize: 0, - ReadOnly: true, - }, + PathDB: pathdb.ReadOnly, }) return tdb } diff --git a/op-chain-ops/script/forking/state.go b/op-chain-ops/script/forking/state.go index e200e8903ef6d..9c49d81c2bb30 100644 --- a/op-chain-ops/script/forking/state.go +++ b/op-chain-ops/script/forking/state.go @@ -298,8 +298,8 @@ func (fst *ForkableState) GetRefund() uint64 { return fst.selected.GetRefund() } -func (fst *ForkableState) GetCommittedState(address common.Address, hash common.Hash) common.Hash { - return fst.stateFor(address).GetCommittedState(address, hash) +func (fst *ForkableState) GetStateAndCommittedState(address common.Address, hash common.Hash) (common.Hash, common.Hash) { + return fst.stateFor(address).GetStateAndCommittedState(address, hash) } func (fst *ForkableState) GetState(address common.Address, k common.Hash) common.Hash { diff --git a/op-chain-ops/script/forking/trie.go b/op-chain-ops/script/forking/trie.go index 10e4c34879c57..a8f6b99c1b629 100644 --- a/op-chain-ops/script/forking/trie.go +++ b/op-chain-ops/script/forking/trie.go @@ -35,6 +35,14 @@ func (f *ForkedAccountsTrie) Copy() *ForkedAccountsTrie { } } +func (f *ForkedAccountsTrie) PrefetchStorage(_ common.Address, _ [][]byte) error { + return nil +} + +func (f *ForkedAccountsTrie) PrefetchAccount(accounts []common.Address) error { + return nil +} + func (f *ForkedAccountsTrie) ExportDiff() *ExportDiff { return f.diff.Copy() } @@ -207,7 +215,7 @@ func (f *ForkedAccountsTrie) Commit(collectLeaf bool) (common.Hash, *trienode.No panic("cannot commit state-changes of a forked trie") } -func (f *ForkedAccountsTrie) 
Witness() map[string]struct{} { +func (f *ForkedAccountsTrie) Witness() map[string][]byte { panic("witness generation of a ForkedAccountsTrie is not supported") } diff --git a/op-chain-ops/script/precompile.go b/op-chain-ops/script/precompile.go index ee92950f4a026..0ba9f2e1b3c1f 100644 --- a/op-chain-ops/script/precompile.go +++ b/op-chain-ops/script/precompile.go @@ -78,6 +78,8 @@ type Precompile[E any] struct { // abiMethods is effectively the jump-table for 4-byte ABI calls to the precompile. abiMethods map[[4]byte]*precompileFunc + + name string } var _ vm.PrecompiledContract = (*Precompile[struct{}])(nil) @@ -107,6 +109,7 @@ func NewPrecompile[E any](e E, opts ...PrecompileOption[E]) (*Precompile[E], err fieldsOnly: false, fieldSetter: false, settable: make(map[[4]byte]*settableField), + name: reflect.TypeOf(e).Name(), } for _, opt := range opts { opt(out) @@ -188,7 +191,7 @@ func hasTrailingError(argCount int, getType func(i int) reflect.Type) bool { return false } lastTyp := getType(argCount - 1) - return lastTyp.Kind() == reflect.Interface && lastTyp.Implements(typeFor[error]()) + return lastTyp.Kind() == reflect.Interface && lastTyp.Implements(reflect.TypeFor[error]()) } // setupMethod takes a method definition, attached to selfVal, @@ -356,9 +359,9 @@ func goTypeToABIType(typ reflect.Type) (abi.Type, error) { // since big.Int interpretation defaults to uint256. type ABIInt256 big.Int -var abiInt256Type = typeFor[ABIInt256]() +var abiInt256Type = reflect.TypeFor[ABIInt256]() -var abiUint256Type = typeFor[uint256.Int]() +var abiUint256Type = reflect.TypeFor[uint256.Int]() // goTypeToSolidityType converts a Go type to the solidity ABI type definition. // The "internalType" is a quirk of the Geth ABI utils, for nested structures. 
@@ -408,7 +411,7 @@ func goTypeToSolidityType(typ reflect.Type) (typeDef, internalType string, err e if typ.AssignableTo(abiInt256Type) { return "int256", "", nil } - if typ.ConvertibleTo(typeFor[big.Int]()) { + if typ.ConvertibleTo(reflect.TypeFor[big.Int]()) { return "uint256", "", nil } // We can parse into abi.TupleTy in the future, if necessary @@ -631,6 +634,10 @@ func (p *Precompile[E]) Run(input []byte) ([]byte, error) { return out, nil } +func (p *Precompile[E]) Name() string { + return p.name +} + // revertSelector is the ABI signature of a default error type in solidity. var revertSelector = crypto.Keccak256([]byte("Error(string)"))[:4] @@ -643,9 +650,3 @@ func encodeRevert(outErr error) ([]byte, error) { out = append(out, rightPad32(outErrStr)...) // the error message string return out, vm.ErrExecutionReverted // Geth EVM will pick this up as a revert with return-data } - -// typeFor returns the [Type] that represents the type argument T. -// Note: not available yet in Go 1.21, but part of std-lib later. -func typeFor[T any]() reflect.Type { - return reflect.TypeOf((*T)(nil)).Elem() -} diff --git a/op-chain-ops/script/script.go b/op-chain-ops/script/script.go index 6a4b31fd2f43f..68c072a56884e 100644 --- a/op-chain-ops/script/script.go +++ b/op-chain-ops/script/script.go @@ -607,7 +607,11 @@ func (h *Host) handleRevertErr(addr common.Address, err error, revertMsg string, // onFault is a trace-hook, catches things more generic than regular EVM reverts. 
func (h *Host) onFault(pc uint64, op byte, gas, cost uint64, scope tracing.OpContext, depth int, err error) { - h.log.Warn("Fault", "addr", scope.Address(), "label", h.labels[scope.Address()], "err", err, "depth", depth) + var byte4 string + if len(scope.CallInput()) >= 4 { + byte4 = hexutil.Encode(scope.CallInput()[:4]) + } + h.log.Warn("Fault", "addr", scope.Address(), "label", h.labels[scope.Address()], "err", err, "depth", depth, "op", op, "byte4", byte4) } // unwindCallstack is a helper to remove call-stack entries. diff --git a/op-challenger/README.md b/op-challenger/README.md index 310880e40f7ee..84d7b53ccdac4 100644 --- a/op-challenger/README.md +++ b/op-challenger/README.md @@ -22,11 +22,10 @@ accessed by running `./op-challenger --help`. ### Running with Cannon on Local Devnet To run `op-challenger` against the local devnet, first clean and run -the devnet from the root of the repository. +the devnet. From the root of the repository run: ```shell -make devnet-clean -make devnet-up +cd kurtosis-devnet && just simple-devnet ``` Then build the `op-challenger` with `make op-challenger`. @@ -57,6 +56,22 @@ The challenger will monitor dispute games and respond to any invalid claims by posting the correct trace as the counter-claim. The commands below can then be used to create and interact with games. 
+#### Devnet Management Commands + +```shell +# Check status +kurtosis enclave ls +kurtosis enclave inspect simple-devnet + +# View logs from specific services +kurtosis service logs simple-devnet op-challenger-challenger-2151908 # Adjust names as needed +kurtosis service logs simple-devnet op-node-2151908-node0 # Adjust names as needed + +# Stop and clean up when done +kurtosis enclave stop simple-devnet +kurtosis enclave rm simple-devnet +``` + ## Subcommands The `op-challenger` has a few subcommands to interact with on-chain diff --git a/op-challenger/cmd/main_test.go b/op-challenger/cmd/main_test.go index c06c94090133d..94fff227f0752 100644 --- a/op-challenger/cmd/main_test.go +++ b/op-challenger/cmd/main_test.go @@ -30,6 +30,8 @@ var ( cannonBin = "./bin/cannon" cannonServer = "./bin/op-program" cannonPreState = "./pre.json" + cannonKonaServer = "./bin/kona-host" + cannonKonaPreState = "./cannon-kona-pre.json" datadir = "./test_data" rollupRpc = "http://example.com:8555" asteriscBin = "./bin/asterisc" @@ -139,7 +141,7 @@ func TestOpSupervisor(t *testing.T) { func TestTraceType(t *testing.T) { t.Run("Default", func(t *testing.T) { - expectedDefault := []types.TraceType{types.TraceTypeCannon, types.TraceTypeAsteriscKona} + expectedDefault := []types.TraceType{types.TraceTypeCannon, types.TraceTypeAsteriscKona, types.TraceTypeCannonKona} cfg := configForArgs(t, addRequiredArgsForMultipleTracesExcept(expectedDefault, "--trace-type")) require.Equal(t, expectedDefault, cfg.TraceTypes) }) @@ -328,6 +330,25 @@ func TestPollInterval(t *testing.T) { }) } +func TestMinUpdateInterval(t *testing.T) { + t.Run("DefaultsToZero", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(types.TraceTypeCannon)) + require.Equal(t, time.Duration(0), cfg.MinUpdateInterval) + }) + + t.Run("Valid", func(t *testing.T) { + cfg := configForArgs(t, addRequiredArgs(types.TraceTypeAlphabet, "--min-update-interval", "10m")) + require.Equal(t, 10*time.Minute, cfg.MinUpdateInterval) + 
}) + + t.Run("Invalid", func(t *testing.T) { + verifyArgsInvalid( + t, + "invalid value \"abc\" for flag -min-update-interval", + addRequiredArgs(types.TraceTypeAlphabet, "--min-update-interval", "abc")) + }) +} + func TestAsteriscOpProgramRequiredArgs(t *testing.T) { traceType := types.TraceTypeAsterisc t.Run(fmt.Sprintf("TestAsteriscServer-%v", traceType), func(t *testing.T) { @@ -482,6 +503,48 @@ func TestAsteriscKonaRequiredArgs(t *testing.T) { }) } +// validateCustomNetworkFlagsProhibitedWithNetworkFlag ensures custom network flags are not used simultaneously with the network flag. +// It validates disallowed flag combinations for a given trace type and trace type prefix configuration. +func validateCustomNetworkFlagsProhibitedWithNetworkFlag(t *testing.T, traceType types.TraceType, traceTypeForFlagPrefix types.TraceType, customNetworkFlag string) { + expectedError := fmt.Sprintf("flag network can not be used with rollup-config/%v-rollup-config, l2-genesis/%v-l2-genesis, l1-genesis/%v-l1-genesis or %v", traceTypeForFlagPrefix, traceTypeForFlagPrefix, traceTypeForFlagPrefix, customNetworkFlag) + + // Test the custom l2 flag + t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndCustomL2Flag-%v", traceType), func(t *testing.T) { + verifyArgsInvalid( + t, + expectedError, + addRequiredArgs(traceType, fmt.Sprintf("--%v=true", customNetworkFlag))) + }) + + // Now test flags with trace-specific permutations + customNetworkFlags := map[string]string{ + "RollupConfig": "rollup-config", + "L2Genesis": "l2-genesis", + "L1Genesis": "l1-genesis", + } + for testName, flag := range customNetworkFlags { + for _, withTraceSpecificPrefix := range []bool{true, false} { + var postFix string + if withTraceSpecificPrefix { + postFix = "-withTraceSpecificPrefix" + } + + t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAnd%v-%v%v", testName, traceType, postFix), func(t *testing.T) { + var prefix string + if withTraceSpecificPrefix { + prefix = fmt.Sprintf("%v-", traceTypeForFlagPrefix) + } + 
flagName := fmt.Sprintf("%v%v", prefix, flag) + + verifyArgsInvalid( + t, + expectedError, + addRequiredArgs(traceType, fmt.Sprintf("--%v=somevalue.json", flagName))) + }) + } + } +} + func TestAsteriscBaseRequiredArgs(t *testing.T) { for _, traceType := range []types.TraceType{types.TraceTypeAsterisc, types.TraceTypeAsteriscKona} { traceType := traceType @@ -560,12 +623,7 @@ func TestAsteriscBaseRequiredArgs(t *testing.T) { addRequiredArgsExcept(traceType, "--network", "--l2-genesis=gensis.json")) }) - t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndRollup-%v", traceType), func(t *testing.T) { - verifyArgsInvalid( - t, - "flag network can not be used with rollup-config, l2-genesis or asterisc-kona-l2-custom", - addRequiredArgs(traceType, "--rollup-config=rollup.json")) - }) + validateCustomNetworkFlagsProhibitedWithNetworkFlag(t, traceType, types.TraceTypeAsteriscKona, "asterisc-kona-l2-custom") t.Run(fmt.Sprintf("TestNetwork-%v", traceType), func(t *testing.T) { t.Run("NotRequiredForAlphabetTrace", func(t *testing.T) { @@ -648,26 +706,7 @@ func TestCannonCustomConfigArgs(t *testing.T) { addRequiredArgsExcept(traceType, "--network", "--cannon-l2-genesis=gensis.json")) }) - t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndRollup-%v", traceType), func(t *testing.T) { - verifyArgsInvalid( - t, - "flag network can not be used with cannon-rollup-config, l2-genesis or cannon-l2-custom", - addRequiredArgs(traceType, "--cannon-rollup-config=rollup.json")) - }) - - t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndRollup-%v", traceType), func(t *testing.T) { - args := requiredArgs(traceType) - delete(args, "--network") - delete(args, "--game-factory-address") - args["--network"] = network - args["--cannon-rollup-config"] = "rollup.json" - args["--cannon-l2-genesis"] = "gensis.json" - args["--cannon-l2-custom"] = "true" - verifyArgsInvalid( - t, - "flag network can not be used with cannon-rollup-config, cannon-l2-genesis or cannon-l2-custom", - toArgList(args)) - }) + 
validateCustomNetworkFlagsProhibitedWithNetworkFlag(t, traceType, types.TraceTypeCannon, "cannon-l2-custom") t.Run(fmt.Sprintf("TestNetwork-%v", traceType), func(t *testing.T) { t.Run("NotRequiredWhenRollupAndGenesIsSpecified", func(t *testing.T) { @@ -741,26 +780,7 @@ func TestSuperCannonCustomConfigArgs(t *testing.T) { addRequiredArgsExcept(traceType, "--network", "--cannon-rollup-config=rollup.json", "--cannon-l2-genesis=gensis.json")) }) - t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndRollup-%v", traceType), func(t *testing.T) { - verifyArgsInvalid( - t, - "flag network can not be used with cannon-rollup-config, l2-genesis or cannon-l2-custom", - addRequiredArgs(traceType, "--cannon-rollup-config=rollup.json")) - }) - - t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndRollup-%v", traceType), func(t *testing.T) { - args := requiredArgs(traceType) - delete(args, "--network") - delete(args, "--game-factory-address") - args["--network"] = network - args["--cannon-rollup-config"] = "rollup.json" - args["--cannon-l2-genesis"] = "gensis.json" - args["--cannon-l2-custom"] = "true" - verifyArgsInvalid( - t, - "flag network can not be used with cannon-rollup-config, cannon-l2-genesis or cannon-l2-custom", - toArgList(args)) - }) + validateCustomNetworkFlagsProhibitedWithNetworkFlag(t, traceType, types.TraceTypeCannon, "cannon-l2-custom") t.Run(fmt.Sprintf("TestNetwork-%v", traceType), func(t *testing.T) { t.Run("NotRequiredWhenRollupGenesisAndDepsetIsSpecified", func(t *testing.T) { @@ -847,26 +867,7 @@ func TestSuperAsteriscKonaCustomConfigArgs(t *testing.T) { addRequiredArgsExcept(traceType, "--network", "--asterisc-kona-rollup-config=rollup.json", "--asterisc-kona-l2-genesis=gensis.json")) }) - t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndRollup-%v", traceType), func(t *testing.T) { - verifyArgsInvalid( - t, - "flag network can not be used with asterisc-kona-rollup-config, l2-genesis or asterisc-kona-l2-custom", - addRequiredArgs(traceType, 
"--asterisc-kona-rollup-config=rollup.json")) - }) - - t.Run(fmt.Sprintf("TestMustNotSpecifyNetworkAndRollup-%v", traceType), func(t *testing.T) { - args := requiredArgs(traceType) - delete(args, "--network") - delete(args, "--game-factory-address") - args["--network"] = network - args["--asterisc-kona-rollup-config"] = "rollup.json" - args["--asterisc-kona-l2-genesis"] = "gensis.json" - args["--asterisc-kona-l2-custom"] = "true" - verifyArgsInvalid( - t, - "flag network can not be used with asterisc-kona-rollup-config, asterisc-kona-l2-genesis or asterisc-kona-l2-custom", - toArgList(args)) - }) + validateCustomNetworkFlagsProhibitedWithNetworkFlag(t, traceType, types.TraceTypeAsteriscKona, "asterisc-kona-l2-custom") t.Run(fmt.Sprintf("TestNetwork-%v", traceType), func(t *testing.T) { t.Run("NotRequiredWhenRollupGenesisAndDepsetIsSpecified", func(t *testing.T) { @@ -1287,6 +1288,8 @@ func requiredArgs(traceType types.TraceType) map[string]string { switch traceType { case types.TraceTypeCannon, types.TraceTypePermissioned: addRequiredCannonArgs(args) + case types.TraceTypeCannonKona: + addRequiredCannonKonaArgs(args) case types.TraceTypeAsterisc: addRequiredAsteriscArgs(args) case types.TraceTypeAsteriscKona: @@ -1311,6 +1314,11 @@ func addRequiredCannonArgs(args map[string]string) { addRequiredOutputRootArgs(args) } +func addRequiredCannonKonaArgs(args map[string]string) { + addRequiredCannonKonaBaseArgs(args) + addRequiredOutputRootArgs(args) +} + func addRequiredOutputRootArgs(args map[string]string) { args["--rollup-rpc"] = rollupRpc } @@ -1322,6 +1330,13 @@ func addRequiredCannonBaseArgs(args map[string]string) { args["--cannon-prestate"] = cannonPreState } +func addRequiredCannonKonaBaseArgs(args map[string]string) { + args["--network"] = network + args["--cannon-bin"] = cannonBin + args["--cannon-kona-server"] = cannonKonaServer + args["--cannon-kona-prestate"] = cannonKonaPreState +} + func addRequiredAsteriscArgs(args map[string]string) { 
addRequiredOutputRootArgs(args) args["--network"] = network diff --git a/op-challenger/config/config.go b/op-challenger/config/config.go index eb55ca2c978b1..874acdcd145f2 100644 --- a/op-challenger/config/config.go +++ b/op-challenger/config/config.go @@ -17,17 +17,20 @@ import ( ) var ( - ErrMissingTraceType = errors.New("no supported trace types specified") - ErrMissingDatadir = errors.New("missing datadir") - ErrMaxConcurrencyZero = errors.New("max concurrency must not be 0") - ErrMissingL2Rpc = errors.New("missing L2 rpc url") - ErrMissingCannonAbsolutePreState = errors.New("missing cannon absolute pre-state") - ErrMissingL1EthRPC = errors.New("missing l1 eth rpc url") - ErrMissingL1Beacon = errors.New("missing l1 beacon url") - ErrMissingGameFactoryAddress = errors.New("missing game factory address") - ErrMissingCannonSnapshotFreq = errors.New("missing cannon snapshot freq") - ErrMissingCannonInfoFreq = errors.New("missing cannon info freq") - ErrMissingDepsetConfig = errors.New("missing network or depset config path") + ErrMissingTraceType = errors.New("no supported trace types specified") + ErrMissingDatadir = errors.New("missing datadir") + ErrMaxConcurrencyZero = errors.New("max concurrency must not be 0") + ErrMissingL2Rpc = errors.New("missing L2 rpc url") + ErrMissingCannonAbsolutePreState = errors.New("missing cannon absolute pre-state") + ErrMissingL1EthRPC = errors.New("missing l1 eth rpc url") + ErrMissingL1Beacon = errors.New("missing l1 beacon url") + ErrMissingGameFactoryAddress = errors.New("missing game factory address") + ErrMissingCannonSnapshotFreq = errors.New("missing cannon snapshot freq") + ErrMissingCannonInfoFreq = errors.New("missing cannon info freq") + ErrMissingCannonKonaAbsolutePreState = errors.New("missing cannon kona absolute pre-state") + ErrMissingCannonKonaSnapshotFreq = errors.New("missing cannon kona snapshot freq") + ErrMissingCannonKonaInfoFreq = errors.New("missing cannon kona info freq") + ErrMissingDepsetConfig = 
errors.New("missing network or depset config path") ErrMissingRollupRpc = errors.New("missing rollup rpc url") ErrMissingSupervisorRpc = errors.New("missing supervisor rpc url") @@ -52,8 +55,10 @@ const ( // The default value is 28 days. The worst case duration for a game is 16 days // (due to clock extension), plus 7 days WETH withdrawal delay leaving a 5 day // buffer to monitor games to ensure bonds are claimed. - DefaultGameWindow = 28 * 24 * time.Hour - DefaultMaxPendingTx = 10 + DefaultGameWindow = 28 * 24 * time.Hour + DefaultMaxPendingTx = 10 + DefaultResponseDelay = 0 // No delay by default + DefaultResponseDelayAfter = 0 // Apply delay from first response by default ) // Config is a well typed config that is parsed from the CLI params. @@ -69,6 +74,7 @@ type Config struct { MaxConcurrency uint // Maximum number of threads to use when progressing games PollInterval time.Duration // Polling interval for latest-block subscription when using an HTTP RPC provider AllowInvalidPrestate bool // Whether to allow responding to games where the prestate does not match + MinUpdateInterval time.Duration // Minimum duration the L1 head block time must advance before scheduling a new update cycle AdditionalBondClaimants []common.Address // List of addresses to claim bonds for in addition to the tx manager sender @@ -81,9 +87,12 @@ type Config struct { L2Rpcs []string // L2 RPC Url // Specific to the cannon trace provider - Cannon vm.Config - CannonAbsolutePreState string // File to load the absolute pre-state for Cannon traces from - CannonAbsolutePreStateBaseURL *url.URL // Base URL to retrieve absolute pre-states for Cannon traces from + Cannon vm.Config + CannonAbsolutePreState string // File to load the absolute pre-state for Cannon traces from + CannonAbsolutePreStateBaseURL *url.URL // Base URL to retrieve absolute pre-states for Cannon traces from + CannonKona vm.Config + CannonKonaAbsolutePreState string // File to load the absolute pre-state for CannonKona 
traces from + CannonKonaAbsolutePreStateBaseURL *url.URL // Base URL to retrieve absolute pre-states for CannonKona traces from // Specific to the asterisc trace provider Asterisc vm.Config @@ -98,6 +107,19 @@ type Config struct { TxMgrConfig txmgr.CLIConfig MetricsConfig opmetrics.CLIConfig PprofConfig oppprof.CLIConfig + + ResponseDelay time.Duration /* Delay before responding to each game action to slow down game progression. + Note: set with caution, since the challenger can end up using more resources if it has to wait to respond + to an attacker generating many claims. Consider using the additional ResponseDelayAfter config option. + Also note that the delay is only applied when: + 1) delaying will not lead to a timeout of the game, + 2) the challenger is not in a clock extension period and + 3) delaying will not lead to the challenger having to respond inside of a clock extension period + (thus ensuring that the challenger always has enough remaining time to respond to the game action). */ + ResponseDelayAfter uint64 /* Number of responses after which to start applying the delay. + Set to 0 to apply delay from the first response, 1 to skip the first response, etc. + Note: the delay is only applied from the next round after which this `responseDelayAfter` value + is surpassed (not from the exact response after which its surpassed, but from the next round). 
*/ } func NewInteropConfig( @@ -138,6 +160,16 @@ func NewInteropConfig( DebugInfo: true, BinarySnapshots: true, }, + CannonKona: vm.Config{ + VmType: types.TraceTypeCannonKona, + L1: l1EthRpc, + L1Beacon: l1BeaconApi, + L2s: l2Rpcs, + SnapshotFreq: DefaultCannonSnapshotFreq, + InfoFreq: DefaultCannonInfoFreq, + DebugInfo: true, + BinarySnapshots: true, + }, Asterisc: vm.Config{ VmType: types.TraceTypeAsterisc, L1: l1EthRpc, @@ -198,6 +230,16 @@ func NewConfig( DebugInfo: true, BinarySnapshots: true, }, + CannonKona: vm.Config{ + VmType: types.TraceTypeCannonKona, + L1: l1EthRpc, + L1Beacon: l1BeaconApi, + L2s: []string{l2EthRpc}, + SnapshotFreq: DefaultCannonSnapshotFreq, + InfoFreq: DefaultCannonInfoFreq, + DebugInfo: true, + BinarySnapshots: true, + }, Asterisc: vm.Config{ VmType: types.TraceTypeAsterisc, L1: l1EthRpc, @@ -266,6 +308,14 @@ func (c Config) Check() error { return err } } + if c.TraceTypeEnabled(types.TraceTypeCannonKona) { + if c.RollupRpc == "" { + return ErrMissingRollupRpc + } + if err := c.validateBaseCannonKonaOptions(); err != nil { + return err + } + } if c.TraceTypeEnabled(types.TraceTypeAsterisc) { if c.RollupRpc == "" { return ErrMissingRollupRpc @@ -336,6 +386,22 @@ func (c Config) validateBaseCannonOptions() error { return nil } +func (c Config) validateBaseCannonKonaOptions() error { + if err := c.CannonKona.Check(); err != nil { + return fmt.Errorf("cannon kona: %w", err) + } + if c.CannonKonaAbsolutePreState == "" && c.CannonKonaAbsolutePreStateBaseURL == nil { + return ErrMissingCannonKonaAbsolutePreState + } + if c.CannonKona.SnapshotFreq == 0 { + return ErrMissingCannonKonaSnapshotFreq + } + if c.CannonKona.InfoFreq == 0 { + return ErrMissingCannonKonaInfoFreq + } + return nil +} + func (c Config) validateBaseAsteriscKonaOptions() error { if err := c.AsteriscKona.Check(); err != nil { return fmt.Errorf("asterisc kona: %w", err) diff --git a/op-challenger/config/config_test.go b/op-challenger/config/config_test.go index 
7602063b2d6bd..0bd30b500ec77 100644 --- a/op-challenger/config/config_test.go +++ b/op-challenger/config/config_test.go @@ -6,6 +6,7 @@ import ( "os" "path/filepath" "runtime" + "slices" "testing" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/vm" @@ -42,6 +43,11 @@ var ( validAsteriscKonaNetwork = "mainnet" validAsteriscKonaAbsolutePreState = "pre.json" validAsteriscKonaAbsolutePreStateBaseURL, _ = url.Parse("http://localhost/bar/") + + validCannonKonaBin = "./bin/cannon" + validCannonKonaServerBin = "./bin/kona-host" + validCannonKonaNetwork = "mainnet" + validCannonKonaAbsolutePreStateBaseURL, _ = url.Parse("http://localhost/bar/") ) var singleCannonTraceTypes = []types.TraceType{types.TraceTypeCannon, types.TraceTypePermissioned} @@ -119,6 +125,20 @@ func applyValidConfigForAsteriscKona(t *testing.T, cfg *Config) { cfg.AsteriscKona.Networks = []string{validAsteriscKonaNetwork} } +func applyValidConfigForCannonKona(t *testing.T, cfg *Config) { + tmpDir := t.TempDir() + vmBin := filepath.Join(tmpDir, validCannonKonaBin) + server := filepath.Join(tmpDir, validCannonKonaServerBin) + err := ensureExists(vmBin) + require.NoError(t, err) + err = ensureExists(server) + require.NoError(t, err) + cfg.CannonKona.VmBin = vmBin + cfg.CannonKona.Server = server + cfg.CannonKonaAbsolutePreStateBaseURL = validCannonKonaAbsolutePreStateBaseURL + cfg.CannonKona.Networks = []string{validCannonKonaNetwork} +} + func applyValidConfigForSuperAsteriscKona(t *testing.T, cfg *Config) { cfg.SupervisorRPC = validSupervisorRpc applyValidConfigForAsteriscKona(t, cfg) @@ -132,6 +152,9 @@ func validConfig(t *testing.T, traceType types.TraceType) Config { if traceType == types.TraceTypeCannon || traceType == types.TraceTypePermissioned { applyValidConfigForCannon(t, &cfg) } + if traceType == types.TraceTypeCannonKona { + applyValidConfigForCannonKona(t, &cfg) + } if traceType == types.TraceTypeAsterisc { applyValidConfigForAsterisc(t, &cfg) } @@ -144,6 +167,28 @@ func 
validConfig(t *testing.T, traceType types.TraceType) Config { return cfg } +func validConfigWithNoNetworks(t *testing.T, traceType types.TraceType) Config { + cfg := validConfig(t, traceType) + + mutateVmConfig := func(cfg *vm.Config) { + cfg.Networks = nil + cfg.RollupConfigPaths = []string{"foo.json"} + cfg.L2GenesisPaths = []string{"genesis.json"} + cfg.L1GenesisPath = "bar.json" + cfg.DepsetConfigPath = "foo.json" + } + if slices.Contains(allCannonTraceTypes, traceType) { + mutateVmConfig(&cfg.Cannon) + } + if slices.Contains(asteriscTraceTypes, traceType) { + mutateVmConfig(&cfg.Asterisc) + } + if slices.Contains(asteriscKonaTraceTypes, traceType) { + mutateVmConfig(&cfg.AsteriscKona) + } + return cfg +} + // TestValidConfigIsValid checks that the config provided by validConfig is actually valid func TestValidConfigIsValid(t *testing.T) { for _, traceType := range types.TraceTypes { @@ -261,20 +306,14 @@ func TestCannonRequiredArgs(t *testing.T) { }) t.Run(fmt.Sprintf("TestCannonNetworkOrRollupConfigRequired-%v", traceType), func(t *testing.T) { - cfg := validConfig(t, traceType) - cfg.Cannon.Networks = nil + cfg := validConfigWithNoNetworks(t, traceType) cfg.Cannon.RollupConfigPaths = nil - cfg.Cannon.L2GenesisPaths = []string{"genesis.json"} - cfg.Cannon.DepsetConfigPath = "foo.json" require.ErrorIs(t, cfg.Check(), vm.ErrMissingRollupConfig) }) t.Run(fmt.Sprintf("TestCannonNetworkOrL2GenesisRequired-%v", traceType), func(t *testing.T) { - cfg := validConfig(t, traceType) - cfg.Cannon.Networks = nil - cfg.Cannon.RollupConfigPaths = []string{"foo.json"} + cfg := validConfigWithNoNetworks(t, traceType) cfg.Cannon.L2GenesisPaths = nil - cfg.Cannon.DepsetConfigPath = "foo.json" require.ErrorIs(t, cfg.Check(), vm.ErrMissingL2Genesis) }) @@ -354,6 +393,7 @@ func TestDepsetConfig(t *testing.T) { cfg := validConfig(t, traceType) cfg.Cannon.Networks = nil cfg.Cannon.RollupConfigPaths = []string{"foo.json"} + cfg.Cannon.L1GenesisPath = "bar.json" 
cfg.Cannon.L2GenesisPaths = []string{"genesis.json"} cfg.Cannon.DepsetConfigPath = "" require.NoError(t, cfg.Check()) @@ -366,6 +406,7 @@ func TestDepsetConfig(t *testing.T) { cfg := validConfig(t, traceType) cfg.AsteriscKona.Networks = nil cfg.AsteriscKona.RollupConfigPaths = []string{"foo.json"} + cfg.AsteriscKona.L1GenesisPath = "bar.json" cfg.AsteriscKona.L2GenesisPaths = []string{"genesis.json"} cfg.AsteriscKona.DepsetConfigPath = "" require.NoError(t, cfg.Check()) @@ -441,17 +482,13 @@ func TestAsteriscRequiredArgs(t *testing.T) { }) t.Run(fmt.Sprintf("TestAsteriscNetworkOrRollupConfigRequired-%v", traceType), func(t *testing.T) { - cfg := validConfig(t, traceType) - cfg.Asterisc.Networks = nil + cfg := validConfigWithNoNetworks(t, traceType) cfg.Asterisc.RollupConfigPaths = nil - cfg.Asterisc.L2GenesisPaths = []string{"genesis.json"} require.ErrorIs(t, cfg.Check(), vm.ErrMissingRollupConfig) }) t.Run(fmt.Sprintf("TestAsteriscNetworkOrL2GenesisRequired-%v", traceType), func(t *testing.T) { - cfg := validConfig(t, traceType) - cfg.Asterisc.Networks = nil - cfg.Asterisc.RollupConfigPaths = []string{"foo.json"} + cfg := validConfigWithNoNetworks(t, traceType) cfg.Asterisc.L2GenesisPaths = nil require.ErrorIs(t, cfg.Check(), vm.ErrMissingL2Genesis) }) @@ -557,17 +594,13 @@ func TestAsteriscKonaRequiredArgs(t *testing.T) { }) t.Run(fmt.Sprintf("TestAsteriscKonaNetworkOrRollupConfigRequired-%v", traceType), func(t *testing.T) { - cfg := validConfig(t, traceType) - cfg.AsteriscKona.Networks = nil + cfg := validConfigWithNoNetworks(t, traceType) cfg.AsteriscKona.RollupConfigPaths = nil - cfg.AsteriscKona.L2GenesisPaths = []string{"genesis.json"} require.ErrorIs(t, cfg.Check(), vm.ErrMissingRollupConfig) }) t.Run(fmt.Sprintf("TestAsteriscKonaNetworkOrL2GenesisRequired-%v", traceType), func(t *testing.T) { - cfg := validConfig(t, traceType) - cfg.AsteriscKona.Networks = nil - cfg.AsteriscKona.RollupConfigPaths = []string{"foo.json"} + cfg := 
validConfigWithNoNetworks(t, traceType) cfg.AsteriscKona.L2GenesisPaths = nil require.ErrorIs(t, cfg.Check(), vm.ErrMissingL2Genesis) }) diff --git a/op-challenger/flags/flags.go b/op-challenger/flags/flags.go index 235589c23327e..20fa198030434 100644 --- a/op-challenger/flags/flags.go +++ b/op-challenger/flags/flags.go @@ -32,7 +32,7 @@ func prefixEnvVars(name string) []string { } var ( - faultDisputeVMs = []types.TraceType{types.TraceTypeCannon, types.TraceTypeAsterisc, types.TraceTypeAsteriscKona, types.TraceTypeSuperCannon, types.TraceTypeSuperAsteriscKona} + faultDisputeVMs = []types.TraceType{types.TraceTypeCannon, types.TraceTypeCannonKona, types.TraceTypeAsterisc, types.TraceTypeAsteriscKona, types.TraceTypeSuperCannon, types.TraceTypeSuperAsteriscKona} // Required Flags L1EthRpcFlag = &cli.StringFlag{ Name: "l1-eth-rpc", @@ -74,7 +74,7 @@ var ( Name: "trace-type", Usage: "The trace types to support. Valid options: " + openum.EnumString(types.TraceTypes), EnvVars: prefixEnvVars("TRACE_TYPE"), - Value: cli.NewStringSlice(types.TraceTypeCannon.String(), types.TraceTypeAsteriscKona.String()), + Value: cli.NewStringSlice(types.TraceTypeCannon.String(), types.TraceTypeAsteriscKona.String(), types.TraceTypeCannonKona.String()), } DatadirFlag = &cli.StringFlag{ Name: "datadir", @@ -110,6 +110,11 @@ var ( EnvVars: prefixEnvVars("HTTP_POLL_INTERVAL"), Value: config.DefaultPollInterval, } + MinUpdateInterval = &cli.DurationFlag{ + Name: "min-update-interval", + Usage: "Minimum time between scheduling update cycles based on the L1 block time.", + EnvVars: prefixEnvVars("MIN_UPDATE_INTERVAL"), + } AdditionalBondClaimants = &cli.StringSliceFlag{ Name: "additional-bond-claimants", Usage: "List of addresses to claim bonds for, in addition to the configured transaction sender", @@ -138,6 +143,13 @@ var ( EnvVars: envVars, } }) + L1GenesisFlag = NewVMFlag("l1-genesis", EnvVarPrefix, faultDisputeVMs, func(name string, envVars []string, traceTypeInfo string) cli.Flag { + 
return &cli.StringFlag{ + Name: name, + Usage: "Path to the L1 genesis file. Only required if the L1 is not mainnet, sepolia, holesky, or hoodi.", + EnvVars: envVars, + } + }) DepsetConfigFlag = NewVMFlag("depset-config", EnvVarPrefix, faultDisputeVMs, func(name string, envVars []string, traceTypeInfo string) cli.Flag { return &cli.StringFlag{ Name: name, @@ -180,6 +192,24 @@ var ( EnvVars: prefixEnvVars("CANNON_INFO_FREQ"), Value: config.DefaultCannonInfoFreq, } + CannonKonaServerFlag = &cli.StringFlag{ + Name: "cannon-kona-server", + Usage: "Path to kona executable to use as pre-image oracle server when generating trace data (cannon-kona trace type only)", + EnvVars: prefixEnvVars("CANNON_KONA_SERVER"), + } + CannonKonaPreStateFlag = &cli.StringFlag{ + Name: "cannon-kona-prestate", + Usage: "Path to absolute prestate to use when generating trace data (cannon-kona trace type only)", + EnvVars: prefixEnvVars("CANNON_KONA_PRESTATE"), + } + CannonKonaL2CustomFlag = &cli.BoolFlag{ + Name: "cannon-kona-l2-custom", + Usage: "Notify the kona-host that the L2 chain uses custom config to be loaded via the preimage oracle. 
" + + "WARNING: This is incompatible with on-chain testing and must only be used for testing purposes.", + EnvVars: prefixEnvVars("CANNON_KONA_L2_CUSTOM"), + Value: false, + Hidden: true, + } AsteriscBinFlag = &cli.StringFlag{ Name: "asterisc-bin", Usage: "Path to asterisc executable to use when generating trace data (asterisc trace type only)", @@ -243,6 +273,18 @@ var ( EnvVars: prefixEnvVars("UNSAFE_ALLOW_INVALID_PRESTATE"), Hidden: true, // Hidden as this is an unsafe flag added only for testing purposes } + ResponseDelayFlag = &cli.DurationFlag{ + Name: "response-delay", + Usage: "Delay before responding to game actions to slow down game progression.", + EnvVars: prefixEnvVars("RESPONSE_DELAY"), + Value: config.DefaultResponseDelay, + } + ResponseDelayAfterFlag = &cli.Uint64Flag{ + Name: "response-delay-after", + Usage: "Number of responses after which to start applying the delay (0 = from first response).", + EnvVars: prefixEnvVars("RESPONSE_DELAY_AFTER"), + Value: config.DefaultResponseDelayAfter, + } ) // requiredFlags are checked by [CheckRequired] @@ -264,6 +306,7 @@ var optionalFlags = []cli.Flag{ L2ExperimentalEthRpcFlag, MaxPendingTransactionsFlag, HTTPPollInterval, + MinUpdateInterval, AdditionalBondClaimants, GameAllowlistFlag, CannonL2CustomFlag, @@ -272,6 +315,9 @@ var optionalFlags = []cli.Flag{ CannonPreStateFlag, CannonSnapshotFreqFlag, CannonInfoFreqFlag, + CannonKonaServerFlag, + CannonKonaPreStateFlag, + CannonKonaL2CustomFlag, AsteriscBinFlag, AsteriscServerFlag, AsteriscKonaL2CustomFlag, @@ -283,12 +329,15 @@ var optionalFlags = []cli.Flag{ GameWindowFlag, SelectiveClaimResolutionFlag, UnsafeAllowInvalidPrestate, + ResponseDelayFlag, + ResponseDelayAfterFlag, } func init() { optionalFlags = append(optionalFlags, oplog.CLIFlags(EnvVarPrefix)...) optionalFlags = append(optionalFlags, PreStatesURLFlag.Flags()...) optionalFlags = append(optionalFlags, RollupConfigFlag.Flags()...) + optionalFlags = append(optionalFlags, L1GenesisFlag.Flags()...) 
optionalFlags = append(optionalFlags, L2GenesisFlag.Flags()...) optionalFlags = append(optionalFlags, DepsetConfigFlag.Flags()...) optionalFlags = append(optionalFlags, txmgr.CLIFlagsWithDefaults(EnvVarPrefix, txmgr.DefaultChallengerFlagValues)...) @@ -310,9 +359,9 @@ func checkOutputProviderFlags(ctx *cli.Context) error { func CheckCannonBaseFlags(ctx *cli.Context) error { if ctx.IsSet(flags.NetworkFlagName) && - (RollupConfigFlag.IsSet(ctx, types.TraceTypeCannon) || L2GenesisFlag.IsSet(ctx, types.TraceTypeCannon) || ctx.Bool(CannonL2CustomFlag.Name)) { - return fmt.Errorf("flag %v can not be used with %v, %v or %v", - flags.NetworkFlagName, RollupConfigFlag.SourceFlagName(ctx, types.TraceTypeCannon), L2GenesisFlag.SourceFlagName(ctx, types.TraceTypeCannon), CannonL2CustomFlag.Name) + (RollupConfigFlag.IsSet(ctx, types.TraceTypeCannon) || L2GenesisFlag.IsSet(ctx, types.TraceTypeCannon) || L1GenesisFlag.IsSet(ctx, types.TraceTypeCannon) || ctx.Bool(CannonL2CustomFlag.Name)) { + return fmt.Errorf("flag %v can not be used with %v, %v, %v or %v", + flags.NetworkFlagName, RollupConfigFlag.EitherFlagName(types.TraceTypeCannon), L2GenesisFlag.EitherFlagName(types.TraceTypeCannon), L1GenesisFlag.EitherFlagName(types.TraceTypeCannon), CannonL2CustomFlag.Name) } if ctx.Bool(CannonL2CustomFlag.Name) && !(RollupConfigFlag.IsSet(ctx, types.TraceTypeCannon) && L2GenesisFlag.IsSet(ctx, types.TraceTypeCannon)) { return fmt.Errorf("flag %v and %v must be set when %v is true", @@ -363,6 +412,39 @@ func CheckCannonFlags(ctx *cli.Context) error { return nil } +func CheckCannonKonaBaseFlags(ctx *cli.Context, traceType types.TraceType) error { + if !ctx.IsSet(flags.NetworkFlagName) && + !(RollupConfigFlag.IsSet(ctx, traceType) && L2GenesisFlag.IsSet(ctx, traceType)) { + return fmt.Errorf("flag %v or %v and %v is required", + flags.NetworkFlagName, RollupConfigFlag.EitherFlagName(traceType), L2GenesisFlag.EitherFlagName(traceType)) + } + if ctx.IsSet(flags.NetworkFlagName) && + 
(RollupConfigFlag.IsSet(ctx, types.TraceTypeCannonKona) || L2GenesisFlag.IsSet(ctx, types.TraceTypeCannonKona) || L1GenesisFlag.IsSet(ctx, types.TraceTypeCannonKona) || ctx.Bool(CannonKonaL2CustomFlag.Name)) { + return fmt.Errorf("flag %v can not be used with %v, %v, %v or %v", + flags.NetworkFlagName, RollupConfigFlag.EitherFlagName(types.TraceTypeCannonKona), L2GenesisFlag.EitherFlagName(types.TraceTypeCannonKona), L1GenesisFlag.EitherFlagName(types.TraceTypeCannonKona), CannonKonaL2CustomFlag.Name) + } + if !ctx.IsSet(CannonBinFlag.Name) { + return fmt.Errorf("flag %s is required", CannonBinFlag.Name) + } + return nil +} + +func CheckCannonKonaFlags(ctx *cli.Context) error { + if err := checkOutputProviderFlags(ctx); err != nil { + return err + } + if err := CheckCannonKonaBaseFlags(ctx, types.TraceTypeCannonKona); err != nil { + return err + } + if !ctx.IsSet(CannonKonaServerFlag.Name) { + return fmt.Errorf("flag %s is required", CannonKonaServerFlag.Name) + } + if !PreStatesURLFlag.IsSet(ctx, types.TraceTypeCannonKona) && !ctx.IsSet(CannonKonaPreStateFlag.Name) { + return fmt.Errorf("flag %s or %s is required", PreStatesURLFlag.EitherFlagName(types.TraceTypeCannonKona), CannonKonaPreStateFlag.Name) + } + return nil +} + func CheckAsteriscBaseFlags(ctx *cli.Context, traceType types.TraceType) error { if !ctx.IsSet(flags.NetworkFlagName) && !(RollupConfigFlag.IsSet(ctx, traceType) && L2GenesisFlag.IsSet(ctx, traceType)) { @@ -370,9 +452,9 @@ func CheckAsteriscBaseFlags(ctx *cli.Context, traceType types.TraceType) error { flags.NetworkFlagName, RollupConfigFlag.EitherFlagName(traceType), L2GenesisFlag.EitherFlagName(traceType)) } if ctx.IsSet(flags.NetworkFlagName) && - (RollupConfigFlag.IsSet(ctx, types.TraceTypeAsteriscKona) || L2GenesisFlag.IsSet(ctx, types.TraceTypeAsteriscKona) || ctx.Bool(AsteriscKonaL2CustomFlag.Name)) { - return fmt.Errorf("flag %v can not be used with %v, %v or %v", - flags.NetworkFlagName, RollupConfigFlag.SourceFlagName(ctx, 
types.TraceTypeAsteriscKona), L2GenesisFlag.SourceFlagName(ctx, types.TraceTypeAsteriscKona), AsteriscKonaL2CustomFlag.Name) + (RollupConfigFlag.IsSet(ctx, types.TraceTypeAsteriscKona) || L2GenesisFlag.IsSet(ctx, types.TraceTypeAsteriscKona) || L1GenesisFlag.IsSet(ctx, types.TraceTypeAsteriscKona) || ctx.Bool(AsteriscKonaL2CustomFlag.Name)) { + return fmt.Errorf("flag %v can not be used with %v, %v, %v or %v", + flags.NetworkFlagName, RollupConfigFlag.EitherFlagName(types.TraceTypeAsteriscKona), L2GenesisFlag.EitherFlagName(types.TraceTypeAsteriscKona), L1GenesisFlag.EitherFlagName(types.TraceTypeAsteriscKona), AsteriscKonaL2CustomFlag.Name) } if !ctx.IsSet(AsteriscBinFlag.Name) { return fmt.Errorf("flag %s is required", AsteriscBinFlag.Name) @@ -451,6 +533,10 @@ func CheckRequired(ctx *cli.Context, traceTypes []types.TraceType) error { if err := CheckCannonFlags(ctx); err != nil { return err } + case types.TraceTypeCannonKona: + if err := CheckCannonKonaFlags(ctx); err != nil { + return err + } case types.TraceTypeAsterisc: if err := CheckAsteriscFlags(ctx); err != nil { return err @@ -596,6 +682,10 @@ func NewConfigFromCLI(ctx *cli.Context, logger log.Logger) (*config.Config, erro if err != nil { return nil, err } + cannonKonaPreStatesURL, err := getPrestatesUrl(types.TraceTypeCannonKona) + if err != nil { + return nil, err + } asteriscPreStatesURL, err := getPrestatesUrl(types.TraceTypeAsterisc) if err != nil { return nil, err @@ -621,6 +711,7 @@ func NewConfigFromCLI(ctx *cli.Context, logger log.Logger) (*config.Config, erro L2Rpcs: l2Rpcs, MaxPendingTx: ctx.Uint64(MaxPendingTransactionsFlag.Name), PollInterval: ctx.Duration(HTTPPollInterval.Name), + MinUpdateInterval: ctx.Duration(MinUpdateInterval.Name), AdditionalBondClaimants: claimants, RollupRpc: ctx.String(RollupRpcFlag.Name), SupervisorRPC: ctx.String(SupervisorRpcFlag.Name), @@ -635,6 +726,7 @@ func NewConfigFromCLI(ctx *cli.Context, logger log.Logger) (*config.Config, erro Networks: networks, 
L2Custom: ctx.Bool(CannonL2CustomFlag.Name), RollupConfigPaths: RollupConfigFlag.StringSlice(ctx, types.TraceTypeCannon), + L1GenesisPath: L1GenesisFlag.String(ctx, types.TraceTypeCannon), L2GenesisPaths: L2GenesisFlag.StringSlice(ctx, types.TraceTypeCannon), DepsetConfigPath: DepsetConfigFlag.String(ctx, types.TraceTypeCannon), SnapshotFreq: ctx.Uint(CannonSnapshotFreqFlag.Name), @@ -644,7 +736,28 @@ func NewConfigFromCLI(ctx *cli.Context, logger log.Logger) (*config.Config, erro }, CannonAbsolutePreState: ctx.String(CannonPreStateFlag.Name), CannonAbsolutePreStateBaseURL: cannonPreStatesURL, - Datadir: ctx.String(DatadirFlag.Name), + CannonKona: vm.Config{ + VmType: types.TraceTypeCannonKona, + L1: l1EthRpc, + L1Beacon: l1Beacon, + L2s: l2Rpcs, + L2Experimental: l2Experimental, + VmBin: ctx.String(CannonBinFlag.Name), + Server: ctx.String(CannonKonaServerFlag.Name), + Networks: networks, + L2Custom: ctx.Bool(CannonKonaL2CustomFlag.Name), + RollupConfigPaths: RollupConfigFlag.StringSlice(ctx, types.TraceTypeCannonKona), + L1GenesisPath: L1GenesisFlag.String(ctx, types.TraceTypeCannonKona), + L2GenesisPaths: L2GenesisFlag.StringSlice(ctx, types.TraceTypeCannonKona), + DepsetConfigPath: DepsetConfigFlag.String(ctx, types.TraceTypeCannonKona), + SnapshotFreq: ctx.Uint(CannonSnapshotFreqFlag.Name), + InfoFreq: ctx.Uint(CannonInfoFreqFlag.Name), + DebugInfo: true, + BinarySnapshots: true, + }, + CannonKonaAbsolutePreState: ctx.String(CannonKonaPreStateFlag.Name), + CannonKonaAbsolutePreStateBaseURL: cannonKonaPreStatesURL, + Datadir: ctx.String(DatadirFlag.Name), Asterisc: vm.Config{ VmType: types.TraceTypeAsterisc, L1: l1EthRpc, @@ -655,6 +768,7 @@ func NewConfigFromCLI(ctx *cli.Context, logger log.Logger) (*config.Config, erro Server: ctx.String(AsteriscServerFlag.Name), Networks: networks, RollupConfigPaths: RollupConfigFlag.StringSlice(ctx, types.TraceTypeAsterisc), + L1GenesisPath: L1GenesisFlag.String(ctx, types.TraceTypeAsterisc), L2GenesisPaths: 
L2GenesisFlag.StringSlice(ctx, types.TraceTypeAsterisc), DepsetConfigPath: DepsetConfigFlag.String(ctx, types.TraceTypeAsterisc), SnapshotFreq: ctx.Uint(AsteriscSnapshotFreqFlag.Name), @@ -674,6 +788,7 @@ func NewConfigFromCLI(ctx *cli.Context, logger log.Logger) (*config.Config, erro Networks: networks, L2Custom: ctx.Bool(AsteriscKonaL2CustomFlag.Name), RollupConfigPaths: RollupConfigFlag.StringSlice(ctx, types.TraceTypeAsteriscKona), + L1GenesisPath: L1GenesisFlag.String(ctx, types.TraceTypeAsteriscKona), L2GenesisPaths: L2GenesisFlag.StringSlice(ctx, types.TraceTypeAsteriscKona), DepsetConfigPath: DepsetConfigFlag.String(ctx, types.TraceTypeAsteriscKona), SnapshotFreq: ctx.Uint(AsteriscSnapshotFreqFlag.Name), @@ -687,5 +802,7 @@ func NewConfigFromCLI(ctx *cli.Context, logger log.Logger) (*config.Config, erro PprofConfig: pprofConfig, SelectiveClaimResolution: ctx.Bool(SelectiveClaimResolutionFlag.Name), AllowInvalidPrestate: ctx.Bool(UnsafeAllowInvalidPrestate.Name), + ResponseDelay: ctx.Duration(ResponseDelayFlag.Name), + ResponseDelayAfter: ctx.Uint64(ResponseDelayAfterFlag.Name), }, nil } diff --git a/op-challenger/flags/flags_test.go b/op-challenger/flags/flags_test.go index 7d64f787974a8..ca6ae847b7cef 100644 --- a/op-challenger/flags/flags_test.go +++ b/op-challenger/flags/flags_test.go @@ -5,7 +5,9 @@ import ( "slices" "strings" "testing" + "time" + "github.com/ethereum-optimism/optimism/op-challenger/config" opservice "github.com/ethereum-optimism/optimism/op-service" "github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/ethereum/go-ethereum/common" @@ -93,6 +95,38 @@ func TestEnvVarFormat(t *testing.T) { } } +func TestResponseDelayFlag(t *testing.T) { + t.Run("IncludedInOptionalFlags", func(t *testing.T) { + require.Contains(t, optionalFlags, ResponseDelayFlag, "ResponseDelayFlag should be in optionalFlags") + }) + + t.Run("HasCorrectEnvVar", func(t *testing.T) { + envVars := ResponseDelayFlag.GetEnvVars() + require.Len(t, envVars, 1, 
"ResponseDelayFlag should have exactly one env var") + require.Equal(t, "OP_CHALLENGER_RESPONSE_DELAY", envVars[0], "ResponseDelayFlag should have correct env var") + }) + + t.Run("DefaultValue", func(t *testing.T) { + require.Equal(t, time.Duration(config.DefaultResponseDelay), ResponseDelayFlag.Value, "ResponseDelayFlag should have correct default value") + }) +} + +func TestResponseDelayAfterFlag(t *testing.T) { + t.Run("IncludedInOptionalFlags", func(t *testing.T) { + require.Contains(t, optionalFlags, ResponseDelayAfterFlag, "ResponseDelayAfterFlag should be in optionalFlags") + }) + + t.Run("HasCorrectEnvVar", func(t *testing.T) { + envVars := ResponseDelayAfterFlag.GetEnvVars() + require.Len(t, envVars, 1, "ResponseDelayAfterFlag should have exactly one env var") + require.Equal(t, "OP_CHALLENGER_RESPONSE_DELAY_AFTER", envVars[0], "ResponseDelayAfterFlag should have correct env var") + }) + + t.Run("DefaultValue", func(t *testing.T) { + require.Equal(t, uint64(config.DefaultResponseDelayAfter), ResponseDelayAfterFlag.Value, "ResponseDelayAfterFlag should have correct default value") + }) +} + func TestMultipleNetworksMustShareDisputeGameFactory(t *testing.T) { addrs := map[string]superchain.AddressesConfig{ "a1": {DisputeGameFactoryProxy: &common.Address{0xAA}}, diff --git a/op-challenger/game/fault/agent.go b/op-challenger/game/fault/agent.go index 10b28426f4b0b..c76331ce66770 100644 --- a/op-challenger/game/fault/agent.go +++ b/op-challenger/game/fault/agent.go @@ -6,8 +6,10 @@ import ( "fmt" "slices" "sync" + "sync/atomic" "time" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/solver" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" @@ -31,20 +33,27 @@ type Responder interface { type ClaimLoader interface { GetAllClaims(ctx context.Context, block rpcblock.Block) 
([]types.Claim, error) IsL2BlockNumberChallenged(ctx context.Context, block rpcblock.Block) (bool, error) + GetClockExtension(ctx context.Context) (time.Duration, error) + GetSplitDepth(ctx context.Context) (types.Depth, error) + GetMaxGameDepth(ctx context.Context) (types.Depth, error) + GetOracle(ctx context.Context) (contracts.PreimageOracleContract, error) } type Agent struct { - metrics metrics.Metricer - systemClock clock.Clock - l1Clock types.ClockReader - solver *solver.GameSolver - loader ClaimLoader - responder Responder - selective bool - claimants []common.Address - maxDepth types.Depth - maxClockDuration time.Duration - log log.Logger + metrics metrics.Metricer + systemClock clock.Clock + l1Clock types.ClockReader + solver *solver.GameSolver + loader ClaimLoader + responder Responder + selective bool + claimants []common.Address + maxDepth types.Depth + maxClockDuration time.Duration + log log.Logger + responseDelay time.Duration + responseDelayAfter uint64 + responseCount atomic.Uint64 // Number of responses made in this game } func NewAgent( @@ -59,19 +68,24 @@ func NewAgent( log log.Logger, selective bool, claimants []common.Address, + responseDelay time.Duration, + responseDelayAfter uint64, ) *Agent { return &Agent{ - metrics: m, - systemClock: systemClock, - l1Clock: l1Clock, - solver: solver.NewGameSolver(maxDepth, trace), - loader: loader, - responder: responder, - selective: selective, - claimants: claimants, - maxDepth: maxDepth, - maxClockDuration: maxClockDuration, - log: log, + metrics: m, + systemClock: systemClock, + l1Clock: l1Clock, + solver: solver.NewGameSolver(maxDepth, trace), + loader: loader, + responder: responder, + selective: selective, + claimants: claimants, + maxDepth: maxDepth, + maxClockDuration: maxClockDuration, + log: log, + responseDelay: responseDelay, + responseDelayAfter: responseDelayAfter, + // responseCount starts at zero by default } } @@ -106,13 +120,13 @@ func (a *Agent) Act(ctx context.Context) error { var 
wg sync.WaitGroup wg.Add(len(actions)) for _, action := range actions { - go a.performAction(ctx, &wg, action) + go a.performAction(ctx, &wg, game, action) } wg.Wait() return nil } -func (a *Agent) performAction(ctx context.Context, wg *sync.WaitGroup, action types.Action) { +func (a *Agent) performAction(ctx context.Context, wg *sync.WaitGroup, game types.Game, action types.Action) { defer wg.Done() actionLog := a.log.New("action", action.Type) if action.Type == types.ActionTypeStep { @@ -133,6 +147,31 @@ func (a *Agent) performAction(ctx context.Context, wg *sync.WaitGroup, action ty actionLog = actionLog.New("is_attack", action.IsAttack, "parent", action.ParentClaim.ContractIndex, "value", action.Value) } + // Apply configurable delay before responding (to slow down game progression) + // Only apply delay if we've made enough responses already AND we're not in a clock extension period + currentResponseCount := a.responseCount.Load() + shouldCheckDelay := a.responseDelay > 0 && currentResponseCount >= a.responseDelayAfter + + if shouldCheckDelay { + // Check if we're in a clock extension period - if so, respond immediately + inExtension, remainingTimeCheck, err := a.shouldSkipDelay(ctx, game, action) + if err != nil { + actionLog.Warn("Failed to check delay conditions, skipping delay for safety", "err", err) + } else if inExtension { + actionLog.Info("Skipping delay due to clock extension period", "response_count", currentResponseCount, "delay_after", a.responseDelayAfter) + } else if remainingTimeCheck { + actionLog.Info("Skipping delay due to insufficient remaining game time", "response_count", currentResponseCount, "delay_after", a.responseDelayAfter) + } else { + actionLog.Info("Delaying response", "delay", a.responseDelay, "response_count", currentResponseCount, "delay_after", a.responseDelayAfter) + select { + case <-ctx.Done(): + actionLog.Error("Action cancelled during delay", "err", ctx.Err()) + return + case <-a.systemClock.After(a.responseDelay): + } + 
} + } + switch action.Type { case types.ActionTypeMove: a.metrics.RecordGameMove() @@ -141,10 +180,15 @@ func (a *Agent) performAction(ctx context.Context, wg *sync.WaitGroup, action ty case types.ActionTypeChallengeL2BlockNumber: a.metrics.RecordGameL2Challenge() } + actionLog.Info("Performing action") err := a.responder.PerformAction(ctx, action) if err != nil { actionLog.Error("Action failed", "err", err) + } else { + // Increment response count only on successful actions + newCount := a.responseCount.Add(1) + actionLog.Debug("Response count incremented", "response_count", newCount) } } @@ -235,6 +279,43 @@ func (a *Agent) resolveClaims(ctx context.Context) error { } } +// shouldSkipDelay determines if the delay should be skipped for the given action. +// Returns (inClockExtension, insufficientRemainingTime, error). +// Delay should be skipped if either inClockExtension OR insufficientRemainingTime is true. +func (a *Agent) shouldSkipDelay(ctx context.Context, game types.Game, action types.Action) (bool, bool, error) { + // Use proper chess clock calculation from types package + // We need OUR accumulated chess clock time to check if we're in extension period + now := a.l1Clock.Now() + ourAccumulatedTime := game.ChessClock(now, action.ParentClaim) + + // Get base clock extension (conservative approach) + clockExtension, err := a.loader.GetClockExtension(ctx) + if err != nil { + return false, false, fmt.Errorf("failed to get clock extension: %w", err) + } + + // Check if we're already in a clock extension period + maxClockDuration := a.maxClockDuration + extensionThreshold := maxClockDuration - clockExtension + inExtension := ourAccumulatedTime > extensionThreshold + + // Check if our delay would cause us to enter the extension period at all (conservative approach) + // We don't want to risk making moves inside the extension period, so if our delay would + // cause us to exceed the extension threshold, we skip the delay entirely + delayWouldEnterExtension := 
ourAccumulatedTime+a.responseDelay > extensionThreshold + + a.log.Debug("Delay skip check", + "our_accumulated_time", ourAccumulatedTime, + "max_clock_duration", maxClockDuration, + "clock_extension", clockExtension, + "extension_threshold", extensionThreshold, + "response_delay", a.responseDelay, + "in_extension", inExtension, + "delay_would_enter_extension", delayWouldEnterExtension) + + return inExtension, delayWouldEnterExtension, nil +} + // newGameFromContracts initializes a new game state from the state in the contract func (a *Agent) newGameFromContracts(ctx context.Context) (types.Game, error) { claims, err := a.loader.GetAllClaims(ctx, rpcblock.Latest) diff --git a/op-challenger/game/fault/agent_test.go b/op-challenger/game/fault/agent_test.go index 62dc560263e99..8f274dbe106d0 100644 --- a/op-challenger/game/fault/agent_test.go +++ b/op-challenger/game/fault/agent_test.go @@ -9,15 +9,19 @@ import ( "testing" "time" + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace" + "github.com/ethereum-optimism/optimism/op-challenger/game/keccak/merkle" + keccakTypes "github.com/ethereum-optimism/optimism/op-challenger/game/keccak/types" "github.com/ethereum-optimism/optimism/op-service/clock" "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/stretchr/testify/require" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum-optimism/optimism/op-challenger/game/fault/test" + faulttest "github.com/ethereum-optimism/optimism/op-challenger/game/fault/test" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/alphabet" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" @@ -27,6 +31,16 @@ import ( var l1Time = time.UnixMilli(100) 
+// newStubClaimLoaderWithDefaults creates a stubClaimLoader with sensible defaults +// for basic delay tests (prevents clock extension from triggering) +func newStubClaimLoaderWithDefaults() *stubClaimLoader { + return &stubClaimLoader{ + // A large clock extension value used to prevent clock + // extension from triggering during basic delay tests + clockExtension: 1 * time.Hour, + } +} + func TestDoNotMakeMovesWhenGameIsResolvable(t *testing.T) { ctx := context.Background() @@ -73,7 +87,7 @@ func TestDoNotMakeMovesWhenL2BlockNumberChallenged(t *testing.T) { } func createClaimsWithClaimants(t *testing.T, d types.Depth) []types.Claim { - claimBuilder := test.NewClaimBuilder(t, d, alphabet.NewTraceProvider(big.NewInt(0), d)) + claimBuilder := faulttest.NewClaimBuilder(t, d, alphabet.NewTraceProvider(big.NewInt(0), d)) rootClaim := claimBuilder.CreateRootClaim() claim1 := rootClaim claim1.Claimant = common.BigToAddress(big.NewInt(1)) @@ -158,14 +172,14 @@ func TestSkipAttemptingToResolveClaimsWhenClockNotExpired(t *testing.T) { responder.callResolveErr = errors.New("game is not resolvable") responder.callResolveClaimErr = errors.New("claim is not resolvable") depth := types.Depth(4) - claimBuilder := test.NewClaimBuilder(t, depth, alphabet.NewTraceProvider(big.NewInt(0), depth)) + claimBuilder := faulttest.NewClaimBuilder(t, depth, alphabet.NewTraceProvider(big.NewInt(0), depth)) rootTime := l1Time.Add(-agent.maxClockDuration - 5*time.Minute) - gameBuilder := claimBuilder.GameBuilder(test.WithClock(rootTime, 0)) + gameBuilder := claimBuilder.GameBuilder(faulttest.WithClock(rootTime, 0)) gameBuilder.Seq(). - Attack(test.WithClock(rootTime.Add(5*time.Minute), 5*time.Minute)). - Defend(test.WithClock(rootTime.Add(7*time.Minute), 2*time.Minute)). - Attack(test.WithClock(rootTime.Add(11*time.Minute), 4*time.Minute)) + Attack(faulttest.WithClock(rootTime.Add(5*time.Minute), 5*time.Minute)). + Defend(faulttest.WithClock(rootTime.Add(7*time.Minute), 2*time.Minute)). 
+ Attack(faulttest.WithClock(rootTime.Add(11*time.Minute), 4*time.Minute)) claimLoader.claims = gameBuilder.Game.Claims() require.NoError(t, agent.Act(context.Background())) @@ -181,7 +195,7 @@ func TestLoadClaimsWhenGameNotResolvable(t *testing.T) { responder.callResolveErr = errors.New("game is not resolvable") responder.callResolveClaimErr = errors.New("claim is not resolvable") depth := types.Depth(4) - claimBuilder := test.NewClaimBuilder(t, depth, alphabet.NewTraceProvider(big.NewInt(0), depth)) + claimBuilder := faulttest.NewClaimBuilder(t, depth, alphabet.NewTraceProvider(big.NewInt(0), depth)) claimLoader.claims = []types.Claim{ claimBuilder.CreateRootClaim(), @@ -198,12 +212,12 @@ func setupTestAgent(t *testing.T) (*Agent, *stubClaimLoader, *stubResponder) { logger := testlog.Logger(t, log.LevelInfo) claimLoader := &stubClaimLoader{} depth := types.Depth(4) - gameDuration := 3 * time.Minute + gameDuration := 24 * time.Hour provider := alphabet.NewTraceProvider(big.NewInt(0), depth) responder := &stubResponder{} systemClock := clock.NewDeterministicClock(time.UnixMilli(120200)) l1Clock := clock.NewDeterministicClock(l1Time) - agent := NewAgent(metrics.NoopMetrics, systemClock, l1Clock, claimLoader, depth, gameDuration, trace.NewSimpleTraceAccessor(provider), responder, logger, false, []common.Address{}) + agent := NewAgent(metrics.NoopMetrics, systemClock, l1Clock, claimLoader, depth, gameDuration, trace.NewSimpleTraceAccessor(provider), responder, logger, false, []common.Address{}, 0, 0) return agent, claimLoader, responder } @@ -212,6 +226,10 @@ type stubClaimLoader struct { maxLoads int claims []types.Claim blockNumChallenged bool + clockExtension time.Duration + clockExtensionErr error + splitDepth types.Depth + maxGameDepth types.Depth } func (s *stubClaimLoader) IsL2BlockNumberChallenged(_ context.Context, _ rpcblock.Block) (bool, error) { @@ -226,6 +244,101 @@ func (s *stubClaimLoader) GetAllClaims(_ context.Context, _ rpcblock.Block) ([]t return 
s.claims, nil } +func (s *stubClaimLoader) GetClockExtension(_ context.Context) (time.Duration, error) { + if s.clockExtensionErr != nil { + return 0, s.clockExtensionErr + } + // Return a reasonable default if not set + if s.clockExtension == 0 { + return 5 * time.Minute, nil // Default clock extension + } + return s.clockExtension, nil +} + +func (s *stubClaimLoader) GetSplitDepth(_ context.Context) (types.Depth, error) { + if s.splitDepth != 0 { + return s.splitDepth, nil + } + return types.Depth(30), nil // Reasonable default for tests +} + +func (s *stubClaimLoader) GetMaxGameDepth(_ context.Context) (types.Depth, error) { + if s.maxGameDepth != 0 { + return s.maxGameDepth, nil + } + return types.Depth(73), nil // Reasonable default for tests +} + +func (s *stubClaimLoader) GetOracle(_ context.Context) (contracts.PreimageOracleContract, error) { + return &stubPreimageOracleContract{}, nil +} + +// stubPreimageOracleContract implements the PreimageOracleContract interface for testing +type stubPreimageOracleContract struct{} + +func (s *stubPreimageOracleContract) ChallengePeriod(_ context.Context) (uint64, error) { + return 86400, nil // 1 day in seconds - reasonable default for tests +} + +// Add minimal implementations for other required methods (if any) +func (s *stubPreimageOracleContract) Addr() common.Address { return common.Address{} } +func (s *stubPreimageOracleContract) AddGlobalDataTx(*types.PreimageOracleData) (txmgr.TxCandidate, error) { + return txmgr.TxCandidate{}, nil +} +func (s *stubPreimageOracleContract) InitLargePreimage(*big.Int, uint32, uint32) (txmgr.TxCandidate, error) { + return txmgr.TxCandidate{}, nil +} +func (s *stubPreimageOracleContract) AddLeaves(*big.Int, *big.Int, []byte, []common.Hash, bool) (txmgr.TxCandidate, error) { + return txmgr.TxCandidate{}, nil +} +func (s *stubPreimageOracleContract) MinLargePreimageSize(context.Context) (uint64, error) { + return 0, nil +} +func (s *stubPreimageOracleContract) 
CallSqueeze(context.Context, common.Address, *big.Int, keccakTypes.StateSnapshot, keccakTypes.Leaf, merkle.Proof, keccakTypes.Leaf, merkle.Proof) error { + return nil +} +func (s *stubPreimageOracleContract) Squeeze(common.Address, *big.Int, keccakTypes.StateSnapshot, keccakTypes.Leaf, merkle.Proof, keccakTypes.Leaf, merkle.Proof) (txmgr.TxCandidate, error) { + return txmgr.TxCandidate{}, nil +} +func (s *stubPreimageOracleContract) GetActivePreimages(context.Context, common.Hash) ([]keccakTypes.LargePreimageMetaData, error) { + return nil, nil +} +func (s *stubPreimageOracleContract) GetProposalMetadata(context.Context, rpcblock.Block, ...keccakTypes.LargePreimageIdent) ([]keccakTypes.LargePreimageMetaData, error) { + return nil, nil +} +func (s *stubPreimageOracleContract) GetProposalTreeRoot(context.Context, rpcblock.Block, keccakTypes.LargePreimageIdent) (common.Hash, error) { + return common.Hash{}, nil +} +func (s *stubPreimageOracleContract) GetInputDataBlocks(context.Context, rpcblock.Block, keccakTypes.LargePreimageIdent) ([]uint64, error) { + return nil, nil +} +func (s *stubPreimageOracleContract) DecodeInputData([]byte) (*big.Int, keccakTypes.InputData, error) { + return nil, keccakTypes.InputData{}, nil +} +func (s *stubPreimageOracleContract) GlobalDataExists(context.Context, *types.PreimageOracleData) (bool, error) { + return false, nil +} +func (s *stubPreimageOracleContract) GetGlobalData(context.Context, *types.PreimageOracleData) ([32]byte, error) { + return [32]byte{}, nil +} +func (s *stubPreimageOracleContract) ChallengeTx(keccakTypes.LargePreimageIdent, keccakTypes.Challenge) (txmgr.TxCandidate, error) { + return txmgr.TxCandidate{}, nil +} +func (s *stubPreimageOracleContract) GetMinBondLPP(context.Context) (*big.Int, error) { + return big.NewInt(0), nil +} + +// createStubGame creates a mock game for testing performAction calls +func createStubGame(claims []types.Claim) types.Game { + if len(claims) == 0 { + // Create a default root claim 
for tests + claims = []types.Claim{ + faulttest.NewClaimBuilder(nil, types.Depth(4), alphabet.NewTraceProvider(big.NewInt(0), types.Depth(4))).CreateRootClaim(), + } + } + return types.NewGameState(claims, types.Depth(4)) +} + type stubResponder struct { l sync.Mutex callResolveCount int @@ -239,6 +352,9 @@ type stubResponder struct { callResolveClaimErr error resolveClaimCount int resolvedClaims []uint64 + + performActionCount int + performActionErr error // If set, PerformAction will return this error } func (s *stubResponder) CallResolve(_ context.Context) (gameTypes.GameStatus, error) { @@ -274,5 +390,777 @@ func (s *stubResponder) ResolveClaims(claims ...uint64) error { } func (s *stubResponder) PerformAction(_ context.Context, _ types.Action) error { - return nil + s.l.Lock() + defer s.l.Unlock() + s.performActionCount++ + return s.performActionErr +} + +func (s *stubResponder) PerformedActionCount() int { + s.l.Lock() + defer s.l.Unlock() + return s.performActionCount +} + +// TestResponseDelay tests the response delay functionality using deterministic clock +func TestResponseDelay(t *testing.T) { + tests := []struct { + name string + delay time.Duration + }{ + { + name: "NoDelay", + delay: 0, + }, + { + name: "Delay", + delay: 20 * time.Hour, // Less than extension threshold (24h - 1h = 23h) + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + + // Set up agent with deterministic clock + logger := testlog.Logger(t, log.LevelInfo) + claimLoader := newStubClaimLoaderWithDefaults() + depth := types.Depth(4) + gameDuration := 24 * time.Hour // Large value to avoid clock extension triggering + provider := alphabet.NewTraceProvider(big.NewInt(0), depth) + responder := &stubResponder{} + systemClock := clock.NewDeterministicClock(time.UnixMilli(120200)) + l1Clock := clock.NewDeterministicClock(l1Time) + + // Create agent with the test response delay + agent := NewAgent(metrics.NoopMetrics, 
systemClock, l1Clock, claimLoader, depth, gameDuration, trace.NewSimpleTraceAccessor(provider), responder, logger, false, []common.Address{}, test.delay, 0) + + // Set up game state with a claim to respond to + claimLoader.claims = []types.Claim{ + { + ClaimData: types.ClaimData{ + Value: common.Hash{}, + Position: types.NewPositionFromGIndex(big.NewInt(1)), + }, + Clock: types.Clock{ + Duration: time.Minute, + Timestamp: l1Time, + }, + ContractIndex: 0, + }, + } + + // Create an action that will trigger the delay + action := types.Action{ + Type: types.ActionTypeMove, + ParentClaim: claimLoader.claims[0], + IsAttack: true, + Value: common.Hash{0x01}, + } + + // Perform action in a goroutine so we can control clock advancement + var wg sync.WaitGroup + wg.Add(1) + + done := make(chan struct{}) + go func() { + agent.performAction(ctx, &wg, createStubGame(claimLoader.claims), action) + close(done) + }() + + if test.delay > 0 { + // Wait for the action delay to begin waiting + require.True(t, systemClock.WaitForNewPendingTaskWithTimeout(30*time.Second)) + require.Zero(t, responder.PerformedActionCount(), "Action should not have completed before delay period") + + systemClock.AdvanceTime(test.delay) + } + // Verify the action completes + select { + case <-done: + // Expected completion due to cancellation + case <-time.After(30 * time.Second): + t.Fatal("Action did not complete quickly after cancellation") + } + // And verify the wait group is done for good measure + wg.Wait() + + require.Equal(t, 1, responder.PerformedActionCount(), "Action should have completed after delay period") + }) + } +} + +// TestResponseDelayContextCancellation tests that context cancellation interrupts the delay +func TestResponseDelayContextCancellation(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + // Set up agent with long delay and deterministic clock + logger := testlog.Logger(t, log.LevelInfo) + claimLoader := newStubClaimLoaderWithDefaults() + depth := 
types.Depth(4) + gameDuration := 24 * time.Hour + provider := alphabet.NewTraceProvider(big.NewInt(0), depth) + responder := &stubResponder{} + systemClock := clock.NewDeterministicClock(time.UnixMilli(120200)) + l1Clock := clock.NewDeterministicClock(l1Time) + + longDelay := 5 * time.Minute + agent := NewAgent(metrics.NoopMetrics, systemClock, l1Clock, claimLoader, depth, gameDuration, trace.NewSimpleTraceAccessor(provider), responder, logger, false, []common.Address{}, longDelay, 0) + + // Set up game state + claimLoader.claims = []types.Claim{ + { + ClaimData: types.ClaimData{ + Value: common.Hash{}, + Position: types.NewPositionFromGIndex(big.NewInt(1)), + }, + Clock: types.Clock{ + Duration: time.Minute, + Timestamp: l1Time, + }, + ContractIndex: 0, + }, + } + + action := types.Action{ + Type: types.ActionTypeMove, + ParentClaim: claimLoader.claims[0], + IsAttack: true, + Value: common.Hash{0x01}, + } + + var wg sync.WaitGroup + wg.Add(1) + + done := make(chan struct{}) + go func() { + agent.performAction(ctx, &wg, createStubGame(claimLoader.claims), action) + close(done) + }() + + // Verify the action is waiting for the delay + systemClock.WaitForNewPendingTaskWithTimeout(30 * time.Second) + + // Cancel the context (simulates timeout or shutdown) + cancel() + + // Action should complete even though the clock didn't progress + select { + case <-done: + // Expected completion due to cancellation + case <-time.After(30 * time.Second): + t.Fatal("Action did not complete quickly after cancellation") + } + + // And verify the wait group is done for good measure + wg.Wait() + require.Zero(t, responder.PerformedActionCount(), "Action should not have completed") +} + +// TestResponseDelayDifferentActionTypes tests that delay applies to all action types +func TestResponseDelayDifferentActionTypes(t *testing.T) { + actionTypes := []struct { + name string + actionType types.ActionType + }{ + {"Move", types.ActionTypeMove}, + {"Step", types.ActionTypeStep}, + 
{"ChallengeL2BlockNumber", types.ActionTypeChallengeL2BlockNumber}, + } + + for _, actionTest := range actionTypes { + actionTest := actionTest + t.Run(actionTest.name, func(t *testing.T) { + ctx := context.Background() + + // Set up agent with deterministic clock and response delay + logger := testlog.Logger(t, log.LevelInfo) + claimLoader := newStubClaimLoaderWithDefaults() + depth := types.Depth(4) + gameDuration := 24 * time.Hour // Large value to avoid clock extension triggering + provider := alphabet.NewTraceProvider(big.NewInt(0), depth) + responder := &stubResponder{} + systemClock := clock.NewDeterministicClock(time.UnixMilli(120200)) + l1Clock := clock.NewDeterministicClock(l1Time) + + responseDelay := 3 * time.Hour + agent := NewAgent(metrics.NoopMetrics, systemClock, l1Clock, claimLoader, depth, gameDuration, trace.NewSimpleTraceAccessor(provider), responder, logger, false, []common.Address{}, responseDelay, 0) + + // Set up game state + claimLoader.claims = []types.Claim{ + { + ClaimData: types.ClaimData{ + Value: common.Hash{}, + Position: types.NewPositionFromGIndex(big.NewInt(1)), + }, + Clock: types.Clock{ + Duration: time.Minute, + Timestamp: l1Time, + }, + ContractIndex: 0, + }, + } + + // Create action of specific type + action := types.Action{ + Type: actionTest.actionType, + ParentClaim: claimLoader.claims[0], + IsAttack: true, + Value: common.Hash{0x01}, + } + + var wg sync.WaitGroup + wg.Add(1) + + done := make(chan struct{}) + go func() { + agent.performAction(ctx, &wg, createStubGame(claimLoader.claims), action) + close(done) + }() + + // First select: Verify the action is waiting for the delay (polling check) + systemClock.WaitForNewPendingTaskWithTimeout(30 * time.Second) + require.Zero(t, responder.PerformedActionCount(), "Action was performed before delay") + + // Advance clock by delay amount + systemClock.AdvanceTime(responseDelay) + + // Second select: Wait for action to complete after clock advancement + select { + case <-done: + 
// Expected completion + case <-time.After(30 * time.Second): + t.Fatal("Action did not complete after delay") + } + // Verify the wait group is done for good measure + wg.Wait() + + // Verify the action was performed + require.Equal(t, 1, responder.PerformedActionCount(), "Action was not performed after delay") + }) + } +} + +// TestResponseDelayAfter tests the response delay activation threshold functionality +func TestResponseDelayAfter(t *testing.T) { + tests := []struct { + name string + responseDelay time.Duration + responseDelayAfter uint64 + actionsToPerform int + }{ + { + name: "DelayFromFirstResponse", + responseDelay: 2 * time.Hour, + responseDelayAfter: 0, // Apply delay from first response + actionsToPerform: 3, + }, + { + name: "DelayAfterFirstResponse", + responseDelay: 2 * time.Hour, + responseDelayAfter: 1, // Skip first response, delay subsequent ones + actionsToPerform: 3, + }, + { + name: "DelayAfterSecondResponse", + responseDelay: 2 * time.Hour, + responseDelayAfter: 2, // Skip first two responses + actionsToPerform: 4, + }, + { + name: "DelayNeverActivates", + responseDelay: 2 * time.Hour, + responseDelayAfter: 5, // Threshold higher than actions performed + actionsToPerform: 3, + }, + { + name: "NoDelayConfigured", + responseDelay: 0, // No delay configured + responseDelayAfter: 0, + actionsToPerform: 3, + }, + } + + for _, test := range tests { + test := test + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + + // Set up agent with deterministic clock + logger := testlog.Logger(t, log.LevelInfo) + claimLoader := newStubClaimLoaderWithDefaults() + depth := types.Depth(4) + gameDuration := 24 * time.Hour // Large value to avoid clock extension triggering + provider := alphabet.NewTraceProvider(big.NewInt(0), depth) + responder := &stubResponder{} + systemClock := clock.NewDeterministicClock(time.UnixMilli(120200)) + l1Clock := clock.NewDeterministicClock(l1Time) + + // Create agent with test parameters + agent := 
NewAgent(metrics.NoopMetrics, systemClock, l1Clock, claimLoader, depth, gameDuration, trace.NewSimpleTraceAccessor(provider), responder, logger, false, []common.Address{}, test.responseDelay, test.responseDelayAfter) + + // Set up initial game state + claimBuilder := faulttest.NewClaimBuilder(t, depth, provider) + baseClaim := claimBuilder.CreateRootClaim() + // Fix timestamp to be realistic + baseClaim.Clock = types.Clock{ + Duration: 0, // Root claim starts with no accumulated time + Timestamp: l1Clock.Now(), // Use current time + } + claimLoader.claims = []types.Claim{baseClaim} + + // Perform actions and verify delay behavior + for i := 0; i < test.actionsToPerform; i++ { + action := types.Action{ + Type: types.ActionTypeMove, + ParentClaim: baseClaim, + IsAttack: true, + Value: common.Hash{byte(i + 1)}, // Unique value for each action + } + + var wg sync.WaitGroup + wg.Add(1) + + done := make(chan struct{}) + go func() { + agent.performAction(ctx, &wg, createStubGame(claimLoader.claims), action) + close(done) + }() + + // Calculate if delay should be applied: response count >= threshold AND delay > 0 + shouldHaveDelay := uint64(i) >= test.responseDelayAfter && test.responseDelay > 0 + + if shouldHaveDelay { + systemClock.WaitForNewPendingTaskWithTimeout(30 * time.Second) + require.Equal(t, i, responder.PerformedActionCount(), "Action was performed before delay") + + // Advance clock by delay amount + systemClock.AdvanceTime(test.responseDelay) + } + + // Wait for completion + select { + case <-done: + // Expected completion + case <-time.After(30 * time.Second): + t.Fatalf("Action %d did not complete after delay", i+1) + } + wg.Wait() + + // Verify response count incremented (assuming successful response) + expectedCount := uint64(i + 1) + require.Equal(t, expectedCount, agent.responseCount.Load(), "Response count should increment after action %d", expectedCount) + } + }) + } +} + +// TestResponseDelayAfterWithFailedActions tests that failed actions don't 
increment response count +func TestResponseDelayAfterWithFailedActions(t *testing.T) { + ctx := context.Background() + + // Set up agent with delay after 1 response + logger := testlog.Logger(t, log.LevelInfo) + claimLoader := newStubClaimLoaderWithDefaults() + depth := types.Depth(4) + gameDuration := 24 * time.Hour + provider := alphabet.NewTraceProvider(big.NewInt(0), depth) + responder := &stubResponder{} + systemClock := clock.NewDeterministicClock(time.UnixMilli(120200)) + l1Clock := clock.NewDeterministicClock(l1Time) + + responseDelay := 2 * time.Hour + responseDelayAfter := uint64(1) // Delay after first successful response + agent := NewAgent(metrics.NoopMetrics, systemClock, l1Clock, claimLoader, depth, gameDuration, trace.NewSimpleTraceAccessor(provider), responder, logger, false, []common.Address{}, responseDelay, responseDelayAfter) + + // Set up game state + claimBuilder := faulttest.NewClaimBuilder(t, depth, provider) + baseClaim := claimBuilder.CreateRootClaim() + // Fix timestamp to be realistic + baseClaim.Clock = types.Clock{ + Duration: 0, // Root claim starts with no accumulated time + Timestamp: l1Clock.Now(), // Use current time + } + claimLoader.claims = []types.Claim{baseClaim} + + action := types.Action{ + Type: types.ActionTypeMove, + ParentClaim: baseClaim, + IsAttack: true, + Value: common.Hash{0x01}, + } + + // First action: make responder fail + responder.performActionErr = errors.New("simulated action failure") + + var wg sync.WaitGroup + wg.Add(1) + + done := make(chan struct{}) + go func() { + agent.performAction(ctx, &wg, createStubGame(claimLoader.claims), action) + close(done) + }() + + // Should complete without needing to advance the clock (no delay since responseCount < responseDelayAfter) + select { + case <-done: + // Expected immediate completion + case <-time.After(30 * time.Second): + t.Fatal("Failed action took too long") + } + wg.Wait() + + require.Equal(t, uint64(0), agent.responseCount.Load(), "Failed action should 
not increment response count") + + // Second action: make responder succeed + responder.performActionErr = nil + + wg.Add(1) + done = make(chan struct{}) + go func() { + agent.performAction(ctx, &wg, createStubGame(claimLoader.claims), action) + close(done) + }() + + // Should complete without needing to advance the clock (no delay since responseCount is still 0) + select { + case <-done: + // Expected immediate completion + case <-time.After(30 * time.Second): + t.Fatal("Successful action took too long") + } + wg.Wait() + + // Should be no delay but response count should increment + require.Equal(t, uint64(1), agent.responseCount.Load(), "Successful action should increment response count") + + // Third action: should now have delay applied + wg.Add(1) + done = make(chan struct{}) + go func() { + agent.performAction(ctx, &wg, createStubGame(claimLoader.claims), action) + close(done) + }() + + // Should be waiting for delay now (responseCount >= responseDelayAfter) + systemClock.WaitForNewPendingTaskWithTimeout(30 * time.Second) + // Note: 2 attempts have been made - one failed, one successful and the third is delayed. 
+ require.Equal(t, 2, responder.PerformedActionCount(), "Should not have performed action without delay") + + // Advance clock by delay amount + systemClock.AdvanceTime(responseDelay) + + // Wait for completion + select { + case <-done: + // Expected completion + case <-time.After(30 * time.Second): + t.Fatal("Action did not complete after delay") + } + + wg.Wait() + + require.Equal(t, 3, responder.PerformedActionCount(), "Should have performed action after delay") + require.Equal(t, uint64(2), agent.responseCount.Load(), "Response count should be 2 after second successful action") +} + +// TestResponseDelayClockExtension tests that delays are skipped during clock extension periods +func TestResponseDelayClockExtension(t *testing.T) { + // Common test configuration + const ( + responseDelay = 30 * time.Second // Reasonable delay that fits in remaining time + responseDelayAfter = 0 + maxClockDuration = 10 * time.Minute + clockExtension = 1 * time.Minute + baseTimestamp = 100000 // milliseconds since Unix epoch + ) + extensionThreshold := maxClockDuration - clockExtension // 9 minutes + + tests := []struct { + name string + parentClockDuration time.Duration // Previous accumulated time + timeSinceCreation time.Duration // Additional time since claim created + }{ + { + name: "NoExtension_WithDelay", + parentClockDuration: 3 * time.Minute, + timeSinceCreation: 1 * time.Minute, // Total: 4min < 9min threshold + }, + { + name: "InExtension_SkipDelay", + parentClockDuration: 8 * time.Minute, + timeSinceCreation: 2 * time.Minute, // Total: 10min > 9min threshold + }, + { + name: "ExactlyAtThreshold_InExtension_SkipDelay", + parentClockDuration: 8 * time.Minute, + timeSinceCreation: 1*time.Minute + 1*time.Microsecond, // Total: just over 9min + }, + { + name: "JustBelowThreshold_WithDelay_WaitDelay", + parentClockDuration: 8 * time.Minute, + timeSinceCreation: 20 * time.Second, // Total: 8min20s + 30s delay = 8min50s < 9min threshold + }, + } + + for _, test := range tests 
{ + test := test + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + + // Set up agent with deterministic clock + logger := testlog.Logger(t, log.LevelInfo) + claimLoader := &stubClaimLoader{ + clockExtension: clockExtension, + } + depth := types.Depth(4) + provider := alphabet.NewTraceProvider(big.NewInt(0), depth) + responder := &stubResponder{} + currentTime := time.UnixMilli(baseTimestamp).Add(test.timeSinceCreation) + systemClock := clock.NewDeterministicClock(currentTime) + l1Clock := clock.NewDeterministicClock(currentTime) + + // Create agent with test parameters + agent := NewAgent(metrics.NoopMetrics, systemClock, l1Clock, claimLoader, depth, maxClockDuration, trace.NewSimpleTraceAccessor(provider), responder, logger, false, []common.Address{}, responseDelay, responseDelayAfter) + + // Set up proper parent-child relationship for chess clock calculation + claimBuilder := faulttest.NewClaimBuilder(t, depth, provider) + + // Create a grandparent claim (root claim) that has the accumulated time from previous moves + grandparentClaim := claimBuilder.CreateRootClaim(faulttest.WithClock( + currentTime.Add(-test.timeSinceCreation).Add(-time.Duration(test.parentClockDuration.Nanoseconds())), + test.parentClockDuration, + )) + grandparentClaim.ContractIndex = 0 // Root claim + + // Create parent claim as an attack on the grandparent (so it's NOT a root claim) + parentClaim := claimBuilder.AttackClaim(grandparentClaim, faulttest.WithClock( + currentTime.Add(-test.timeSinceCreation), + 0, // This will be calculated by ChessClock + )) + parentClaim.ContractIndex = 1 // Set contract index + + // Calculate total chess clock time using the same logic as the contract + // This should be grandparent.Duration + time since parent was created + totalChessClockTime := test.parentClockDuration + test.timeSinceCreation + expectDelay := totalChessClockTime <= extensionThreshold + claimLoader.claims = []types.Claim{grandparentClaim, parentClaim} + + // Create 
action with the parent claim + action := types.Action{ + Type: types.ActionTypeMove, + ParentClaim: parentClaim, + IsAttack: true, + Value: common.Hash{0x01}, + } + + // Perform action and measure timing + var wg sync.WaitGroup + wg.Add(1) + + done := make(chan struct{}) + go func() { + agent.performAction(ctx, &wg, createStubGame(claimLoader.claims), action) + close(done) + }() + + if expectDelay { + // Should be waiting for delay + systemClock.WaitForNewPendingTaskWithTimeout(30 * time.Second) + require.Equal(t, 0, responder.PerformedActionCount(), "Should not have performed action without delay") + + // Advance clock by delay amount + systemClock.AdvanceTime(responseDelay) + } + + // Wait for completion + select { + case <-done: + // Expected completion + case <-time.After(30 * time.Second): + t.Fatal("Action did not complete in expected time") + } + wg.Wait() + + require.Equal(t, 1, responder.PerformedActionCount(), "Should have performed action after delay") + }) + } +} + +// TestResponseDelayTimeoutPrevention tests delay timeout prevention logic +func TestResponseDelayTimeoutPrevention(t *testing.T) { + const ( + responseDelayAfter = 0 + maxClockDuration = 10 * time.Minute + clockExtension = 2 * time.Minute + ) + + tests := []struct { + name string + parentClockDuration time.Duration + responseDelay time.Duration + expectDelay bool + description string + }{ + { + name: "DelayFitsInExtensionBuffer_ShouldSkip", + parentClockDuration: 8*time.Minute + 30*time.Second, // Past threshold but delay fits + responseDelay: 1 * time.Minute, // Fits in 2min extension + expectDelay: false, // Should skip due to extension period + description: "When in extension period, should skip delay regardless of timeout risk", + }, + { + name: "DelayWouldTimeout_ShouldSkip", + parentClockDuration: 9*time.Minute + 30*time.Second, // Already in extension (threshold 8min) + responseDelay: 3 * time.Minute, // Large delay + expectDelay: false, // Should skip due to being in extension + 
description: "Should skip delay when already in extension period", + }, + { + name: "DelayWouldEnterExtensionPeriod_ShouldSkip", + parentClockDuration: 6 * time.Minute, // Not in extension (8min threshold) + responseDelay: 3 * time.Minute, // Would push us to 9min > 8min threshold + expectDelay: false, // Should skip to avoid extension period + description: "Should skip delay when it would cause entry into extension period", + }, + { + name: "BeforeThreshold_ShouldDelay", + parentClockDuration: 5 * time.Minute, // Well before threshold, 5min remaining + responseDelay: 30 * time.Second, // Short delay that fits in remaining time + expectDelay: true, // Should apply delay + description: "Should apply delay when well before extension threshold and delay fits", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + ctx := context.Background() + logger := testlog.Logger(t, log.LevelInfo) + + claimLoader := &stubClaimLoader{ + clockExtension: clockExtension, + } + depth := types.Depth(4) + provider := alphabet.NewTraceProvider(big.NewInt(0), depth) + responder := &stubResponder{} + + // Set up timing so parentClockDuration calculation works + currentTime := time.UnixMilli(100000) + systemClock := clock.NewDeterministicClock(currentTime) + l1Clock := clock.NewDeterministicClock(currentTime) + + agent := NewAgent(metrics.NoopMetrics, systemClock, l1Clock, claimLoader, depth, maxClockDuration, trace.NewSimpleTraceAccessor(provider), responder, logger, false, []common.Address{}, test.responseDelay, responseDelayAfter) + + // Create claims with proper parent-child relationship for chess clock calculation + claimBuilder := faulttest.NewClaimBuilder(t, depth, provider) + timeSinceCreation := 1 * time.Minute // Fixed component + + // Create grandparent claim (root claim) that has the accumulated time from previous moves + grandparentClaim := claimBuilder.CreateRootClaim(faulttest.WithClock( + 
currentTime.Add(-timeSinceCreation).Add(-time.Duration(test.parentClockDuration.Nanoseconds())), + test.parentClockDuration, + )) + grandparentClaim.ContractIndex = 0 // Root claim + + // Create parent claim as an attack on the grandparent (so it's NOT a root claim) + parentClaim := claimBuilder.AttackClaim(grandparentClaim, faulttest.WithClock( + currentTime.Add(-timeSinceCreation), + 0, // This will be calculated by ChessClock + )) + parentClaim.ContractIndex = 1 // Set contract index + + claimLoader.claims = []types.Claim{grandparentClaim, parentClaim} + + action := types.Action{ + Type: types.ActionTypeMove, + ParentClaim: parentClaim, + IsAttack: true, + Value: common.Hash{0x01}, + } + + // Perform action and check timing + var wg sync.WaitGroup + wg.Add(1) + done := make(chan struct{}) + go func() { + agent.performAction(ctx, &wg, createStubGame(claimLoader.claims), action) + close(done) + }() + + if test.expectDelay { + // Should wait for delay + systemClock.WaitForNewPendingTaskWithTimeout(30 * time.Second) + require.Equal(t, 0, responder.PerformedActionCount(), "Should be waiting for delay") + + // Advance clock and complete + systemClock.AdvanceTime(test.responseDelay) + } + + // Wait for completion - using longer timeout for CI reliability + select { + case <-done: + // Expected completion + case <-time.After(30 * time.Second): + t.Fatal("Action did not complete - this indicates a test logic error") + } + wg.Wait() + + require.Equal(t, 1, responder.PerformedActionCount(), test.description) + }) + } +} + +// TestResponseDelayClockExtensionError tests error handling when clock extension detection fails +func TestResponseDelayClockExtensionError(t *testing.T) { + ctx := context.Background() + + // Set up agent with claimLoader that returns an error for clock extension + logger := testlog.Logger(t, log.LevelInfo) + claimLoader := &stubClaimLoader{ + clockExtensionErr: errors.New("failed to get clock extension"), + } + depth := types.Depth(4) + provider := 
alphabet.NewTraceProvider(big.NewInt(0), depth) + responder := &stubResponder{} + systemClock := clock.NewDeterministicClock(time.UnixMilli(120200)) + l1Clock := clock.NewDeterministicClock(time.UnixMilli(120200)) + + responseDelay := 2 * time.Hour + maxClockDuration := 10 * time.Minute // Use a reasonable default for error test + agent := NewAgent(metrics.NoopMetrics, systemClock, l1Clock, claimLoader, depth, maxClockDuration, trace.NewSimpleTraceAccessor(provider), responder, logger, false, []common.Address{}, responseDelay, 0) + + // Set up game state + claimBuilder := faulttest.NewClaimBuilder(t, depth, provider) + baseClaim := claimBuilder.CreateRootClaim() + claimLoader.claims = []types.Claim{baseClaim} + + action := types.Action{ + Type: types.ActionTypeMove, + ParentClaim: baseClaim, + IsAttack: true, + Value: common.Hash{0x01}, + } + + // Perform action - should still apply delay despite error + var wg sync.WaitGroup + wg.Add(1) + + done := make(chan struct{}) + go func() { + agent.performAction(ctx, &wg, createStubGame(claimLoader.claims), action) + close(done) + }() + + // Should complete without needing to advance clock (no delay applied for safety when extension detection fails) + select { + case <-done: + // Expected - immediate completion + case <-time.After(30 * time.Second): + t.Fatal("Action did not complete immediately when extension detection fails") + } + wg.Wait() + + require.Equal(t, 1, responder.PerformedActionCount(), "Should have performed action") } diff --git a/op-challenger/game/fault/clients.go b/op-challenger/game/fault/clients.go index 8651917169d7d..edb18e9bdea5e 100644 --- a/op-challenger/game/fault/clients.go +++ b/op-challenger/game/fault/clients.go @@ -7,9 +7,7 @@ import ( "github.com/ethereum-optimism/optimism/op-challenger/config" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/super" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" - 
"github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/dial" - "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" ) @@ -64,7 +62,7 @@ func (c *clientProvider) RollupClient() (RollupClient, error) { if c.rollupClient != nil { return c.rollupClient, nil } - rollupClient, err := dial.DialRollupClientWithTimeout(c.ctx, dial.DefaultDialTimeout, c.logger, c.cfg.RollupRpc) + rollupClient, err := dial.DialRollupClientWithTimeout(c.ctx, c.logger, c.cfg.RollupRpc) if err != nil { return nil, fmt.Errorf("dial rollup client %v: %w", c.cfg.RollupRpc, err) } @@ -75,11 +73,10 @@ func (c *clientProvider) RollupClient() (RollupClient, error) { } func (c *clientProvider) SuperchainClients() (super.RootProvider, *super.SyncValidator, error) { - cl, err := client.NewRPC(context.Background(), c.logger, c.cfg.SupervisorRPC) + supervisorClient, err := dial.DialSupervisorClientWithTimeout(c.ctx, c.logger, c.cfg.SupervisorRPC) if err != nil { return nil, nil, fmt.Errorf("failed to dial supervisor: %w", err) } - supervisorClient := sources.NewSupervisorClient(cl) c.rootProvider = supervisorClient c.toClose = append(c.toClose, supervisorClient.Close) return supervisorClient, super.NewSyncValidator(supervisorClient), nil diff --git a/op-challenger/game/fault/contracts/detect.go b/op-challenger/game/fault/contracts/detect.go index 0e6e0b3641eb3..32c8b03ed90b6 100644 --- a/op-challenger/game/fault/contracts/detect.go +++ b/op-challenger/game/fault/contracts/detect.go @@ -31,12 +31,14 @@ func DetectGameType(ctx context.Context, addr common.Address, caller *batching.M switch gameType { case faultTypes.CannonGameType, faultTypes.PermissionedGameType, + faultTypes.CannonKonaGameType, faultTypes.AsteriscGameType, faultTypes.AlphabetGameType, faultTypes.FastGameType, faultTypes.AsteriscKonaGameType, faultTypes.SuperCannonGameType, 
faultTypes.SuperPermissionedGameType, + faultTypes.SuperCannonKonaGameType, faultTypes.SuperAsteriscKonaGameType: return gameType, nil default: diff --git a/op-challenger/game/fault/contracts/faultdisputegame.go b/op-challenger/game/fault/contracts/faultdisputegame.go index 4ae3c351c480f..db904b05841d0 100644 --- a/op-challenger/game/fault/contracts/faultdisputegame.go +++ b/op-challenger/game/fault/contracts/faultdisputegame.go @@ -25,6 +25,7 @@ var maxChildChecks = big.NewInt(512) var ( methodMaxClockDuration = "maxClockDuration" + methodClockExtension = "clockExtension" methodMaxGameDepth = "maxGameDepth" methodAbsolutePrestate = "absolutePrestate" methodStatus = "status" @@ -80,7 +81,7 @@ func NewFaultDisputeGameContract(ctx context.Context, metrics metrics.ContractMe return nil, fmt.Errorf("failed to detect game type: %w", err) } switch gameType { - case types.SuperCannonGameType, types.SuperPermissionedGameType, types.SuperAsteriscKonaGameType: + case types.SuperCannonGameType, types.SuperCannonKonaGameType, types.SuperPermissionedGameType, types.SuperAsteriscKonaGameType: return NewSuperFaultDisputeGameContract(ctx, metrics, addr, caller) default: return NewPreInteropFaultDisputeGameContract(ctx, metrics, addr, caller) @@ -406,6 +407,15 @@ func (f *FaultDisputeGameContractLatest) GetMaxClockDuration(ctx context.Context return time.Duration(result.GetUint64(0)) * time.Second, nil } +func (f *FaultDisputeGameContractLatest) GetClockExtension(ctx context.Context) (time.Duration, error) { + defer f.metrics.StartContractRequest("GetClockExtension")() + result, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodClockExtension)) + if err != nil { + return 0, fmt.Errorf("failed to fetch clock extension: %w", err) + } + return time.Duration(result.GetUint64(0)) * time.Second, nil +} + func (f *FaultDisputeGameContractLatest) GetMaxGameDepth(ctx context.Context) (types.Depth, error) { defer f.metrics.StartContractRequest("GetMaxGameDepth")() 
result, err := f.multiCaller.SingleCall(ctx, rpcblock.Latest, f.contract.Call(methodMaxGameDepth)) @@ -647,6 +657,7 @@ type FaultDisputeGameContract interface { GetWithdrawals(ctx context.Context, block rpcblock.Block, recipients ...common.Address) ([]*WithdrawalRequest, error) GetOracle(ctx context.Context) (PreimageOracleContract, error) GetMaxClockDuration(ctx context.Context) (time.Duration, error) + GetClockExtension(ctx context.Context) (time.Duration, error) GetMaxGameDepth(ctx context.Context) (types.Depth, error) GetAbsolutePrestateHash(ctx context.Context) (common.Hash, error) GetL1Head(ctx context.Context) (common.Hash, error) diff --git a/op-challenger/game/fault/contracts/faultdisputegame_test.go b/op-challenger/game/fault/contracts/faultdisputegame_test.go index 53a6f24afe33a..e5368ebd050e9 100644 --- a/op-challenger/game/fault/contracts/faultdisputegame_test.go +++ b/op-challenger/game/fault/contracts/faultdisputegame_test.go @@ -767,6 +767,9 @@ func TestFaultDisputeGame_IsResolved(t *testing.T) { ClaimData: faultTypes.ClaimData{ Bond: bond, }, + Clock: faultTypes.Clock{ + Timestamp: time.Unix(0, 0), + }, }) } } else { diff --git a/op-challenger/game/fault/player.go b/op-challenger/game/fault/player.go index cdc07d2acef9c..7094696af2df7 100644 --- a/op-challenger/game/fault/player.go +++ b/op-challenger/game/fault/player.go @@ -88,6 +88,8 @@ func NewGamePlayer( l1HeaderSource L1HeaderSource, selective bool, claimants []common.Address, + responseDelay time.Duration, + responseDelayAfter uint64, ) (*GamePlayer, error) { logger = logger.New("game", addr) @@ -150,7 +152,7 @@ func NewGamePlayer( return nil, fmt.Errorf("failed to create the responder: %w", err) } - agent := NewAgent(m, systemClock, l1Clock, loader, gameDepth, maxClockDuration, accessor, responder, logger, selective, claimants) + agent := NewAgent(m, systemClock, l1Clock, loader, gameDepth, maxClockDuration, accessor, responder, logger, selective, claimants, responseDelay, 
responseDelayAfter) return &GamePlayer{ act: agent.Act, loader: loader, diff --git a/op-challenger/game/fault/register.go b/op-challenger/game/fault/register.go index 68a4ad38ebd4b..94e0d7e91b9a6 100644 --- a/op-challenger/game/fault/register.go +++ b/op-challenger/game/fault/register.go @@ -67,6 +67,13 @@ func RegisterGameTypes( } registerTasks = append(registerTasks, NewCannonRegisterTask(faultTypes.CannonGameType, cfg, m, vm.NewOpProgramServerExecutor(logger), l2HeaderSource, rollupClient, syncValidator)) } + if cfg.TraceTypeEnabled(faultTypes.TraceTypeCannonKona) { + l2HeaderSource, rollupClient, syncValidator, err := clients.SingleChainClients() + if err != nil { + return nil, err + } + registerTasks = append(registerTasks, NewCannonKonaRegisterTask(faultTypes.CannonKonaGameType, cfg, m, vm.NewKonaExecutor(), l2HeaderSource, rollupClient, syncValidator)) + } if cfg.TraceTypeEnabled(faultTypes.TraceTypeSuperCannon) { rootProvider, syncValidator, err := clients.SuperchainClients() if err != nil { @@ -124,7 +131,7 @@ func RegisterGameTypes( registerTasks = append(registerTasks, NewAlphabetRegisterTask(faultTypes.AlphabetGameType, l2HeaderSource, rollupClient, syncValidator)) } for _, task := range registerTasks { - if err := task.Register(ctx, registry, oracles, systemClock, l1Clock, logger, m, txSender, gameFactory, caller, l1HeaderSource, selective, claimants); err != nil { + if err := task.Register(ctx, registry, oracles, systemClock, l1Clock, logger, m, txSender, gameFactory, caller, l1HeaderSource, selective, claimants, cfg.ResponseDelay, cfg.ResponseDelayAfter); err != nil { return clients.Close, fmt.Errorf("failed to register %v game type: %w", task.gameType, err) } } diff --git a/op-challenger/game/fault/register_task.go b/op-challenger/game/fault/register_task.go index e28e57189a9bd..d6d87bc1505c3 100644 --- a/op-challenger/game/fault/register_task.go +++ b/op-challenger/game/fault/register_task.go @@ -5,6 +5,7 @@ import ( "fmt" "net/url" "path/filepath" 
+ "time" "github.com/ethereum-optimism/optimism/op-challenger/config" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/claims" @@ -87,6 +88,25 @@ func NewSuperCannonRegisterTask(gameType faultTypes.GameType, cfg *config.Config } func NewCannonRegisterTask(gameType faultTypes.GameType, cfg *config.Config, m caching.Metrics, serverExecutor vm.OracleServerExecutor, l2Client utils.L2HeaderSource, rollupClient outputs.OutputRollupClient, syncValidator SyncValidator) *RegisterTask { + return newCannonVMRegisterTaskWithConfig(gameType, cfg, m, serverExecutor, l2Client, rollupClient, syncValidator, cfg.Cannon, cfg.CannonAbsolutePreStateBaseURL, cfg.CannonAbsolutePreState) +} + +func NewCannonKonaRegisterTask(gameType faultTypes.GameType, cfg *config.Config, m caching.Metrics, serverExecutor vm.OracleServerExecutor, l2Client utils.L2HeaderSource, rollupClient outputs.OutputRollupClient, syncValidator SyncValidator) *RegisterTask { + return newCannonVMRegisterTaskWithConfig(gameType, cfg, m, serverExecutor, l2Client, rollupClient, syncValidator, cfg.CannonKona, cfg.CannonKonaAbsolutePreStateBaseURL, cfg.CannonKonaAbsolutePreState) +} + +func newCannonVMRegisterTaskWithConfig( + gameType faultTypes.GameType, + cfg *config.Config, + m caching.Metrics, + serverExecutor vm.OracleServerExecutor, + l2Client utils.L2HeaderSource, + rollupClient outputs.OutputRollupClient, + syncValidator SyncValidator, + vmCfg vm.Config, + preStateBaseURL *url.URL, + preState string, +) *RegisterTask { stateConverter := cannon.NewStateConverter(cfg.Cannon) return &RegisterTask{ gameType: gameType, @@ -102,9 +122,9 @@ func NewCannonRegisterTask(gameType faultTypes.GameType, cfg *config.Config, m c gameType, stateConverter, m, - cfg.CannonAbsolutePreStateBaseURL, - cfg.CannonAbsolutePreState, - filepath.Join(cfg.Datadir, "cannon-prestates"), + preStateBaseURL, + preState, + filepath.Join(cfg.Datadir, vmCfg.VmType.String()+"-prestates"), func(ctx context.Context, path string) 
faultTypes.PrestateProvider { return vm.NewPrestateProvider(path, stateConverter) }), @@ -119,7 +139,7 @@ func NewCannonRegisterTask(gameType faultTypes.GameType, cfg *config.Config, m c prestateBlock uint64, poststateBlock uint64) (*trace.Accessor, error) { provider := vmPrestateProvider.(*vm.PrestateProvider) - return outputs.NewOutputCannonTraceAccessor(logger, m, cfg.Cannon, serverExecutor, l2Client, prestateProvider, provider.PrestatePath(), rollupClient, dir, l1Head, splitDepth, prestateBlock, poststateBlock) + return outputs.NewOutputCannonTraceAccessor(logger, m, vmCfg, serverExecutor, l2Client, prestateProvider, provider.PrestatePath(), rollupClient, dir, l1Head, splitDepth, prestateBlock, poststateBlock) }, } } @@ -287,7 +307,9 @@ func (e *RegisterTask) Register( caller *batching.MultiCaller, l1HeaderSource L1HeaderSource, selective bool, - claimants []common.Address) error { + claimants []common.Address, + responseDelay time.Duration, + responseDelayAfter uint64) error { playerCreator := func(game types.GameMetadata, dir string) (scheduler.GamePlayer, error) { contract, err := contracts.NewFaultDisputeGameContract(ctx, m, game.Proxy, caller) @@ -337,7 +359,7 @@ func (e *RegisterTask) Register( validators = append(validators, NewPrestateValidator(e.gameType.String(), contract.GetAbsolutePrestateHash, vmPrestateProvider)) validators = append(validators, NewPrestateValidator("output root", contract.GetStartingRootHash, prestateProvider)) } - return NewGamePlayer(ctx, systemClock, l1Clock, logger, m, dir, game.Proxy, txSender, contract, e.syncValidator, validators, creator, l1HeaderSource, selective, claimants) + return NewGamePlayer(ctx, systemClock, l1Clock, logger, m, dir, game.Proxy, txSender, contract, e.syncValidator, validators, creator, l1HeaderSource, selective, claimants, responseDelay, responseDelayAfter) } err := registerOracle(ctx, logger, m, oracles, gameFactory, caller, e.gameType) if err != nil { diff --git 
a/op-challenger/game/fault/trace/outputs/output_asterisc.go b/op-challenger/game/fault/trace/outputs/output_asterisc.go index fcd17a2661aed..10822a5a9aeb4 100644 --- a/op-challenger/game/fault/trace/outputs/output_asterisc.go +++ b/op-challenger/game/fault/trace/outputs/output_asterisc.go @@ -45,7 +45,8 @@ func NewOutputAsteriscTraceAccessor( return provider, nil } - cache := NewProviderCache(m, "output_asterisc_provider", asteriscCreator) + metricsLabel := fmt.Sprintf("outputs_%s_provider", cfg.VmType.String()) + cache := NewProviderCache(m, metricsLabel, asteriscCreator) selector := split.NewSplitProviderSelector(outputProvider, splitDepth, OutputRootSplitAdapter(outputProvider, cache.GetOrCreate)) return trace.NewAccessor(selector), nil } diff --git a/op-challenger/game/fault/trace/outputs/output_cannon.go b/op-challenger/game/fault/trace/outputs/output_cannon.go index 861ac62ba9837..a0679f8595b8e 100644 --- a/op-challenger/game/fault/trace/outputs/output_cannon.go +++ b/op-challenger/game/fault/trace/outputs/output_cannon.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "path/filepath" + "strings" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -39,13 +40,13 @@ func NewOutputCannonTraceAccessor( subdir := filepath.Join(dir, localContext.Hex()) localInputs, err := utils.FetchLocalInputsFromProposals(ctx, l1Head.Hash, l2Client, agreed, claimed) if err != nil { - return nil, fmt.Errorf("failed to fetch cannon local inputs: %w", err) + return nil, fmt.Errorf("failed to fetch %s local inputs: %w", cfg.VmType, err) } provider := cannon.NewTraceProvider(logger, m.ToTypedVmMetrics(cfg.VmType.String()), cfg, serverExecutor, prestateProvider, cannonPrestate, localInputs, subdir, depth) return provider, nil } - cache := NewProviderCache(m, "output_cannon_provider", cannonCreator) + cache := NewProviderCache(m, fmt.Sprintf("output_%s_provider", strings.ReplaceAll(cfg.VmType.String(), "-", "_")), cannonCreator) selector := 
split.NewSplitProviderSelector(outputProvider, splitDepth, OutputRootSplitAdapter(outputProvider, cache.GetOrCreate)) return trace.NewAccessor(selector), nil } diff --git a/op-challenger/game/fault/trace/vm/executor.go b/op-challenger/game/fault/trace/vm/executor.go index c6339a6a723d0..38fe3791c84dd 100644 --- a/op-challenger/game/fault/trace/vm/executor.go +++ b/op-challenger/game/fault/trace/vm/executor.go @@ -57,6 +57,7 @@ type Config struct { Networks []string L2Custom bool RollupConfigPaths []string + L1GenesisPath string L2GenesisPaths []string DepsetConfigPath string } @@ -200,6 +201,7 @@ func (e *Executor) DoGenerateProof(ctx context.Context, dir string, begin uint64 memoryUsed = fmt.Sprintf("%d", uint64(info.MemoryUsed)) e.metrics.RecordMemoryUsed(uint64(info.MemoryUsed)) e.metrics.RecordSteps(info.Steps) + e.metrics.RecordInstructionCacheMissCount(info.InstructionCacheMissCount) e.metrics.RecordRmwSuccessCount(info.RmwSuccessCount) e.metrics.RecordRmwFailCount(info.RmwFailCount) e.metrics.RecordMaxStepsBetweenLLAndSC(info.MaxStepsBetweenLLAndSC) @@ -215,6 +217,7 @@ func (e *Executor) DoGenerateProof(ctx context.Context, dir string, begin uint64 type debugInfo struct { MemoryUsed hexutil.Uint64 `json:"memory_used"` Steps uint64 `json:"total_steps"` + InstructionCacheMissCount uint64 `json:"instruction_cache_miss_count"` RmwSuccessCount uint64 `json:"rmw_success_count"` RmwFailCount uint64 `json:"rmw_fail_count"` MaxStepsBetweenLLAndSC uint64 `json:"max_steps_between_ll_and_sc"` diff --git a/op-challenger/game/fault/trace/vm/executor_test.go b/op-challenger/game/fault/trace/vm/executor_test.go index ba6c4f8a269db..55b7ab2403720 100644 --- a/op-challenger/game/fault/trace/vm/executor_test.go +++ b/op-challenger/game/fault/trace/vm/executor_test.go @@ -229,21 +229,26 @@ func newMetrics() *capturingVmMetrics { } type capturingVmMetrics struct { - executionTimeRecordCount int - memoryUsed hexutil.Uint64 - steps uint64 - rmwSuccessCount uint64 - rmwFailCount 
uint64 - maxStepsBetweenLLAndSC uint64 - reservationInvalidations uint64 - forcedPreemptions uint64 - idleStepsThread0 uint64 + executionTimeRecordCount int + memoryUsed hexutil.Uint64 + steps uint64 + instructionCacheMissCount uint64 + rmwSuccessCount uint64 + rmwFailCount uint64 + maxStepsBetweenLLAndSC uint64 + reservationInvalidations uint64 + forcedPreemptions uint64 + idleStepsThread0 uint64 } func (c *capturingVmMetrics) RecordSteps(val uint64) { c.steps = val } +func (c *capturingVmMetrics) RecordInstructionCacheMissCount(val uint64) { + c.instructionCacheMissCount = val +} + func (c *capturingVmMetrics) RecordExecutionTime(t time.Duration) { c.executionTimeRecordCount += 1 } diff --git a/op-challenger/game/fault/trace/vm/kona_server_executor.go b/op-challenger/game/fault/trace/vm/kona_server_executor.go index 8f1e9b60872de..76da826975487 100644 --- a/op-challenger/game/fault/trace/vm/kona_server_executor.go +++ b/op-challenger/game/fault/trace/vm/kona_server_executor.go @@ -33,10 +33,10 @@ func (s *KonaExecutor) OracleCommand(cfg Config, dataDir string, inputs utils.Lo "--l1-beacon-address", cfg.L1Beacon, "--l2-node-address", cfg.L2s[0], "--l1-head", inputs.L1Head.Hex(), - "--l2-head", inputs.L2Head.Hex(), - "--l2-output-root", inputs.L2OutputRoot.Hex(), - "--l2-claim", inputs.L2Claim.Hex(), - "--l2-block-number", inputs.L2SequenceNumber.Text(10), + "--agreed-l2-head-hash", inputs.L2Head.Hex(), + "--agreed-l2-output-root", inputs.L2OutputRoot.Hex(), + "--claimed-l2-output-root", inputs.L2Claim.Hex(), + "--claimed-l2-block-number", inputs.L2SequenceNumber.Text(10), } if s.nativeMode { @@ -57,5 +57,9 @@ func (s *KonaExecutor) OracleCommand(cfg Config, dataDir string, inputs utils.Lo args = append(args, "--l2-chain-id", strconv.FormatUint(chainCfg.ChainID, 10)) } + if cfg.L1GenesisPath != "" { + args = append(args, "--l1-config-path", cfg.L1GenesisPath) + } + return args, nil } diff --git a/op-challenger/game/fault/trace/vm/kona_server_executor_test.go 
b/op-challenger/game/fault/trace/vm/kona_server_executor_test.go index 05413d8f4e038..8894bfab8b2fb 100644 --- a/op-challenger/game/fault/trace/vm/kona_server_executor_test.go +++ b/op-challenger/game/fault/trace/vm/kona_server_executor_test.go @@ -13,11 +13,12 @@ import ( func TestKonaFillHostCommand(t *testing.T) { dir := "mockdir" cfg := Config{ - L1: "http://localhost:8888", - L1Beacon: "http://localhost:9000", - L2s: []string{"http://localhost:9999"}, - Server: "./bin/mockserver", - Networks: []string{"op-mainnet"}, + L1: "http://localhost:8888", + L1Beacon: "http://localhost:9000", + L2s: []string{"http://localhost:9999"}, + Server: "./bin/mockserver", + Networks: []string{"op-mainnet"}, + L1GenesisPath: "mockdir/l1-genesis-1.json", } inputs := utils.LocalGameInputs{ L1Head: common.Hash{0x11}, @@ -39,8 +40,9 @@ func TestKonaFillHostCommand(t *testing.T) { require.True(t, slices.Contains(args, "--data-dir")) require.True(t, slices.Contains(args, "--l2-chain-id")) require.True(t, slices.Contains(args, "--l1-head")) - require.True(t, slices.Contains(args, "--l2-head")) - require.True(t, slices.Contains(args, "--l2-output-root")) - require.True(t, slices.Contains(args, "--l2-claim")) - require.True(t, slices.Contains(args, "--l2-block-number")) + require.True(t, slices.Contains(args, "--agreed-l2-head-hash")) + require.True(t, slices.Contains(args, "--agreed-l2-output-root")) + require.True(t, slices.Contains(args, "--claimed-l2-output-root")) + require.True(t, slices.Contains(args, "--claimed-l2-block-number")) + require.True(t, slices.Contains(args, "--l1-config-path")) } diff --git a/op-challenger/game/fault/trace/vm/kona_super_server_executor.go b/op-challenger/game/fault/trace/vm/kona_super_server_executor.go index 03a2747501903..5aa347f70e5f7 100644 --- a/op-challenger/game/fault/trace/vm/kona_super_server_executor.go +++ b/op-challenger/game/fault/trace/vm/kona_super_server_executor.go @@ -50,5 +50,9 @@ func (s *KonaSuperExecutor) OracleCommand(cfg Config, 
dataDir string, inputs uti args = append(args, "--rollup-config-paths", strings.Join(cfg.RollupConfigPaths, ",")) } + if cfg.L1GenesisPath != "" { + args = append(args, "--l1-config-path", cfg.L1GenesisPath) + } + return args, nil } diff --git a/op-challenger/game/fault/trace/vm/op_program_server_executor.go b/op-challenger/game/fault/trace/vm/op_program_server_executor.go index f50196c1cb402..1a5da6b9310b0 100644 --- a/op-challenger/game/fault/trace/vm/op_program_server_executor.go +++ b/op-challenger/game/fault/trace/vm/op_program_server_executor.go @@ -72,5 +72,8 @@ func (s *OpProgramServerExecutor) OracleCommand(cfg Config, dataDir string, inpu if cfg.L2Custom { args = append(args, "--l2.custom") } + if cfg.L1GenesisPath != "" { + args = append(args, "--l1.chainconfig", cfg.L1GenesisPath) + } return args, nil } diff --git a/op-challenger/game/fault/trace/vm/op_program_server_executor_test.go b/op-challenger/game/fault/trace/vm/op_program_server_executor_test.go index 7f9ce30a93a17..473317878c695 100644 --- a/op-challenger/game/fault/trace/vm/op_program_server_executor_test.go +++ b/op-challenger/game/fault/trace/vm/op_program_server_executor_test.go @@ -33,10 +33,11 @@ func TestOpProgramFillHostCommand(t *testing.T) { oracleCommand := func(t *testing.T, lvl slog.Level, configModifier func(c *Config, inputs *utils.LocalGameInputs)) map[string]string { cfg := Config{ - L1: "http://localhost:8888", - L1Beacon: "http://localhost:9000", - L2s: []string{"http://localhost:9999", "http://localhost:9999/two"}, - Server: "./bin/mockserver", + L1: "http://localhost:8888", + L1Beacon: "http://localhost:9000", + L2s: []string{"http://localhost:9999", "http://localhost:9999/two"}, + Server: "./bin/mockserver", + L1GenesisPath: "mockdir/l1-genesis-1.json", } inputs := utils.LocalGameInputs{ L1Head: common.Hash{0x11}, @@ -60,6 +61,7 @@ func TestOpProgramFillHostCommand(t *testing.T) { require.Equal(t, inputs.L1Head.Hex(), pairs["--l1.head"]) require.Equal(t, 
inputs.L2Claim.Hex(), pairs["--l2.claim"]) require.Equal(t, inputs.L2SequenceNumber.String(), pairs["--l2.blocknumber"]) + require.Equal(t, cfg.L1GenesisPath, pairs["--l1.chainconfig"]) return pairs } diff --git a/op-challenger/game/fault/types/types.go b/op-challenger/game/fault/types/types.go index 66847cdf87f16..a76e881e31003 100644 --- a/op-challenger/game/fault/types/types.go +++ b/op-challenger/game/fault/types/types.go @@ -35,6 +35,8 @@ const ( SuperPermissionedGameType GameType = 5 OPSuccinctGameType GameType = 6 SuperAsteriscKonaGameType GameType = 7 + CannonKonaGameType GameType = 8 + SuperCannonKonaGameType GameType = 9 FastGameType GameType = 254 AlphabetGameType GameType = 255 KailuaGameType GameType = 1337 @@ -63,6 +65,10 @@ func (t GameType) String() string { return "op-succinct" case SuperAsteriscKonaGameType: return "super-asterisc-kona" + case CannonKonaGameType: + return "cannon-kona" + case SuperCannonKonaGameType: + return "super-cannon-kona" case FastGameType: return "fast" case AlphabetGameType: @@ -80,6 +86,7 @@ const ( TraceTypeAlphabet TraceType = "alphabet" TraceTypeFast TraceType = "fast" TraceTypeCannon TraceType = "cannon" + TraceTypeCannonKona TraceType = "cannon-kona" TraceTypeAsterisc TraceType = "asterisc" TraceTypeAsteriscKona TraceType = "asterisc-kona" TraceTypePermissioned TraceType = "permissioned" @@ -88,7 +95,7 @@ const ( TraceTypeSuperAsteriscKona TraceType = "super-asterisc-kona" ) -var TraceTypes = []TraceType{TraceTypeAlphabet, TraceTypeCannon, TraceTypePermissioned, TraceTypeAsterisc, TraceTypeAsteriscKona, TraceTypeFast, TraceTypeSuperCannon, TraceTypeSuperPermissioned, TraceTypeSuperAsteriscKona} +var TraceTypes = []TraceType{TraceTypeAlphabet, TraceTypeCannon, TraceTypeCannonKona, TraceTypePermissioned, TraceTypeAsterisc, TraceTypeAsteriscKona, TraceTypeFast, TraceTypeSuperCannon, TraceTypeSuperPermissioned, TraceTypeSuperAsteriscKona} func (t TraceType) String() string { return string(t) @@ -116,6 +123,8 @@ func (t 
TraceType) GameType() GameType { switch t { case TraceTypeCannon: return CannonGameType + case TraceTypeCannonKona: + return CannonKonaGameType case TraceTypePermissioned: return PermissionedGameType case TraceTypeAsterisc: diff --git a/op-challenger/game/monitor.go b/op-challenger/game/monitor.go index 326d099da3883..afcca6fa646f7 100644 --- a/op-challenger/game/monitor.go +++ b/op-challenger/game/monitor.go @@ -42,17 +42,19 @@ type claimer interface { } type gameMonitor struct { - logger log.Logger - clock RWClock - source gameSource - scheduler gameScheduler - preimages preimageScheduler - gameWindow time.Duration - claimer claimer - allowedGames []common.Address - l1HeadsSub ethereum.Subscription - l1Source *headSource - runState sync.Mutex + logger log.Logger + clock RWClock + source gameSource + scheduler gameScheduler + preimages preimageScheduler + gameWindow time.Duration + claimer claimer + allowedGames []common.Address + l1HeadsSub ethereum.Subscription + l1Source *headSource + runState sync.Mutex + minUpdatePeriod time.Duration + lastUpdateBlockTime time.Time } type MinimalSubscriber interface { @@ -77,17 +79,19 @@ func newGameMonitor( claimer claimer, allowedGames []common.Address, l1Source MinimalSubscriber, + minUpdatePeriodSeconds time.Duration, ) *gameMonitor { return &gameMonitor{ - logger: logger, - clock: cl, - scheduler: scheduler, - preimages: preimages, - source: source, - gameWindow: gameWindow, - claimer: claimer, - allowedGames: allowedGames, - l1Source: &headSource{inner: l1Source}, + logger: logger, + clock: cl, + scheduler: scheduler, + preimages: preimages, + source: source, + gameWindow: gameWindow, + claimer: claimer, + allowedGames: allowedGames, + l1Source: &headSource{inner: l1Source}, + minUpdatePeriod: minUpdatePeriodSeconds, } } @@ -128,12 +132,17 @@ func (m *gameMonitor) progressGames(ctx context.Context, blockHash common.Hash, return nil } -func (m *gameMonitor) onNewL1Head(ctx context.Context, sig eth.L1BlockRef) { - 
m.clock.SetTime(sig.Time) - if err := m.progressGames(ctx, sig.Hash, sig.Number); err != nil { +func (m *gameMonitor) onNewL1Head(ctx context.Context, block eth.L1BlockRef) { + m.clock.SetTime(block.Time) + blockTime := time.Unix(int64(block.Time), 0) + if m.lastUpdateBlockTime.Add(m.minUpdatePeriod).After(blockTime) { + return + } + m.lastUpdateBlockTime = blockTime + if err := m.progressGames(ctx, block.Hash, block.Number); err != nil { m.logger.Error("Failed to progress games", "err", err) } - if err := m.preimages.Schedule(sig.Hash, sig.Number); err != nil { + if err := m.preimages.Schedule(block.Hash, block.Number); err != nil { m.logger.Error("Failed to validate large preimages", "err", err) } } diff --git a/op-challenger/game/monitor_test.go b/op-challenger/game/monitor_test.go index ff37430865c9d..35b0dd98ae390 100644 --- a/op-challenger/game/monitor_test.go +++ b/op-challenger/game/monitor_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/ethereum-optimism/optimism/op-challenger/game/types" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" @@ -26,7 +27,7 @@ func TestMonitorGames(t *testing.T) { t.Run("Schedules games", func(t *testing.T) { addr1 := common.Address{0xaa} addr2 := common.Address{0xbb} - monitor, source, sched, mockHeadSource, preimages, _ := setupMonitorTest(t, []common.Address{}) + monitor, source, sched, mockHeadSource, preimages, _ := setupMonitorTest(t, []common.Address{}, 0) source.games = []types.GameMetadata{newFDG(addr1, 9999), newFDG(addr2, 9999)} ctx, cancel := context.WithCancel(context.Background()) @@ -71,7 +72,7 @@ func TestMonitorGames(t *testing.T) { t.Run("Resubscribes on error", func(t *testing.T) { addr1 := common.Address{0xaa} addr2 := common.Address{0xbb} - monitor, source, sched, mockHeadSource, preimages, _ := setupMonitorTest(t, []common.Address{}) + monitor, source, sched, 
mockHeadSource, preimages, _ := setupMonitorTest(t, []common.Address{}, 0) source.games = []types.GameMetadata{newFDG(addr1, 9999), newFDG(addr2, 9999)} ctx, cancel := context.WithCancel(context.Background()) @@ -117,7 +118,7 @@ func TestMonitorGames(t *testing.T) { } func TestMonitorCreateAndProgressGameAgents(t *testing.T) { - monitor, source, sched, _, _, _ := setupMonitorTest(t, []common.Address{}) + monitor, source, sched, _, _, _ := setupMonitorTest(t, []common.Address{}, 0) addr1 := common.Address{0xaa} addr2 := common.Address{0xbb} @@ -132,7 +133,7 @@ func TestMonitorCreateAndProgressGameAgents(t *testing.T) { func TestMonitorOnlyScheduleSpecifiedGame(t *testing.T) { addr1 := common.Address{0xaa} addr2 := common.Address{0xbb} - monitor, source, sched, _, _, stubClaimer := setupMonitorTest(t, []common.Address{addr2}) + monitor, source, sched, _, _, stubClaimer := setupMonitorTest(t, []common.Address{addr2}, 0) source.games = []types.GameMetadata{newFDG(addr1, 9999), newFDG(addr2, 9999)} require.NoError(t, monitor.progressGames(context.Background(), common.Hash{0x01}, 0)) @@ -142,6 +143,58 @@ func TestMonitorOnlyScheduleSpecifiedGame(t *testing.T) { require.Equal(t, 1, stubClaimer.scheduledGames) } +func TestMinUpdatePeriod(t *testing.T) { + tests := []struct { + name string + minUpdatePeriodSeconds int64 + processBlock2 bool + processBlock3 bool + }{ + {name: "ZeroUpdatePeriod", minUpdatePeriodSeconds: 0, processBlock2: true, processBlock3: true}, + {name: "SmallUpdatePeriod", minUpdatePeriodSeconds: 1, processBlock2: true, processBlock3: true}, + {name: "SkipBlockUpdatePeriod", minUpdatePeriodSeconds: 1000, processBlock2: false, processBlock3: true}, + {name: "LongUpdatePeriod", minUpdatePeriodSeconds: 1000000, processBlock2: false, processBlock3: false}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + block1 := eth.L1BlockRef{ + Hash: common.HexToHash("0x1"), + Number: 1, + Time: 1_000_000, + } + block2 := eth.L1BlockRef{ + 
Hash: common.HexToHash("0x2"), + Number: 2, + Time: 1_000_500, + } + block3 := eth.L1BlockRef{ + Hash: common.HexToHash("0x2"), + Number: 2, + Time: 1_001_000, + } + addr1 := common.Address{0xaa} + addr2 := common.Address{0xbb} + monitor, source, sched, _, _, _ := setupMonitorTest(t, []common.Address{addr2}, test.minUpdatePeriodSeconds) + source.games = []types.GameMetadata{newFDG(addr1, 9999), newFDG(addr2, 9999)} + monitor.onNewL1Head(context.Background(), block1) + expectedScheduleCount := 1 + require.Len(t, sched.Scheduled(), expectedScheduleCount, "Should schedule update on first new block") + + monitor.onNewL1Head(context.Background(), block2) + if test.processBlock2 { + expectedScheduleCount++ + } + require.Len(t, sched.Scheduled(), expectedScheduleCount, "Should not schedule update prior to min update period being reached") + + monitor.onNewL1Head(context.Background(), block3) + if test.processBlock3 { + expectedScheduleCount++ + } + require.Len(t, sched.Scheduled(), expectedScheduleCount, "Should schedule update once min update period is reached") + }) + } +} + func newFDG(proxy common.Address, timestamp uint64) types.GameMetadata { return types.GameMetadata{ Proxy: proxy, @@ -152,6 +205,7 @@ func newFDG(proxy common.Address, timestamp uint64) types.GameMetadata { func setupMonitorTest( t *testing.T, allowedGames []common.Address, + minUpdatePeriodSeconds int64, ) (*gameMonitor, *stubGameSource, *stubScheduler, *mockNewHeadSource, *stubPreimageScheduler, *mockScheduler) { logger := testlog.Logger(t, log.LevelDebug) source := &stubGameSource{} @@ -169,6 +223,7 @@ func setupMonitorTest( stubClaimer, allowedGames, mockHeadSource, + time.Duration(minUpdatePeriodSeconds)*time.Second, ) return monitor, source, sched, mockHeadSource, preimages, stubClaimer } diff --git a/op-challenger/game/service.go b/op-challenger/game/service.go index bbeeb5694b9dd..ae3c1b6910411 100644 --- a/op-challenger/game/service.go +++ b/op-challenger/game/service.go @@ -234,7 +234,7 @@ 
func (s *Service) initLargePreimages() error { } func (s *Service) initMonitor(cfg *config.Config) { - s.monitor = newGameMonitor(s.logger, s.l1Clock, s.factoryContract, s.sched, s.preimages, cfg.GameWindow, s.claimer, cfg.GameAllowlist, s.pollClient) + s.monitor = newGameMonitor(s.logger, s.l1Clock, s.factoryContract, s.sched, s.preimages, cfg.GameWindow, s.claimer, cfg.GameAllowlist, s.pollClient, cfg.MinUpdateInterval) } func (s *Service) Start(ctx context.Context) error { diff --git a/op-challenger/metrics/vm.go b/op-challenger/metrics/vm.go index 132bd9e713eaf..a3fa1cee9fa8b 100644 --- a/op-challenger/metrics/vm.go +++ b/op-challenger/metrics/vm.go @@ -13,6 +13,7 @@ type VmMetricer interface { RecordVmMemoryUsed(vmType string, memoryUsed uint64) RecordVmRmwSuccessCount(vmType string, val uint64) RecordVmSteps(vmType string, val uint64) + RecordVmInstructionCacheMissCount(vmType string, val uint64) RecordVmRmwFailCount(vmType string, val uint64) RecordVmMaxStepsBetweenLLAndSC(vmType string, val uint64) RecordVmReservationInvalidationCount(vmType string, val uint64) @@ -25,6 +26,7 @@ type TypedVmMetricer interface { RecordExecutionTime(t time.Duration) RecordMemoryUsed(memoryUsed uint64) RecordSteps(val uint64) + RecordInstructionCacheMissCount(val uint64) RecordRmwSuccessCount(val uint64) RecordRmwFailCount(val uint64) RecordMaxStepsBetweenLLAndSC(val uint64) @@ -34,15 +36,16 @@ type TypedVmMetricer interface { } type VmMetrics struct { - vmExecutionTime *prometheus.HistogramVec - vmMemoryUsed *prometheus.HistogramVec - vmSteps *prometheus.GaugeVec - vmRmwSuccessCount *prometheus.GaugeVec - vmRmwFailCount *prometheus.GaugeVec - vmMaxStepsBetweenLLAndSC *prometheus.GaugeVec - vmReservationInvalidations *prometheus.GaugeVec - vmForcedPreemptions *prometheus.GaugeVec - vmIdleStepsThread0 *prometheus.GaugeVec + vmExecutionTime *prometheus.HistogramVec + vmMemoryUsed *prometheus.HistogramVec + vmSteps *prometheus.GaugeVec + vmInstructionCacheMissCount 
*prometheus.GaugeVec + vmRmwSuccessCount *prometheus.GaugeVec + vmRmwFailCount *prometheus.GaugeVec + vmMaxStepsBetweenLLAndSC *prometheus.GaugeVec + vmReservationInvalidations *prometheus.GaugeVec + vmForcedPreemptions *prometheus.GaugeVec + vmIdleStepsThread0 *prometheus.GaugeVec } var _ VmMetricer = (*VmMetrics)(nil) @@ -59,6 +62,10 @@ func (m *VmMetrics) RecordVmSteps(vmType string, val uint64) { m.vmSteps.WithLabelValues(vmType).Set(float64(val)) } +func (m *VmMetrics) RecordVmInstructionCacheMissCount(vmType string, val uint64) { + m.vmInstructionCacheMissCount.WithLabelValues(vmType).Set(float64(val)) +} + func (m *VmMetrics) RecordVmRmwSuccessCount(vmType string, val uint64) { m.vmRmwSuccessCount.WithLabelValues(vmType).Set(float64(val)) } @@ -105,6 +112,11 @@ func NewVmMetrics(namespace string, factory metrics.Factory) *VmMetrics { Name: "vm_step_count", Help: "Number of steps executed during vm run", }, []string{"vm"}), + vmInstructionCacheMissCount: factory.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "vm_instruction_cache_miss_count", + Help: "Number of instructions cache missed during vm run", + }, []string{"vm"}), vmRmwSuccessCount: factory.NewGaugeVec(prometheus.GaugeOpts{ Namespace: namespace, Name: "vm_rmw_success_count", @@ -145,6 +157,7 @@ var _ VmMetricer = NoopVmMetrics{} func (n NoopVmMetrics) RecordVmExecutionTime(vmType string, t time.Duration) {} func (n NoopVmMetrics) RecordVmMemoryUsed(vmType string, memoryUsed uint64) {} func (n NoopVmMetrics) RecordVmSteps(vmType string, val uint64) {} +func (n NoopVmMetrics) RecordVmInstructionCacheMissCount(vmType string, val uint64) {} func (n NoopVmMetrics) RecordVmRmwSuccessCount(vmType string, val uint64) {} func (n NoopVmMetrics) RecordVmRmwFailCount(vmType string, val uint64) {} func (n NoopVmMetrics) RecordVmMaxStepsBetweenLLAndSC(vmType string, val uint64) {} @@ -171,6 +184,10 @@ func (m *typedVmMetricsImpl) RecordSteps(val uint64) { m.m.RecordVmSteps(m.vmType, val) } 
+func (m *typedVmMetricsImpl) RecordInstructionCacheMissCount(val uint64) { + m.m.RecordVmInstructionCacheMissCount(m.vmType, val) +} + func (m *typedVmMetricsImpl) RecordRmwSuccessCount(val uint64) { m.m.RecordVmRmwSuccessCount(m.vmType, val) } diff --git a/op-challenger/runner/factory.go b/op-challenger/runner/factory.go index 8c059867cdb1e..218dadfd7a333 100644 --- a/op-challenger/runner/factory.go +++ b/op-challenger/runner/factory.go @@ -38,6 +38,15 @@ func createTraceProvider( } prestateProvider := vm.NewPrestateProvider(prestate, stateConverter) return cannon.NewTraceProvider(logger, m, cfg.Cannon, serverExecutor, prestateProvider, prestate, localInputs, dir, 42), nil + case types.TraceTypeCannonKona: + serverExecutor := vm.NewKonaExecutor() + stateConverter := cannon.NewStateConverter(cfg.CannonKona) + prestate, err := prestateSource.getPrestate(ctx, logger, cfg.CannonKonaAbsolutePreStateBaseURL, cfg.CannonKonaAbsolutePreState, dir, stateConverter) + if err != nil { + return nil, err + } + prestateProvider := vm.NewPrestateProvider(prestate, stateConverter) + return cannon.NewTraceProvider(logger, m, cfg.CannonKona, serverExecutor, prestateProvider, prestate, localInputs, dir, 42), nil case types.TraceTypeAsterisc: serverExecutor := vm.NewOpProgramServerExecutor(logger) stateConverter := asterisc.NewStateConverter(cfg.Asterisc) diff --git a/op-challenger/runner/game_inputs.go b/op-challenger/runner/game_inputs.go index 284bb1bda3b40..6a812ad6d3708 100644 --- a/op-challenger/runner/game_inputs.go +++ b/op-challenger/runner/game_inputs.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" "math/big" - "math/rand" + "math/rand/v2" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/super" "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/utils" @@ -37,9 +37,6 @@ func createGameInputsSingle(ctx context.Context, log log.Logger, client *sources } log.Info("Got sync status", "status", status, "type", typeName) - if 
status.FinalizedL2.Number == 0 { - return utils.LocalGameInputs{}, errors.New("safe head is 0") - } l1Head := status.FinalizedL1 if status.FinalizedL1.Number > status.CurrentL1.Number { // Restrict the L1 head to a block that has actually been processed by op-node. @@ -56,7 +53,7 @@ func createGameInputsSingle(ctx context.Context, log log.Logger, client *sources if l1Head.Number == 0 { return utils.LocalGameInputs{}, errors.New("l1 head is 0") } - blockNumber, err := findL2BlockNumberToDispute(ctx, log, client, l1Head.Number, status.FinalizedL2.Number) + blockNumber, err := findL2BlockNumberToDispute(ctx, log, client, l1Head.Number) if err != nil { return utils.LocalGameInputs{}, fmt.Errorf("failed to find l2 block number to dispute: %w", err) } @@ -101,7 +98,7 @@ func createGameInputsInterop(ctx context.Context, log log.Logger, client *source provider := super.NewSuperTraceProvider(log, nil, prestateProvider, client, l1Head.ID(), gameDepth, agreedTimestamp, claimTimestamp+10) var agreedPrestate []byte var claim common.Hash - switch rand.Intn(3) { + switch rand.IntN(3) { case 0: // Derive block on first chain log.Info("Running first chain") prestate, err := prestateProvider.AbsolutePreState(ctx) @@ -144,45 +141,45 @@ func createGameInputsInterop(ctx context.Context, log log.Logger, client *source return localInputs, nil } -func findL2BlockNumberToDispute(ctx context.Context, log log.Logger, client *sources.RollupClient, l1HeadNum uint64, l2BlockNum uint64) (uint64, error) { - // Try to find a L1 block prior to the batch that make l2BlockNum safe +// findL2BlockNumberToDispute finds a safe l2 block number at different positions in a span batch +func findL2BlockNumberToDispute(ctx context.Context, log log.Logger, client *sources.RollupClient, l1HeadNum uint64) (uint64, error) { + safeHead, err := client.SafeHeadAtL1Block(ctx, l1HeadNum) + if err != nil { + return 0, fmt.Errorf("failed to find safe head from l1 head %v: %w", l1HeadNum, err) + } + maxL2BlockNum := 
safeHead.SafeHead.Number + + // Find a prior span batch boundary // Limits how far back we search to 10 * 32 blocks const skipSize = uint64(32) for i := 0; i < 10; i++ { if l1HeadNum < skipSize { // Too close to genesis, give up and just use the original block log.Info("Failed to find prior batch.") - return l2BlockNum, nil + return maxL2BlockNum, nil } l1HeadNum -= skipSize prevSafeHead, err := client.SafeHeadAtL1Block(ctx, l1HeadNum) if err != nil { return 0, fmt.Errorf("failed to get prior safe head at L1 block %v: %w", l1HeadNum, err) } - if prevSafeHead.SafeHead.Number < l2BlockNum { - switch rand.Intn(3) { - case 0: // First block of span batch + if prevSafeHead.SafeHead.Number < maxL2BlockNum { + switch rand.IntN(3) { + case 0: // First block of span batch after prevSafeHead return prevSafeHead.SafeHead.Number + 1, nil - case 1: // Last block of span batch + case 1: // Last block of span batch ending at prevSafeHead return prevSafeHead.SafeHead.Number, nil case 2: // Random block, probably but not guaranteed to be in the middle of a span batch firstBlockInSpanBatch := prevSafeHead.SafeHead.Number + 1 - if l2BlockNum <= firstBlockInSpanBatch { + if maxL2BlockNum <= firstBlockInSpanBatch { // There is only one block in the next batch so we just have to use it - return l2BlockNum, nil + return maxL2BlockNum, nil } - offset := rand.Intn(int(l2BlockNum - firstBlockInSpanBatch)) + offset := rand.IntN(int(maxL2BlockNum - firstBlockInSpanBatch)) return firstBlockInSpanBatch + uint64(offset), nil } - - } - if prevSafeHead.SafeHead.Number < l2BlockNum { - // We walked back far enough to be before the batch that included l2BlockNum - // So use the first block after the prior safe head as the disputed block. - // It must be the first block in a batch. 
- return prevSafeHead.SafeHead.Number + 1, nil } } - log.Warn("Failed to find prior batch", "l2BlockNum", l2BlockNum, "earliestCheckL1Block", l1HeadNum) - return l2BlockNum, nil + log.Warn("Failed to find prior batch", "l2BlockNum", maxL2BlockNum, "earliestCheckL1Block", l1HeadNum) + return maxL2BlockNum, nil } diff --git a/op-challenger/runner/metrics.go b/op-challenger/runner/metrics.go index 921bdc8d02390..8aa7494fa2c04 100644 --- a/op-challenger/runner/metrics.go +++ b/op-challenger/runner/metrics.go @@ -20,13 +20,14 @@ type Metrics struct { *metrics.VmMetrics opmetrics.RPCMetrics - up prometheus.Gauge - vmLastExecutionTime *prometheus.GaugeVec - vmLastMemoryUsed *prometheus.GaugeVec - successTotal *prometheus.CounterVec - failuresTotal *prometheus.CounterVec - panicsTotal *prometheus.CounterVec - invalidTotal *prometheus.CounterVec + up prometheus.Gauge + vmLastExecutionTime *prometheus.GaugeVec + vmLastMemoryUsed *prometheus.GaugeVec + successTotal *prometheus.CounterVec + failuresTotal *prometheus.CounterVec + consecutiveFailuresCurrent *prometheus.GaugeVec + panicsTotal *prometheus.CounterVec + invalidTotal *prometheus.CounterVec } var _ Metricer = (*Metrics)(nil) @@ -72,6 +73,11 @@ func NewMetrics(runConfigs []RunConfig) *Metrics { Name: "failures_total", Help: "Number of failures to execute a VM", }, []string{"type"}), + consecutiveFailuresCurrent: factory.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: Namespace, + Name: "consecutive_failures_current", + Help: "Number of consecutive setup failures by VM type. 
Resets to 0 on any complete run.", + }, []string{"type"}), panicsTotal: factory.NewCounterVec(prometheus.CounterOpts{ Namespace: Namespace, Name: "panics_total", @@ -87,6 +93,7 @@ func NewMetrics(runConfigs []RunConfig) *Metrics { for _, runConfig := range runConfigs { metrics.successTotal.WithLabelValues(runConfig.Name).Add(0) metrics.failuresTotal.WithLabelValues(runConfig.Name).Add(0) + metrics.consecutiveFailuresCurrent.WithLabelValues(runConfig.Name).Set(0) metrics.panicsTotal.WithLabelValues(runConfig.Name).Add(0) metrics.invalidTotal.WithLabelValues(runConfig.Name).Add(0) metrics.RecordUp() @@ -116,16 +123,22 @@ func (m *Metrics) RecordVmMemoryUsed(vmType string, memoryUsed uint64) { func (m *Metrics) RecordSuccess(vmType string) { m.successTotal.WithLabelValues(vmType).Inc() + m.consecutiveFailuresCurrent.WithLabelValues(vmType).Set(0) } func (m *Metrics) RecordFailure(vmType string) { m.failuresTotal.WithLabelValues(vmType).Inc() + m.consecutiveFailuresCurrent.WithLabelValues(vmType).Inc() } func (m *Metrics) RecordPanic(vmType string) { m.panicsTotal.WithLabelValues(vmType).Inc() + // The result was bad, but we still completed setup successfully + m.consecutiveFailuresCurrent.WithLabelValues(vmType).Set(0) } func (m *Metrics) RecordInvalid(vmType string) { m.invalidTotal.WithLabelValues(vmType).Inc() + // The result was bad, but we still completed setup successfully + m.consecutiveFailuresCurrent.WithLabelValues(vmType).Set(0) } diff --git a/op-challenger/runner/runner.go b/op-challenger/runner/runner.go index 09bbf212de650..648bc4f8a3131 100644 --- a/op-challenger/runner/runner.go +++ b/op-challenger/runner/runner.go @@ -12,7 +12,6 @@ import ( "sync/atomic" "time" - "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -89,7 +88,7 @@ func (r *Runner) Start(ctx context.Context) error { var rollupClient *sources.RollupClient if r.cfg.RollupRpc != "" { r.log.Info("Dialling 
rollup client", "url", r.cfg.RollupRpc) - cl, err := dial.DialRollupClientWithTimeout(ctx, 1*time.Minute, r.log, r.cfg.RollupRpc) + cl, err := dial.DialRollupClientWithTimeout(ctx, r.log, r.cfg.RollupRpc) if err != nil { return fmt.Errorf("failed to dial rollup client: %w", err) } @@ -98,14 +97,14 @@ func (r *Runner) Start(ctx context.Context) error { var supervisorClient *sources.SupervisorClient if r.cfg.SupervisorRPC != "" { r.log.Info("Dialling supervisor client", "url", r.cfg.SupervisorRPC) - rpcCl, err := dial.DialRPCClientWithTimeout(ctx, 1*time.Minute, r.log, r.cfg.SupervisorRPC) + cl, err := dial.DialSupervisorClientWithTimeout(ctx, r.log, r.cfg.SupervisorRPC) if err != nil { - return fmt.Errorf("failed to dial rollup client: %w", err) + return fmt.Errorf("failed to dial supervisor: %w", err) } - supervisorClient = sources.NewSupervisorClient(client.NewBaseRPCClient(rpcCl)) + supervisorClient = cl } - l1Client, err := dial.DialRPCClientWithTimeout(ctx, 1*time.Minute, r.log, r.cfg.L1EthRpc) + l1Client, err := dial.DialRPCClientWithTimeout(ctx, r.log, r.cfg.L1EthRpc) if err != nil { return fmt.Errorf("failed to dial l1 client: %w", err) } diff --git a/op-conductor/.mockery.yaml b/op-conductor/.mockery.yaml index a056b8a1de936..cdf46840f2615 100644 --- a/op-conductor/.mockery.yaml +++ b/op-conductor/.mockery.yaml @@ -12,4 +12,5 @@ packages: Consensus: github.com/ethereum-optimism/optimism/op-conductor/client: interfaces: - SequencerControl: \ No newline at end of file + SequencerControl: + ElP2PClient: \ No newline at end of file diff --git a/op-conductor/client/el.go b/op-conductor/client/el.go new file mode 100644 index 0000000000000..f836f3c3c60e3 --- /dev/null +++ b/op-conductor/client/el.go @@ -0,0 +1,55 @@ +package client + +import ( + "context" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/p2p" +) + +type ElP2PClient interface { + PeerCount(ctx context.Context) 
(int, error) +} + +type elP2PClientNet struct { + client *ethclient.Client +} + +var _ ElP2PClient = (*elP2PClientNet)(nil) + +func NewElP2PClientNet(client *ethclient.Client) ElP2PClient { + return &elP2PClientNet{ + client: client, + } +} + +func (c *elP2PClientNet) PeerCount(ctx context.Context) (int, error) { + var peerCount hexutil.Uint64 + if err := c.client.Client().Call(&peerCount, "net_peerCount"); err != nil { + return 0, err + } + + return int(peerCount), nil +} + +type elP2PClientAdmin struct { + client *ethclient.Client +} + +var _ ElP2PClient = (*elP2PClientAdmin)(nil) + +func NewElP2PClientAdmin(client *ethclient.Client) ElP2PClient { + return &elP2PClientAdmin{ + client: client, + } +} + +func (c *elP2PClientAdmin) PeerCount(ctx context.Context) (int, error) { + var peerCount []*p2p.PeerInfo + if err := c.client.Client().Call(&peerCount, "admin_peers"); err != nil { + return 0, err + } + + return len(peerCount), nil +} diff --git a/op-conductor/client/mocks/ElP2PClient.go b/op-conductor/client/mocks/ElP2PClient.go new file mode 100644 index 0000000000000..bcd77b2a5384a --- /dev/null +++ b/op-conductor/client/mocks/ElP2PClient.go @@ -0,0 +1,92 @@ +// Code generated by mockery; DO NOT EDIT. +// github.com/vektra/mockery +// template: testify + +package mocks + +import ( + "context" + + mock "github.com/stretchr/testify/mock" +) + +// NewElP2PClient creates a new instance of ElP2PClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewElP2PClient(t interface { + mock.TestingT + Cleanup(func()) +}) *ElP2PClient { + mock := &ElP2PClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} + +// ElP2PClient is an autogenerated mock type for the ElP2PClient type +type ElP2PClient struct { + mock.Mock +} + +type ElP2PClient_Expecter struct { + mock *mock.Mock +} + +func (_m *ElP2PClient) EXPECT() *ElP2PClient_Expecter { + return &ElP2PClient_Expecter{mock: &_m.Mock} +} + +// PeerCount provides a mock function for the type ElP2PClient +func (_mock *ElP2PClient) PeerCount(ctx context.Context) (int, error) { + ret := _mock.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for PeerCount") + } + + var r0 int + var r1 error + if returnFunc, ok := ret.Get(0).(func(context.Context) (int, error)); ok { + return returnFunc(ctx) + } + if returnFunc, ok := ret.Get(0).(func(context.Context) int); ok { + r0 = returnFunc(ctx) + } else { + r0 = ret.Get(0).(int) + } + if returnFunc, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = returnFunc(ctx) + } else { + r1 = ret.Error(1) + } + return r0, r1 +} + +// ElP2PClient_PeerCount_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PeerCount' +type ElP2PClient_PeerCount_Call struct { + *mock.Call +} + +// PeerCount is a helper method to define mock.On call +// - ctx +func (_e *ElP2PClient_Expecter) PeerCount(ctx interface{}) *ElP2PClient_PeerCount_Call { + return &ElP2PClient_PeerCount_Call{Call: _e.mock.On("PeerCount", ctx)} +} + +func (_c *ElP2PClient_PeerCount_Call) Run(run func(ctx context.Context)) *ElP2PClient_PeerCount_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *ElP2PClient_PeerCount_Call) Return(n int, err error) *ElP2PClient_PeerCount_Call { + _c.Call.Return(n, err) + return _c +} + +func (_c *ElP2PClient_PeerCount_Call) RunAndReturn(run func(ctx context.Context) (int, 
error)) *ElP2PClient_PeerCount_Call { + _c.Call.Return(run) + return _c +} diff --git a/op-conductor/conductor/config.go b/op-conductor/conductor/config.go index 6f067a370c1d2..90700b5a91498 100644 --- a/op-conductor/conductor/config.go +++ b/op-conductor/conductor/config.go @@ -146,6 +146,15 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*Config, error) { return nil, errors.Wrap(err, "failed to load rollup config") } + executionP2pRpcUrl := ctx.String(flags.HealthcheckExecutionP2pRPCUrl.Name) + if executionP2pRpcUrl == "" { + executionP2pRpcUrl = ctx.String(flags.ExecutionRPC.Name) + } + executionP2pCheckApi := ctx.String(flags.HealthcheckExecutionP2pCheckApi.Name) + if executionP2pCheckApi == "" { + executionP2pCheckApi = "net" + } + return &Config{ ConsensusAddr: ctx.String(flags.ConsensusAddr.Name), ConsensusPort: ctx.Int(flags.ConsensusPort.Name), @@ -167,11 +176,17 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*Config, error) { RollupBoostHealthcheckTimeout: ctx.Duration(flags.RollupBoostHealthcheckTimeout.Name), Paused: ctx.Bool(flags.Paused.Name), HealthCheck: HealthCheckConfig{ - Interval: ctx.Uint64(flags.HealthCheckInterval.Name), - UnsafeInterval: ctx.Uint64(flags.HealthCheckUnsafeInterval.Name), - SafeEnabled: ctx.Bool(flags.HealthCheckSafeEnabled.Name), - SafeInterval: ctx.Uint64(flags.HealthCheckSafeInterval.Name), - MinPeerCount: ctx.Uint64(flags.HealthCheckMinPeerCount.Name), + Interval: ctx.Uint64(flags.HealthCheckInterval.Name), + UnsafeInterval: ctx.Uint64(flags.HealthCheckUnsafeInterval.Name), + SafeEnabled: ctx.Bool(flags.HealthCheckSafeEnabled.Name), + SafeInterval: ctx.Uint64(flags.HealthCheckSafeInterval.Name), + MinPeerCount: ctx.Uint64(flags.HealthCheckMinPeerCount.Name), + ExecutionP2pEnabled: ctx.Bool(flags.HealthcheckExecutionP2pEnabled.Name), + ExecutionP2pMinPeerCount: ctx.Uint64(flags.HealthcheckExecutionP2pMinPeerCount.Name), + ExecutionP2pRPCUrl: executionP2pRpcUrl, + ExecutionP2pCheckApi: executionP2pCheckApi, + 
RollupBoostPartialHealthinessToleranceLimit: ctx.Uint64(flags.HealthCheckRollupBoostPartialHealthinessToleranceLimit.Name), + RollupBoostPartialHealthinessToleranceIntervalSeconds: ctx.Uint64(flags.HealthCheckRollupBoostPartialHealthinessToleranceIntervalSeconds.Name), }, RollupCfg: *rollupCfg, RPCEnableProxy: ctx.Bool(flags.RPCEnableProxy.Name), @@ -200,6 +215,24 @@ type HealthCheckConfig struct { // MinPeerCount is the minimum number of peers required for the sequencer to be healthy. MinPeerCount uint64 + + // ExecutionP2pEnabled is whether to enable EL P2P checks. + ExecutionP2pEnabled bool + + // ExecutionP2pRPC is the HTTP provider URL for EL P2P. + ExecutionP2pRPCUrl string + + // ExecutionP2pCheckApi is the API to use for EL P2P checks. + ExecutionP2pCheckApi string + + // ExecutionP2pMinPeerCount is the minimum number of EL P2P peers required for the sequencer to be healthy. + ExecutionP2pMinPeerCount uint64 + + // RollupBoostPartialHealthinessToleranceLimit is the amount of rollup-boost partial unhealthiness failures to tolerate within a configurable time frame + RollupBoostPartialHealthinessToleranceLimit uint64 + + // RollupBoostPartialHealthinessToleranceIntervalSeconds is the time frame within which `RollupBoostToleratePartialHealthinessToleranceIntervalLimit` is evaluated + RollupBoostPartialHealthinessToleranceIntervalSeconds uint64 } func (c *HealthCheckConfig) Check() error { @@ -212,5 +245,22 @@ func (c *HealthCheckConfig) Check() error { if c.MinPeerCount == 0 { return fmt.Errorf("missing minimum peer count") } + if c.ExecutionP2pEnabled { + if c.ExecutionP2pMinPeerCount == 0 { + return fmt.Errorf("missing minimum el p2p peers") + } + if c.ExecutionP2pRPCUrl == "" { + return fmt.Errorf("missing el p2p rpc") + } + if c.ExecutionP2pCheckApi == "" { + return fmt.Errorf("missing el p2p check api") + } + if c.ExecutionP2pCheckApi != "net" && c.ExecutionP2pCheckApi != "admin" { + return fmt.Errorf("invalid el p2p check api") + } + } + if 
(c.RollupBoostPartialHealthinessToleranceLimit != 0 && c.RollupBoostPartialHealthinessToleranceIntervalSeconds == 0) || (c.RollupBoostPartialHealthinessToleranceLimit == 0 && c.RollupBoostPartialHealthinessToleranceIntervalSeconds != 0) { + return fmt.Errorf("only one of RollupBoostPartialHealthinessToleranceLimit or RollupBoostPartialHealthinessToleranceIntervalSeconds found to be defined. Either define both of them or none.") + } return nil } diff --git a/op-conductor/conductor/service.go b/op-conductor/conductor/service.go index 2c7fc07136ff8..b9f25710c6bf3 100644 --- a/op-conductor/conductor/service.go +++ b/op-conductor/conductor/service.go @@ -220,15 +220,32 @@ func (c *OpConductor) initHealthMonitor(ctx context.Context) error { }) } + var elP2p client.ElP2PClient + if c.cfg.HealthCheck.ExecutionP2pEnabled { + execClient, err := dial.DialEthClientWithTimeout(ctx, 1*time.Minute, c.log, c.cfg.HealthCheck.ExecutionP2pRPCUrl) + if err != nil { + return errors.Wrap(err, "failed to create execution rpc client out of the el p2p rpc url: "+c.cfg.HealthCheck.ExecutionP2pRPCUrl) + } + switch c.cfg.HealthCheck.ExecutionP2pCheckApi { + case "net": + elP2p = client.NewElP2PClientNet(execClient) + case "admin": + elP2p = client.NewElP2PClientAdmin(execClient) + default: + return errors.New("invalid el p2p check api") + } + } else { + elP2p = nil + } + p2p := sources.NewP2PClient(nc) var supervisor health.SupervisorHealthAPI if c.cfg.SupervisorRPC != "" { - sc, err := opclient.NewRPC(ctx, c.log, c.cfg.SupervisorRPC) + supervisor, err = dial.DialSupervisorClientWithTimeout(ctx, c.log, c.cfg.SupervisorRPC) if err != nil { - return errors.Wrap(err, "failed to create supervisor rpc client") + return errors.Wrap(err, "failed to dial supervisor") } - supervisor = sources.NewSupervisorClient(sc) } c.hmon = health.NewSequencerHealthMonitor( @@ -244,6 +261,10 @@ func (c *OpConductor) initHealthMonitor(ctx context.Context) error { p2p, supervisor, rb, + elP2p, + 
c.cfg.HealthCheck.ExecutionP2pMinPeerCount, + c.cfg.HealthCheck.RollupBoostPartialHealthinessToleranceLimit, + c.cfg.HealthCheck.RollupBoostPartialHealthinessToleranceIntervalSeconds, ) c.healthUpdateCh = c.hmon.Subscribe() @@ -281,7 +302,7 @@ func (oc *OpConductor) initRPCServer(ctx context.Context) error { Service: execMinerProxy, }) - nodeClient, err := dial.DialRollupClientWithTimeout(ctx, 1*time.Minute, oc.log, oc.cfg.NodeRPC) + nodeClient, err := dial.DialRollupClientWithTimeout(ctx, oc.log, oc.cfg.NodeRPC) if err != nil { return errors.Wrap(err, "failed to create node rpc client") } @@ -739,10 +760,8 @@ func (oc *OpConductor) action() { case status.leader && !status.healthy && status.active: // There are two scenarios we need to handle here: // 1. we're transitioned from case status.leader && !status.healthy && !status.active, see description above - // then we should continue to sequence blocks and try to bring ourselves back to healthy state. - // note: we need to also make sure that the health error is not due to ErrSequencerConnectionDown - // because in this case, we should stop sequencing and transfer leadership to other nodes. - if oc.prevState.leader && !oc.prevState.healthy && !oc.prevState.active && !errors.Is(oc.hcerr, health.ErrSequencerConnectionDown) { + // then we should continue to sequence blocks and try to bring ourselves back to healthy state (if possible) + if oc.shouldWaitForHealthRecovery() { err = errors.New("waiting for sequencing to become healthy by itself") break } @@ -924,3 +943,24 @@ func (oc *OpConductor) updateSequencerActiveStatus() error { oc.seqActive.Store(active) return nil } + +// shouldWaitForHealthRecovery determines if the conductor should wait for the sequencer +// to recover health naturally instead of transferring leadership. 
+func (oc *OpConductor) shouldWaitForHealthRecovery() bool { + // Only wait for recovery if we transitioned from [leader, unhealthy, inactive] state + if !oc.prevState.leader || oc.prevState.healthy || oc.prevState.active { + return false + } + + // Don't wait if the error is a connection issue - transfer leadership instead + if errors.Is(oc.hcerr, health.ErrSequencerConnectionDown) { + return false + } + + // Don't wait if rollup boost is enabled and partially healthy - transfer leadership instead + if oc.cfg.RollupBoostEnabled && errors.Is(oc.hcerr, health.ErrRollupBoostPartiallyHealthy) { + return false + } + + return true +} diff --git a/op-conductor/conductor/service_test.go b/op-conductor/conductor/service_test.go index e64474eecb0aa..d42289d08d498 100644 --- a/op-conductor/conductor/service_test.go +++ b/op-conductor/conductor/service_test.go @@ -1183,3 +1183,43 @@ connected: // Verify that the conductor is stopped s.True(conductor.Stopped()) } + +// TestRollupBoostPartialFailure tests that OpConductor correctly handles rollup boost partial health failures. +// This test verifies that when a leader is unhealthy and actively sequencing due to ErrRollupBoostPartiallyHealthy, +// it should stop sequencing and transfer leadership instead of waiting for health recovery. 
+// Scenario: [leader, unhealthy, active] with prevState [leader, unhealthy, inactive] and ErrRollupBoostPartiallyHealthy +// Expected: Stop sequencing and transfer leadership (not wait for recovery) +func (s *OpConductorTestSuite) TestRollupBoostPartialFailure() { + s.enableSynchronization() + + // Set initial state: leader is unhealthy and actively sequencing + // Previous state was [leader, unhealthy, inactive] - this simulates the scenario where + // the leader started sequencing during a network stall but rollup boost is partially healthy + s.conductor.leader.Store(true) + s.conductor.healthy.Store(false) + s.conductor.seqActive.Store(true) + s.conductor.prevState = &state{ + leader: true, + healthy: false, + active: false, + } + s.conductor.cfg.RollupBoostEnabled = true + + // Setup expectations - with ErrRollupBoostPartiallyHealthy, conductor should NOT wait for recovery + // Instead, it should stop sequencing and transfer leadership to another node + s.ctrl.EXPECT().StopSequencer(mock.Anything).Return(common.Hash{}, nil).Times(1) + s.cons.EXPECT().TransferLeader().Return(nil).Times(1) + + // Trigger the health update with rollup boost partial failure + s.updateHealthStatusAndExecuteAction(health.ErrRollupBoostPartiallyHealthy) + + // Verify the conductor stops sequencing and transfers leadership instead of waiting for recovery + s.False(s.conductor.leader.Load(), "Should transfer leadership to another node") + s.False(s.conductor.healthy.Load(), "Should remain marked as unhealthy") + s.False(s.conductor.seqActive.Load(), "Should stop sequencing") + s.Equal(health.ErrRollupBoostPartiallyHealthy, s.conductor.hcerr, "Should store the rollup boost error") + + // Verify the expected actions were taken + s.ctrl.AssertNumberOfCalls(s.T(), "StopSequencer", 1) + s.cons.AssertNumberOfCalls(s.T(), "TransferLeader", 1) +} diff --git a/op-conductor/flags/flags.go b/op-conductor/flags/flags.go index b6451ad05b47e..96e6eeafd51ab 100644 --- a/op-conductor/flags/flags.go 
+++ b/op-conductor/flags/flags.go @@ -158,6 +158,38 @@ var ( EnvVars: opservice.PrefixEnvVar(EnvVarPrefix, "WEBSOCKET_SERVER_PORT"), Value: 8546, } + HealthcheckExecutionP2pEnabled = &cli.BoolFlag{ + Name: "healthcheck.execution-p2p-enabled", + Usage: "Whether to enable EL P2P checks", + EnvVars: opservice.PrefixEnvVar(EnvVarPrefix, "HEALTHCHECK_EXECUTION_P2P_ENABLED"), + Value: false, + } + HealthcheckExecutionP2pMinPeerCount = &cli.Uint64Flag{ + Name: "healthcheck.execution-p2p-min-peer-count", + Usage: "Minimum number of EL P2P peers required to be considered healthy", + EnvVars: opservice.PrefixEnvVar(EnvVarPrefix, "HEALTHCHECK_EXECUTION_P2P_MIN_PEER_COUNT"), + } + HealthcheckExecutionP2pRPCUrl = &cli.StringFlag{ + Name: "healthcheck.execution-p2p-rpc-url", + Usage: "URL override for the execution layer RPC client for the sake of p2p healthcheck. If not set, the execution RPC URL will be used.", + EnvVars: opservice.PrefixEnvVar(EnvVarPrefix, "HEALTHCHECK_EXECUTION_P2P_RPC_URL"), + } + HealthcheckExecutionP2pCheckApi = &cli.StringFlag{ + Name: "healthcheck.execution-p2p-check-api", + Usage: "Type of EL P2P check to perform. If not set, the default `net` type will be used corresponding to the `net_peerCount` RPC call.", + EnvVars: opservice.PrefixEnvVar(EnvVarPrefix, "HEALTHCHECK_EXECUTION_P2P_CHECK_API"), + Value: "net", + } + HealthCheckRollupBoostPartialHealthinessToleranceLimit = &cli.Uint64Flag{ + Name: "healthcheck.rollup-boost-partial-healthiness-tolerance-limit", + Usage: "Sets the count of rollup-boost partial healthiness failures to occur before marking op-conducto as unhealthy. 
Default is 0 with which a single occurrence of rollup-boost partial healthiness is enough to set op-conductor as unhealthy", + EnvVars: opservice.PrefixEnvVar(EnvVarPrefix, "HEALTHCHECK_ROLLUP_BOOST_PARTIAL_HEALTHINESS_TOLERANCE_LIMIT"), + } + HealthCheckRollupBoostPartialHealthinessToleranceIntervalSeconds = &cli.Uint64Flag{ + Name: "healthcheck.rollup-boost-partial-healthiness-tolerance-interval-seconds", + Usage: "The time frame within which rollup-boost partial healthiness tolerance is evaluated", + EnvVars: opservice.PrefixEnvVar(EnvVarPrefix, "HEALTHCHECK_ROLLUP_BOOST_PARTIAL_HEALTHINESS_TOLERANCE_INTERVAL_SECONDS"), + } ) var requiredFlags = []cli.Flag{ @@ -187,6 +219,12 @@ var optionalFlags = []cli.Flag{ SupervisorRPC, RollupBoostEnabled, RollupBoostHealthcheckTimeout, + HealthcheckExecutionP2pEnabled, + HealthcheckExecutionP2pMinPeerCount, + HealthcheckExecutionP2pRPCUrl, + HealthcheckExecutionP2pCheckApi, + HealthCheckRollupBoostPartialHealthinessToleranceLimit, + HealthCheckRollupBoostPartialHealthinessToleranceIntervalSeconds, } func init() { diff --git a/op-conductor/health/monitor.go b/op-conductor/health/monitor.go index 70f667d81a826..dc5bd88fbd4b2 100644 --- a/op-conductor/health/monitor.go +++ b/op-conductor/health/monitor.go @@ -39,8 +39,8 @@ type HealthMonitor interface { // interval is the interval between health checks measured in seconds. // safeInterval is the interval between safe head progress measured in seconds. // minPeerCount is the minimum number of peers required for the sequencer to be healthy. 
-func NewSequencerHealthMonitor(log log.Logger, metrics metrics.Metricer, interval, unsafeInterval, safeInterval, minPeerCount uint64, safeEnabled bool, rollupCfg *rollup.Config, node dial.RollupClientInterface, p2p apis.P2PClient, supervisor SupervisorHealthAPI, rb client.RollupBoostClient) HealthMonitor { - return &SequencerHealthMonitor{ +func NewSequencerHealthMonitor(log log.Logger, metrics metrics.Metricer, interval, unsafeInterval, safeInterval, minPeerCount uint64, safeEnabled bool, rollupCfg *rollup.Config, node dial.RollupClientInterface, p2p apis.P2PClient, supervisor SupervisorHealthAPI, rb client.RollupBoostClient, elP2pClient client.ElP2PClient, minElP2pPeers uint64, rollupBoostToleratePartialHealthinessToleranceLimit uint64, rollupBoostToleratePartialHealthinessToleranceIntervalSeconds uint64) HealthMonitor { + hm := &SequencerHealthMonitor{ log: log, metrics: metrics, interval: interval, @@ -50,12 +50,36 @@ func NewSequencerHealthMonitor(log log.Logger, metrics metrics.Metricer, interva safeEnabled: safeEnabled, safeInterval: safeInterval, minPeerCount: minPeerCount, - timeProviderFn: currentTimeProvicer, + timeProviderFn: currentTimeProvider, node: node, p2p: p2p, supervisor: supervisor, rb: rb, } + + if elP2pClient != nil { + hm.elP2p = &ElP2pHealthMonitor{ + log: log, + minPeerCount: minElP2pPeers, + elP2pClient: elP2pClient, + } + } + if rollupBoostToleratePartialHealthinessToleranceLimit != 0 { + hm.rollupBoostPartialHealthinessToleranceLimit = rollupBoostToleratePartialHealthinessToleranceLimit + var err error + hm.rollupBoostPartialHealthinessToleranceCounter, err = NewTimeBoundedRotatingCounter(rollupBoostToleratePartialHealthinessToleranceIntervalSeconds) + if err != nil { + panic(fmt.Errorf("failed to setup health monitor: %w", err)) + } + } + + return hm +} + +type ElP2pHealthMonitor struct { + log log.Logger + minPeerCount uint64 + elP2pClient client.ElP2PClient } // SequencerHealthMonitor monitors sequencer health. 
@@ -77,10 +101,13 @@ type SequencerHealthMonitor struct { timeProviderFn func() uint64 - node dial.RollupClientInterface - p2p apis.P2PClient - supervisor SupervisorHealthAPI - rb client.RollupBoostClient + node dial.RollupClientInterface + p2p apis.P2PClient + supervisor SupervisorHealthAPI + rb client.RollupBoostClient + elP2p *ElP2pHealthMonitor + rollupBoostPartialHealthinessToleranceLimit uint64 + rollupBoostPartialHealthinessToleranceCounter *timeBoundedRotatingCounter } var _ HealthMonitor = (*SequencerHealthMonitor)(nil) @@ -148,6 +175,13 @@ func (hm *SequencerHealthMonitor) healthCheck(ctx context.Context) error { return err } + if hm.elP2p != nil { + err = hm.elP2p.checkElP2p(ctx) + if err != nil { + return err + } + } + err = hm.checkRollupBoost(ctx) if err != nil { return err @@ -157,6 +191,19 @@ func (hm *SequencerHealthMonitor) healthCheck(ctx context.Context) error { return nil } +func (hm *ElP2pHealthMonitor) checkElP2p(ctx context.Context) error { + peerCount, err := hm.elP2pClient.PeerCount(ctx) + if err != nil { + return err + } + + if peerCount < int(hm.minPeerCount) { + hm.log.Error("el p2p peer count is below minimum", "peerCount", peerCount, "minPeerCount", hm.minPeerCount) + return ErrSequencerNotHealthy + } + + return nil +} func (hm *SequencerHealthMonitor) checkNode(ctx context.Context) error { err := hm.checkNodeSyncStatus(ctx) if err != nil { @@ -251,8 +298,14 @@ func (hm *SequencerHealthMonitor) checkRollupBoost(ctx context.Context) error { case client.HealthStatusHealthy: return nil case client.HealthStatusPartial: + if hm.rollupBoostPartialHealthinessToleranceCounter != nil && hm.rollupBoostPartialHealthinessToleranceCounter.CurrentValue() < hm.rollupBoostPartialHealthinessToleranceLimit { + latestValue := hm.rollupBoostPartialHealthinessToleranceCounter.Increment() + hm.log.Debug("Rollup boost partial unhealthiness failure tolerated", "currentValue", latestValue, "limit", hm.rollupBoostPartialHealthinessToleranceLimit) + return nil 
+ } hm.log.Error("Rollup boost is partial failure, builder is down but fallback execution client is up", "err", ErrRollupBoostPartiallyHealthy) return ErrRollupBoostPartiallyHealthy + case client.HealthStatusUnhealthy: hm.log.Error("Rollup boost total failure, both builder and fallback execution client are down", "err", ErrRollupBoostNotHealthy) return ErrRollupBoostNotHealthy @@ -269,6 +322,6 @@ func calculateTimeDiff(now, then uint64) uint64 { return now - then } -func currentTimeProvicer() uint64 { +func currentTimeProvider() uint64 { return uint64(time.Now().Unix()) } diff --git a/op-conductor/health/monitor_test.go b/op-conductor/health/monitor_test.go index 4940dc2d884f6..af85b8ff4270c 100644 --- a/op-conductor/health/monitor_test.go +++ b/op-conductor/health/monitor_test.go @@ -24,11 +24,14 @@ import ( ) const ( - unhealthyPeerCount = 0 - minPeerCount = 1 - healthyPeerCount = 2 - blockTime = 2 - interval = 1 + unhealthyPeerCount = 0 + minPeerCount = 1 + healthyPeerCount = 2 + blockTime = 2 + interval = 1 + minElP2pPeerCount = 2 + healthyElP2pPeerCount = 3 + unhealthyElP2pPeerCount = 1 ) type HealthMonitorTestSuite struct { @@ -38,6 +41,8 @@ type HealthMonitorTestSuite struct { interval uint64 minPeerCount uint64 rollupCfg *rollup.Config + + minElP2pPeerCount uint64 } func (s *HealthMonitorTestSuite) SetupSuite() { @@ -47,6 +52,7 @@ func (s *HealthMonitorTestSuite) SetupSuite() { s.rollupCfg = &rollup.Config{ BlockTime: blockTime, } + s.minElP2pPeerCount = minElP2pPeerCount } func (s *HealthMonitorTestSuite) SetupMonitor( @@ -54,6 +60,7 @@ func (s *HealthMonitorTestSuite) SetupMonitor( mockRollupClient *testutils.MockRollupClient, mockP2P *p2pMocks.API, mockSupervisorHealthAPI SupervisorHealthAPI, + elP2pClient client.ElP2PClient, ) *SequencerHealthMonitor { tp := &timeProvider{now: now} if mockP2P == nil { @@ -78,17 +85,28 @@ func (s *HealthMonitorTestSuite) SetupMonitor( p2p: mockP2P, supervisor: mockSupervisorHealthAPI, } + if elP2pClient != nil { + 
monitor.elP2p = &ElP2pHealthMonitor{ + log: s.log, + minPeerCount: s.minElP2pPeerCount, + elP2pClient: elP2pClient, + } + } err := monitor.Start(context.Background()) s.NoError(err) return monitor } +type monitorOpts func(*SequencerHealthMonitor) + // SetupMonitorWithRollupBoost creates a HealthMonitor that includes a RollupBoostClient func (s *HealthMonitorTestSuite) SetupMonitorWithRollupBoost( now, unsafeInterval, safeInterval uint64, mockRollupClient *testutils.MockRollupClient, mockP2P *p2pMocks.API, mockRollupBoost *clientmocks.RollupBoostClient, + elP2pClient client.ElP2PClient, + opts ...monitorOpts, ) *SequencerHealthMonitor { tp := &timeProvider{now: now} if mockP2P == nil { @@ -111,7 +129,19 @@ func (s *HealthMonitorTestSuite) SetupMonitorWithRollupBoost( timeProviderFn: tp.Now, node: mockRollupClient, p2p: mockP2P, - rb: mockRollupBoost, + } + if mockRollupBoost != nil { + monitor.rb = mockRollupBoost + } + if elP2pClient != nil { + monitor.elP2p = &ElP2pHealthMonitor{ + log: s.log, + minPeerCount: s.minElP2pPeerCount, + elP2pClient: elP2pClient, + } + } + for _, opt := range opts { + opt(monitor) } err := monitor.Start(context.Background()) s.NoError(err) @@ -133,7 +163,34 @@ func (s *HealthMonitorTestSuite) TestUnhealthyLowPeerCount() { } pc.EXPECT().PeerStats(mock.Anything).Return(ps1, nil).Times(1) - monitor := s.SetupMonitor(now, 60, 60, rc, pc, nil) + monitor := s.SetupMonitor(now, 60, 60, rc, pc, nil, nil) + + healthUpdateCh := monitor.Subscribe() + healthFailure := <-healthUpdateCh + s.NotNil(healthFailure) + + s.NoError(monitor.Stop()) +} + +func (s *HealthMonitorTestSuite) TestUnhealthyLowElP2pPeerCount() { + s.T().Parallel() + now := uint64(time.Now().Unix()) + + rc := &testutils.MockRollupClient{} + ss1 := mockSyncStatus(now-1, 1, now-3, 0) + rc.ExpectSyncStatus(ss1, nil) + rc.ExpectSyncStatus(ss1, nil) + + healthyPc := &p2pMocks.API{} + ps1 := &apis.PeerStats{ + Connected: healthyPeerCount, + } + 
healthyPc.EXPECT().PeerStats(mock.Anything).Return(ps1, nil).Times(1) + + elP2pClient := &clientmocks.ElP2PClient{} + elP2pClient.EXPECT().PeerCount(mock.Anything).Return(unhealthyElP2pPeerCount, nil).Times(1) + + monitor := s.SetupMonitor(now, 60, 60, rc, healthyPc, nil, elP2pClient) healthUpdateCh := monitor.Subscribe() healthFailure := <-healthUpdateCh @@ -153,7 +210,10 @@ func (s *HealthMonitorTestSuite) TestUnhealthyUnsafeHeadNotProgressing() { rc.ExpectSyncStatus(ss1, nil) } - monitor := s.SetupMonitor(now, uint64(unsafeBlocksInterval), 60, rc, nil, nil) + elP2pClient := &clientmocks.ElP2PClient{} + elP2pClient.EXPECT().PeerCount(mock.Anything).Return(healthyElP2pPeerCount, nil) + + monitor := s.SetupMonitor(now, uint64(unsafeBlocksInterval), 60, rc, nil, nil, elP2pClient) healthUpdateCh := monitor.Subscribe() // once the unsafe interval is surpassed, we should expect "unsafe head is falling behind the unsafe interval" @@ -183,7 +243,7 @@ func (s *HealthMonitorTestSuite) TestUnhealthySafeHeadNotProgressing() { rc.ExpectSyncStatus(mockSyncStatus(now+4, 3, now, 1), nil) rc.ExpectSyncStatus(mockSyncStatus(now+4, 3, now, 1), nil) - monitor := s.SetupMonitor(now, 60, 3, rc, nil, nil) + monitor := s.SetupMonitor(now, 60, 3, rc, nil, nil, nil) healthUpdateCh := monitor.Subscribe() for i := 0; i < 5; i++ { @@ -209,6 +269,9 @@ func (s *HealthMonitorTestSuite) TestHealthyWithUnsafeLag() { s.T().Parallel() now := uint64(time.Now().Unix()) + elP2pClient := &clientmocks.ElP2PClient{} + elP2pClient.EXPECT().PeerCount(mock.Anything).Return(healthyElP2pPeerCount, nil) + rc := &testutils.MockRollupClient{} // although unsafe has lag of 20 seconds, it's within the configured unsafe interval // and it is advancing every block time, so it should be considered safe. @@ -218,7 +281,7 @@ func (s *HealthMonitorTestSuite) TestHealthyWithUnsafeLag() { // in this case now time is behind unsafe head time, this should still be considered healthy. 
rc.ExpectSyncStatus(mockSyncStatus(now+5, 2, now, 1), nil) - monitor := s.SetupMonitor(now, 60, 60, rc, nil, nil) + monitor := s.SetupMonitor(now, 60, 60, rc, nil, nil, elP2pClient) healthUpdateCh := monitor.Subscribe() // confirm initial state @@ -262,7 +325,7 @@ func (s *HealthMonitorTestSuite) TestHealthySupervisor() { su := &mocks.SupervisorHealthAPI{} su.EXPECT().SyncStatus(mock.Anything).Return(eth.SupervisorSyncStatus{}, nil).Times(1) - monitor := s.SetupMonitor(now, 60, 60, rc, nil, su) + monitor := s.SetupMonitor(now, 60, 60, rc, nil, su, nil) healthUpdateCh := monitor.Subscribe() healthFailure := <-healthUpdateCh @@ -283,7 +346,7 @@ func (s *HealthMonitorTestSuite) TestUnhealthySupervisorConnectionDown() { su := &mocks.SupervisorHealthAPI{} su.EXPECT().SyncStatus(mock.Anything).Return(eth.SupervisorSyncStatus{}, errors.New("supervisor connection down")).Times(1) - monitor := s.SetupMonitor(now, 60, 60, rc, nil, su) + monitor := s.SetupMonitor(now, 60, 60, rc, nil, su, nil) healthUpdateCh := monitor.Subscribe() healthFailure := <-healthUpdateCh @@ -313,7 +376,7 @@ func (s *HealthMonitorTestSuite) TestRollupBoostConnectionDown() { rb.EXPECT().Healthcheck(mock.Anything).Return(client.HealthStatus(""), errors.New("connection refused")) // Start monitor with all dependencies - monitor := s.SetupMonitorWithRollupBoost(now, 60, 60, rc, pc, rb) + monitor := s.SetupMonitorWithRollupBoost(now, 60, 60, rc, pc, rb, nil) // Check for connection down error healthUpdateCh := monitor.Subscribe() @@ -344,7 +407,7 @@ func (s *HealthMonitorTestSuite) TestRollupBoostNotHealthy() { rb.EXPECT().Healthcheck(mock.Anything).Return(client.HealthStatusUnhealthy, nil) // Start monitor with all dependencies - monitor := s.SetupMonitorWithRollupBoost(now, 60, 60, rc, pc, rb) + monitor := s.SetupMonitorWithRollupBoost(now, 60, 60, rc, pc, rb, nil) // Check for unhealthy status healthUpdateCh := monitor.Subscribe() @@ -375,7 +438,7 @@ func (s *HealthMonitorTestSuite) 
TestRollupBoostPartialStatus() { rb.EXPECT().Healthcheck(mock.Anything).Return(client.HealthStatusPartial, nil) // Start monitor with all dependencies - monitor := s.SetupMonitorWithRollupBoost(now, 60, 60, rc, pc, rb) + monitor := s.SetupMonitorWithRollupBoost(now, 60, 60, rc, pc, rb, nil) // Check for unhealthy status healthUpdateCh := monitor.Subscribe() @@ -385,6 +448,81 @@ func (s *HealthMonitorTestSuite) TestRollupBoostPartialStatus() { s.NoError(monitor.Stop()) } +func (s *HealthMonitorTestSuite) TestRollupBoostPartialStatusWithTolerance() { + s.T().Parallel() + now := uint64(time.Now().Unix()) + + // Setup healthy node conditions + rc := &testutils.MockRollupClient{} + ss1 := mockSyncStatus(now-1, 1, now-3, 0) + + // because 6 healthchecks are expected, causing 6 calls of sync status + for i := 0; i < 6; i++ { + rc.ExpectSyncStatus(ss1, nil) + } + + // Setup healthy peer count + pc := &p2pMocks.API{} + ps1 := &p2p.PeerStats{ + Connected: healthyPeerCount, + } + pc.EXPECT().PeerStats(mock.Anything).Return(ps1, nil) + + // Setup partial rollup boost status (treated as unhealthy) + rb := &clientmocks.RollupBoostClient{} + rb.EXPECT().Healthcheck(mock.Anything).Return(client.HealthStatusPartial, nil) + + toleranceLimit := uint64(2) + toleranceIntervalSeconds := uint64(6) + + timeBoundedRotatingCounter, err := NewTimeBoundedRotatingCounter(toleranceIntervalSeconds) + s.Nil(err) + + tp := &timeProvider{now: 1758792282} + + // Start monitor with all dependencies as well as tolerance of 2 rollup-boost partial unhealthiness per 6s period + monitor := s.SetupMonitorWithRollupBoost(now, 60, 60, rc, pc, rb, nil, func(shm *SequencerHealthMonitor) { + timeBoundedRotatingCounter.timeProviderFn = tp.Now + + // pollute the cache of timeBoundedRotatingCounter with 999 elements so as to later test the lazy cleanup + // note: the 1000th element will be added by the first healthcheck run + for i := 0; i < 999; i++ { + 
timeBoundedRotatingCounter.temporalCache[int64(i)] = uint64(1) + } + + shm.rollupBoostPartialHealthinessToleranceCounter = timeBoundedRotatingCounter + shm.rollupBoostPartialHealthinessToleranceLimit = toleranceLimit + }) + + healthUpdateCh := monitor.Subscribe() + + s.Eventually(func() bool { + return len(timeBoundedRotatingCounter.temporalCache) == 1000 + }, time.Second*3, time.Second*1) + + firstHealthStatus := <-healthUpdateCh + secondHealthStatus := <-healthUpdateCh + thirdHealthStatus := <-healthUpdateCh + + s.Nil(firstHealthStatus) + s.Nil(secondHealthStatus) + s.Equal(ErrRollupBoostPartiallyHealthy, thirdHealthStatus) + + tp.Now() // simulate another second passing + // by now, because of three healthchecks, six seconds (CurrentValue + Increment + CurrentValue + Increment + CurrentValue + tp.Now()) have been simulated to pass (by the timeProviderFn) + // this should reset the time bound counter, thereby allowing partial unhealthiness failures to be tolerated again + + fourthHealthStatus := <-healthUpdateCh + fifthHealthStatus := <-healthUpdateCh + sixthHealthStatus := <-healthUpdateCh + + s.Nil(fourthHealthStatus) + s.Nil(fifthHealthStatus) + s.Equal(ErrRollupBoostPartiallyHealthy, sixthHealthStatus) + + s.NoError(monitor.Stop()) +} + func (s *HealthMonitorTestSuite) TestRollupBoostHealthy() { s.T().Parallel() now := uint64(time.Now().Unix()) @@ -411,7 +549,7 @@ func (s *HealthMonitorTestSuite) TestRollupBoostHealthy() { rb.EXPECT().Healthcheck(mock.Anything).After(time.Duration(numSecondsToWait)*time.Second).Return(client.HealthStatusHealthy, nil) // Start monitor with all dependencies - monitor := s.SetupMonitorWithRollupBoost(now, 60, 60, rc, pc, rb) + monitor := s.SetupMonitorWithRollupBoost(now, 60, 60, rc, pc, rb, nil) // Should report healthy status healthUpdateCh := monitor.Subscribe() @@ -466,6 +604,77 @@ func (s *HealthMonitorTestSuite) TestRollupBoostNilClient() { s.NoError(monitor.Stop()) } +func (s *HealthMonitorTestSuite) TestElP2pHealthy() { 
+ s.T().Parallel() + now := uint64(time.Now().Unix()) + numSecondsToWait := interval + 1 + + // Setup healthy node conditions + rc := &testutils.MockRollupClient{} + ss1 := mockSyncStatus(now-1, 1, now-3, 0) + + for i := 0; i < numSecondsToWait; i++ { + rc.ExpectSyncStatus(ss1, nil) + } + + // Setup healthy rollup boost + rb := &clientmocks.RollupBoostClient{} + // // Wait for longer than healthcheck interval before returning healthy status, to verify nothing breaks if rb is slow to respond + rb.EXPECT().Healthcheck(mock.Anything).After(time.Duration(numSecondsToWait)*time.Second).Return(client.HealthStatusHealthy, nil) + + // Setup healthy peer count + pc := &p2pMocks.API{} + ps1 := &p2p.PeerStats{ + Connected: healthyPeerCount, + } + pc.EXPECT().PeerStats(mock.Anything).Return(ps1, nil) + + // Setup healthy el p2p + elP2pClient := &clientmocks.ElP2PClient{} + elP2pClient.EXPECT().PeerCount(mock.Anything).Return(healthyElP2pPeerCount, nil) + + // Start monitor with all dependencies + monitor := s.SetupMonitorWithRollupBoost(now, 60, 60, rc, pc, rb, elP2pClient) + + // Should report healthy status + healthUpdateCh := monitor.Subscribe() + healthStatus := <-healthUpdateCh + s.Nil(healthStatus) + + s.NoError(monitor.Stop()) +} + +func (s *HealthMonitorTestSuite) TestElP2pHealthyNilClient() { + s.T().Parallel() + now := uint64(time.Now().Unix()) + numSecondsToWait := interval + 1 + + // Setup healthy node conditions + rc := &testutils.MockRollupClient{} + ss1 := mockSyncStatus(now-1, 1, now-3, 0) + + for i := 0; i < numSecondsToWait; i++ { + rc.ExpectSyncStatus(ss1, nil) + } + + // Setup healthy peer count + pc := &p2pMocks.API{} + ps1 := &p2p.PeerStats{ + Connected: healthyPeerCount, + } + pc.EXPECT().PeerStats(mock.Anything).Return(ps1, nil) + + // Start monitor with all dependencies + monitor := s.SetupMonitorWithRollupBoost(now, 60, 60, rc, pc, nil, nil) + + // Should report healthy status + healthUpdateCh := monitor.Subscribe() + healthStatus := <-healthUpdateCh 
+ s.Nil(healthStatus) + + s.NoError(monitor.Stop()) +} + func mockSyncStatus(unsafeTime, unsafeNum, safeTime, safeNum uint64) *eth.SyncStatus { return &eth.SyncStatus{ UnsafeL2: eth.L2BlockRef{ diff --git a/op-conductor/health/timeboundcounter.go b/op-conductor/health/timeboundcounter.go new file mode 100644 index 0000000000000..58918ead9116d --- /dev/null +++ b/op-conductor/health/timeboundcounter.go @@ -0,0 +1,58 @@ +package health + +import ( + "fmt" + "sync" +) + +// this is a type of counter which keeps on incrementing until its reset interval is hit after which it resets to 0 +// this can be used to track time-based rate-limit, error counts, etc. +type timeBoundedRotatingCounter struct { + resetIntervalSeconds uint64 + timeProviderFn func() uint64 + + mut *sync.RWMutex + temporalCache map[int64]uint64 +} + +func NewTimeBoundedRotatingCounter(resetIntervalSeconds uint64) (*timeBoundedRotatingCounter, error) { + if resetIntervalSeconds == 0 { + return nil, fmt.Errorf("reset interval seconds must be more than 0") + } + return &timeBoundedRotatingCounter{ + resetIntervalSeconds: resetIntervalSeconds, + mut: &sync.RWMutex{}, + temporalCache: map[int64]uint64{}, + timeProviderFn: currentTimeProvider, + }, nil + } + +func (t *timeBoundedRotatingCounter) Increment() uint64 { + // let's take `resetIntervalSeconds` as 60s + // truncatedTimestamp is current timestamp rounded off by 60s (resetIntervalSeconds) + // thereby generating a value which stays same until the next 60s helping track and incrementing the counter corresponding to it for the next 60s + currentTsSeconds := t.timeProviderFn() + truncatedTimestamp := int64(currentTsSeconds / t.resetIntervalSeconds) + t.mut.Lock() + // a lazy cleanup subroutine to clean the cache when it's grown enough, preventing memory leaks + defer func() { + defer t.mut.Unlock() + if len(t.temporalCache) > 1000 { + newCache := map[int64]uint64{ + truncatedTimestamp: t.temporalCache[truncatedTimestamp], + } + t.temporalCache = 
newCache // garbage collector should take care of the old cache + } + }() + + t.temporalCache[truncatedTimestamp]++ + return t.temporalCache[truncatedTimestamp] +} + +func (t *timeBoundedRotatingCounter) CurrentValue() uint64 { + currentTsSeconds := t.timeProviderFn() + truncatedTimestamp := int64(currentTsSeconds / t.resetIntervalSeconds) + t.mut.RLock() + defer t.mut.RUnlock() + return t.temporalCache[truncatedTimestamp] +} diff --git a/op-conductor/health/timeboundcounter_test.go b/op-conductor/health/timeboundcounter_test.go new file mode 100644 index 0000000000000..f27c128bf0d6b --- /dev/null +++ b/op-conductor/health/timeboundcounter_test.go @@ -0,0 +1,127 @@ +package health + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestTimeBoundedRotatingCounterSetup(t *testing.T) { + t.Parallel() + t.Run("fail with 0 interval seconds value", func(t *testing.T) { + counter, err := NewTimeBoundedRotatingCounter(0) + require.Error(t, err) + require.Nil(t, counter) + }) + + t.Run("succeed with non-zero interval seconds value", func(t *testing.T) { + counter, err := NewTimeBoundedRotatingCounter(2) + require.NoError(t, err) + require.NotNil(t, counter) + }) +} + +func TestTimeBoundedRotatingCounterIncrement(t *testing.T) { + + mockTimeProvider := &timeProvider{now: 0} // every access to .Now() will increment its value simulating a one-second time passing + + resetInterval := uint64(6) + counter, err := NewTimeBoundedRotatingCounter(resetInterval) + require.NoError(t, err) + require.NotNil(t, counter) + counter.timeProviderFn = mockTimeProvider.Now + + require.Equal(t, int(mockTimeProvider.now), 0) + require.Equal(t, uint64(0), counter.CurrentValue()) + require.Equal(t, int(mockTimeProvider.now), 1) + + newValue := counter.Increment() + require.Equal(t, uint64(1), newValue) + require.Equal(t, int(mockTimeProvider.now), 2) + require.Equal(t, uint64(1), counter.CurrentValue()) + require.Equal(t, int(mockTimeProvider.now), 3) + + newValue 
= counter.Increment() + require.Equal(t, uint64(2), newValue) + require.Equal(t, int(mockTimeProvider.now), 4) + require.Equal(t, uint64(2), counter.CurrentValue()) + require.Equal(t, int(mockTimeProvider.now), 5) + + newValue = counter.Increment() + require.Equal(t, uint64(3), newValue) + require.Equal(t, int(mockTimeProvider.now), 6) + require.Equal(t, uint64(0), counter.CurrentValue()) // the next second counter rotates returning 0 as the current value + require.Equal(t, int(mockTimeProvider.now), 7) + + newValue = counter.Increment() + require.Equal(t, uint64(1), newValue) + require.Equal(t, int(mockTimeProvider.now), 8) + require.Equal(t, uint64(1), counter.CurrentValue()) + require.Equal(t, int(mockTimeProvider.now), 9) + + newValue = counter.Increment() + require.Equal(t, uint64(2), newValue) + require.Equal(t, int(mockTimeProvider.now), 10) + require.Equal(t, uint64(2), counter.CurrentValue()) + require.Equal(t, int(mockTimeProvider.now), 11) + + newValue = counter.Increment() + require.Equal(t, uint64(3), newValue) + require.Equal(t, int(mockTimeProvider.now), 12) + require.Equal(t, uint64(0), counter.CurrentValue()) // the next second counter rotates returning 0 as the current value + require.Equal(t, int(mockTimeProvider.now), 13) + +} + +// To test the bad path: comment out mut.RLock() and mut.RUnlock() in the CurrentValue() method, and run this test again +// you'll see a "fatal error: concurrent map read and map write" +func TestTimeBoundedRotatingCounterConcurrentAccess(t *testing.T) { + mockTimeProvider := &timeProvider{now: 0} + + counter, err := NewTimeBoundedRotatingCounter(1) + require.NoError(t, err) + require.NotNil(t, counter) + counter.timeProviderFn = mockTimeProvider.Now + + wg := &sync.WaitGroup{} + wg.Add(2000) + + write := func() { + defer wg.Done() + counter.Increment() + } + read := func() { + defer wg.Done() + counter.CurrentValue() + } + require.NotPanics(t, func() { + for i := 0; i < 1000; i++ { + go write() + go read() + } + 
wg.Wait() + }) +} + +func TestTimeBoundedRotatingCounterLazyCleanup(t *testing.T) { + mockTimeProvider := &timeProvider{now: 0} + + // a counter with a reset interval of 2 ensuring every two seconds the counter's cache would track a new key:value + // we'll trigger the 2-second increment by calling .Increment() and .CurrentValue() because both under the hood, would call .Now() of the mockTimeProvider + counter, err := NewTimeBoundedRotatingCounter(2) + require.NoError(t, err) + require.NotNil(t, counter) + counter.timeProviderFn = mockTimeProvider.Now + + for i := 0; i < 1000; i++ { + counter.Increment() // trigger a 1-second time increase + counter.CurrentValue() // trigger another 1-second time increase, causing the counter interval to reset ensuring next Increment would write a new key in the cache + } + + require.Equal(t, 1000, len(counter.temporalCache)) + + // 1001st increment should trigger the lazy cleanup this time + counter.Increment() + require.Equal(t, 1, len(counter.temporalCache)) +} diff --git a/op-deployer/.gitignore b/op-deployer/.gitignore index d2ecf22590b57..9313d66543a51 100644 --- a/op-deployer/.gitignore +++ b/op-deployer/.gitignore @@ -1,2 +1,3 @@ bin -.deployer \ No newline at end of file +.deployer +pkg/deployer/artifacts/forge-artifacts/* \ No newline at end of file diff --git a/op-deployer/.goreleaser.yaml b/op-deployer/.goreleaser.yaml index eaf05a78dc936..9dccb255de6af 100644 --- a/op-deployer/.goreleaser.yaml +++ b/op-deployer/.goreleaser.yaml @@ -7,7 +7,6 @@ project_name: op-deployer before: hooks: - # You may remove this if you don't use go modules. 
- go mod tidy builds: @@ -26,8 +25,6 @@ builds: ignore: - goos: windows goarch: arm64 - - goos: linux - goarch: arm64 mod_timestamp: "{{ .CommitTimestamp }}" ldflags: - -X main.GitCommit={{ .FullCommit }} @@ -45,20 +42,6 @@ archives: - goos: windows format: zip -dockers: - - id: default - goos: linux - goarch: amd64 - dockerfile: Dockerfile.default - image_templates: - - "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-deployer:{{ .Tag }}" - - id: minimal - goos: linux - goarch: amd64 - dockerfile: Dockerfile.minimal - image_templates: - - "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-deployer:{{ .Tag }}-minimal" - changelog: sort: asc filters: @@ -74,4 +57,4 @@ release: monorepo: tag_prefix: op-deployer/ - dir: op-deployer \ No newline at end of file + dir: op-deployer diff --git a/op-deployer/Dockerfile.minimal b/op-deployer/Dockerfile.minimal deleted file mode 100644 index 9e8811980090c..0000000000000 --- a/op-deployer/Dockerfile.minimal +++ /dev/null @@ -1,3 +0,0 @@ -FROM scratch -ENTRYPOINT ["/op-deployer"] -COPY op-deployer /op-deployer \ No newline at end of file diff --git a/op-deployer/book/src/user-guide/bootstrap.md b/op-deployer/book/src/user-guide/bootstrap.md index 8810a61c1ace3..f48360e94a285 100644 --- a/op-deployer/book/src/user-guide/bootstrap.md +++ b/op-deployer/book/src/user-guide/bootstrap.md @@ -3,8 +3,8 @@ > Note: if you are joining an existing superchain, you can skip to the `init` and `apply` commands to create your L2 chain(s) Bootstrap commands are used to deploy global singletons and implementation contracts for new superchains. -The deployed contract be then be use with future invocations of `apply` so that new L2 chains can join that superchain. -Most users won't need to use these commands, since `op-deployer apply` will automatically use predeployed contracts if they are available. However, you may need to use bootstrap commands if you're deploying chains to an L1 that isn't natively supported by `op-deployer`. 
+The deployed contracts can then be used with future invocations of `apply` so that new L2 chains can join that superchain. +Most users won't need to use these commands, since `op-deployer apply` will automatically use standard predeployed contracts for the L1/settlement-layer you are deploying on. However, you will need to use bootstrap commands if you're creating a new superchain. There are several bootstrap commands available, which you can view by running `op-deployer bootstrap --help`. We'll focus on the most important ones, which should be run in the sequence listed below. @@ -18,7 +18,6 @@ so the deployment address has no further control over the system. op-deployer bootstrap superchain \ --l1-rpc-url="" \ --private-key="" \ - --artifacts-locator="" \ --outfile="./.deployer/bootstrap_superchain.json" \ --superchain-proxy-admin-owner="" \ --protocol-versions-owner="" \ @@ -49,13 +48,13 @@ This command will deploy several contracts, and output a JSON like the one below ```shell op-deployer bootstrap implementations \ - --artifacts-locator="" \ --l1-rpc-url="" \ --outfile="./.deployer/bootstrap_implementations.json" \ - --mips-version="<1 or 2, for MIPS32 or MIPS64>" \ --private-key="" \ - --protocol-versions-proxy="
" \ - --superchain-config-proxy="
" \ + --protocol-versions-proxy="" \ + --superchain-config-proxy="" \ + --superchain-proxy-admin="" \ + --challenger="" \ --upgrade-controller="" ``` @@ -70,19 +69,26 @@ The command will output a JSON like the one below: ```json { - "Opcm": "0x4eeb114aaf812e21285e5b076030110e7e18fed9", - "DelayedWETHImpl": "0x5e40b9231b86984b5150507046e354dbfbed3d9e", - "OptimismPortalImpl": "0x2d7e764a0d9919e16983a46595cfa81fc34fa7cd", - "PreimageOracleSingleton": "0x1fb8cdfc6831fc866ed9c51af8817da5c287add3", - "MipsSingleton": "0xf027f4a985560fb13324e943edf55ad6f1d15dc1", - "SystemConfigImpl": "0x760c48c62a85045a6b69f07f4a9f22868659cbcc", - "L1CrossDomainMessengerImpl": "0x3ea6084748ed1b2a9b5d4426181f1ad8c93f6231", - "L1ERC721BridgeImpl": "0x276d3730f219f7ec22274f7263180b8452b46d47", - "L1StandardBridgeImpl": "0x78972e88ab8bbb517a36caea23b931bab58ad3c6", - "OptimismMintableERC20FactoryImpl": "0x5493f4677a186f64805fe7317d6993ba4863988f", - "DisputeGameFactoryImpl": "0x4bba758f006ef09402ef31724203f316ab74e4a0", - "AnchorStateRegistryImpl": "0x7b465370bb7a333f99edd19599eb7fb1c2d3f8d2", - "SuperchainConfigImpl": "0x4da82a327773965b8d4d85fa3db8249b387458e7", - "ProtocolVersionsImpl": "0x37e15e4d6dffa9e5e320ee1ec036922e563cb76c" + "opcmAddress": "0x82879934658738b6d5e8f781933ae7bbae05ba31", + "opcmContractsContainerAddress": "0x1e8de1574a2e085b7a292c760d90cf982d3c1a11", + "opcmGameTypeAdderAddress": "0xcab868d42d9088b86598a96d010db5819c19b847", + "opcmDeployerAddress": "0xf8b6718b28fa36b430334e78adaf97174fed818c", + "opcmUpgraderAddress": "0xa4d0a44890fafce541bdc4c1ca36fca1b5d22f56", + "opcmInteropMigratorAddress": "0xf0fca53bb450dd2230c7eb58a39a5dbfc8492fb6", + "opcmStandardValidatorAddress": "0x1364a02f64f03cd990f105058b8cc93a9a0ab2a1", + "delayedWETHImplAddress": "0x570da3694c06a250aea4855b4adcd09505801f9a", + "optimismPortalImplAddress": "0x1aa1d3fc9b39d7edd7ca69f54a35c66dcf1168f1", + "ethLockboxImplAddress": "0xe6e51fa10d481002301534445612c61bae6b3258", + 
"preimageOracleSingletonAddress": "0x1fb8cdfc6831fc866ed9c51af8817da5c287add3", + "mipsSingletonAddress": "0x7a8456ba22df0cb303ae1c93d3cf68ea3a067006", + "systemConfigImplAddress": "0x9f2b1fffd8a7aeef7aeeb002fd8477a4868e7e0a", + "l1CrossDomainMessengerImplAddress": "0x085952eb0f0c3d1ca82061e20e0fe8203cdd630a", + "l1ERC721BridgeImplAddress": "0xbafd2cae054ddf69af27517c6bea912de6b7eb8f", + "l1StandardBridgeImplAddress": "0x6abaa7b42b9a947047c01f41b9bcb8684427bf24", + "optimismMintableERC20FactoryImplAddress": "0xdd0b293b8789e9208481cee5a0c7e78f451d32bf", + "disputeGameFactoryImplAddress": "0xe7ab0c07ee92aae31f213b23a132a155f5c2c7cc", + "anchorStateRegistryImplAddress": "0xda4f46fad0e38d763c56da62c4bc1e9428624893", + "superchainConfigImplAddress": "0xdaf60e3c5ef116810779719da88410cce847c2a4", + "protocolVersionsImplAddress": "0xa95ac4790fedd68d9c3b30ed730afaec6029eb31" } ``` diff --git a/op-deployer/book/src/user-guide/installation.md b/op-deployer/book/src/user-guide/installation.md index ad6b54cbf5280..66286ff924573 100644 --- a/op-deployer/book/src/user-guide/installation.md +++ b/op-deployer/book/src/user-guide/installation.md @@ -16,8 +16,8 @@ binaries, download the latest release from the [releases page][releases] and ext To install from source, you will need Go, `just`, and `git`. 
Then, run the following: ```shell -git clone git@github.com:ethereum-optimism/ethereum-optimism.git # you can skip this if you already have the repo -cd ethereum-optimism/op-deployer +git clone git@github.com:ethereum-optimism/optimism.git # you can skip this if you already have the repo +cd optimism/op-deployer just build cp ./bin/op-deployer /usr/local/bin/op-deployer # or any other directory in your $PATH -``` \ No newline at end of file +``` diff --git a/op-deployer/cmd/op-deployer/main.go b/op-deployer/cmd/op-deployer/main.go index 633f27636620f..ff75d56024287 100644 --- a/op-deployer/cmd/op-deployer/main.go +++ b/op-deployer/cmd/op-deployer/main.go @@ -4,20 +4,10 @@ import ( "fmt" "os" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/clean" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/upgrade" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/verify" - - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/bootstrap" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/inspect" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/manage" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/cli" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/version" opservice "github.com/ethereum-optimism/optimism/op-service" - - "github.com/ethereum-optimism/optimism/op-service/cliapp" - "github.com/urfave/cli/v2" ) var ( @@ -29,57 +19,7 @@ var ( var VersionWithMeta = opservice.FormatVersion(version.Version, GitCommit, GitDate, version.Meta) func main() { - app := cli.NewApp() - app.Version = VersionWithMeta - app.Name = "op-deployer" - app.Usage = "Tool to configure and deploy OP Chains." 
- app.Flags = cliapp.ProtectFlags(deployer.GlobalFlags) - app.Commands = []*cli.Command{ - { - Name: "init", - Usage: "initializes a chain intent and state file", - Flags: cliapp.ProtectFlags(deployer.InitFlags), - Action: deployer.InitCLI(), - }, - { - Name: "apply", - Usage: "applies a chain intent to the chain", - Flags: cliapp.ProtectFlags(deployer.ApplyFlags), - Action: deployer.ApplyCLI(), - }, - { - Name: "upgrade", - Usage: "upgrades contracts by sending tx to OPCM.upgrade function", - Flags: cliapp.ProtectFlags(deployer.UpgradeFlags), - Subcommands: upgrade.Commands, - }, - { - Name: "bootstrap", - Usage: "bootstraps global contract instances", - Subcommands: bootstrap.Commands, - }, - { - Name: "inspect", - Usage: "inspects the state of a deployment", - Subcommands: inspect.Commands, - }, - { - Name: "clean", - Usage: "cleans up various things", - Subcommands: clean.Commands, - }, - { - Name: "verify", - Usage: "verifies deployed contracts on Etherscan", - Flags: cliapp.ProtectFlags(deployer.VerifyFlags), - Action: verify.VerifyCLI, - }, - { - Name: "manage", - Usage: "manages the chain", - Subcommands: manage.Commands, - }, - } + app := cli.NewApp(VersionWithMeta) app.Writer = os.Stdout app.ErrWriter = os.Stderr err := app.Run(os.Args) diff --git a/op-deployer/justfile b/op-deployer/justfile index a8ffe0069d3b1..e10fce3938e0d 100644 --- a/op-deployer/justfile +++ b/op-deployer/justfile @@ -1,20 +1,54 @@ -build: - go build -o bin/op-deployer cmd/op-deployer/main.go +import '../justfiles/go.just' -build-contracts: - just ../packages/contracts-bedrock/forge-build +build: build-contracts copy-contract-artifacts build-go -test args='./...': build-contracts +test args='./...': build-contracts copy-contract-artifacts go test -v {{args}} -download-artifacts checksum outfile: - curl -o {{outfile}} -L https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-{{checksum}}.tar.gz +build-contracts: + just ../packages/contracts-bedrock/forge-build + 
+copy-contract-artifacts: + rm -f ./pkg/deployer/artifacts/forge-artifacts/artifacts.tgz + tar -cvzf ./pkg/deployer/artifacts/forge-artifacts/artifacts.tgz -C ../packages/contracts-bedrock/forge-artifacts --exclude="*.t.sol" . + +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X main.Version=" + VERSION + " " + \ + "-X main.Meta=" + VERSION_META + " " + \ + "") + "'" + +build-go: (go_build "./bin/op-deployer" "./cmd/op-deployer" "-ldflags" _LDFLAGSSTRING) + +docker-build: build-contracts copy-contract-artifacts + #!/bin/bash + cd .. + docker buildx bake op-deployer --load --set op-deployer.tags=op-deployer:local + +# Updates pkg/deployer/forge/version.json with version and checksum of tarball for each supported OS/arch +# - forge_version format: "v1.3.4" +update-forge forge_version="": + #!/bin/bash + ./scripts/forge-checksums.sh {{forge_version}} -calculate-artifacts-hash checksum: - just download-artifacts {{checksum}} /tmp/artifact.tgz - sha256sum /tmp/artifact.tgz - rm /tmp/artifact.tgz +check-forge-version: + #!/bin/bash + set -euo pipefail + mise_version=$(yq '.tools.forge' ../mise.toml) + echo "mise forge version : $mise_version" + op_deployer_version=$(jq -r '.forge' pkg/deployer/forge/version.json | sed 's/^v//') + echo "op-deployer forge version: $op_deployer_version" + + if [ "$mise_version" != "$op_deployer_version" ]; then + echo "ERROR: Forge versions don't match!" 
+ echo " mise : $mise_version" + echo " op-deployer: $op_deployer_version" + exit 1 + fi + + echo -e "\n✓ forge versions match: $mise_version" # ====================================== # Deployment and Verification Utilities @@ -25,12 +59,12 @@ calculate-artifacts-hash checksum: # ====================================== export ROOT_DIR := parent_directory(justfile_directory()) -export DEFAULT_LOCATOR := "file://" + ROOT_DIR + "/packages/contracts-bedrock/forge-artifacts" +export DEFAULT_LOCATOR := "embedded" export NETWORK := env_var_or_default("NETWORK", "sepolia") export PROTOCOL_VERSIONS_PROXY := if NETWORK == "mainnet" { "0x8062AbC286f5e7D9428a0Ccb9AbD71e50d93b935" } else if NETWORK == "sepolia" { "0x79ADD5713B383DAa0a138d3C4780C7A1804a8090" } else { "" } export SUPERCHAIN_CONFIG_PROXY := if NETWORK == "mainnet" { "0x95703e0982140D16f8ebA6d158FccEde42f04a4C" } else if NETWORK == "sepolia" { "0xC2Be75506d5724086DEB7245bd260Cc9753911Be" } else { "" } export SUPERCHAIN_PROXY_ADMIN := if NETWORK == "mainnet" { "0x543ba4aadbab8f9025686bd03993043599c6fb04" } else if NETWORK == "sepolia" { "0x189abaaaa82dfc015a588a7dbad6f13b1d3485bc" } else { "" } -export UPGRADE_CONTROLLER := if NETWORK == "mainnet" { "0x5a0Aae59D09fccBdDb6C6CcEB07B7279367C3d2A" } else if NETWORK == "sepolia" { "0x1Eb2fFc903729a0F03966B917003800b145F56E2" } else { "" } +export L1_PROXY_ADMIN_OWNER := if NETWORK == "mainnet" { "0x5a0Aae59D09fccBdDb6C6CcEB07B7279367C3d2A" } else if NETWORK == "sepolia" { "0x1Eb2fFc903729a0F03966B917003800b145F56E2" } else { "" } export CHALLENGER := if NETWORK == "mainnet" { "0x9BA6e03D8B90dE867373Db8cF1A58d2F7F006b3A" } else if NETWORK == "sepolia" { "0xfd1D2e729aE8eEe2E146c033bf4400fE75284301" } else { "" } # This command should be called before any deployment or verification commands. 
@@ -52,13 +86,12 @@ deploy-opcm release="dev" locator="$DEFAULT_LOCATOR": _validate_rpc #!/bin/bash echo "Using artifacts locator: {{locator}}" ./bin/op-deployer bootstrap implementations \ - --artifacts-locator {{locator}} \ --l1-rpc-url $ETH_RPC_URL \ --private-key $PRIVATE_KEY \ --mips-version 7 \ --protocol-versions-proxy $PROTOCOL_VERSIONS_PROXY \ --superchain-config-proxy $SUPERCHAIN_CONFIG_PROXY \ - --upgrade-controller $UPGRADE_CONTROLLER \ + --l1-proxy-admin-owner $L1_PROXY_ADMIN_OWNER \ --l1-contracts-release {{release}} \ --superchain-proxy-admin $SUPERCHAIN_PROXY_ADMIN \ --challenger $CHALLENGER \ diff --git a/op-deployer/pkg/cli/app.go b/op-deployer/pkg/cli/app.go new file mode 100644 index 0000000000000..10968280d09fc --- /dev/null +++ b/op-deployer/pkg/cli/app.go @@ -0,0 +1,76 @@ +package cli + +import ( + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/bootstrap" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/clean" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/inspect" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/manage" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/upgrade" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/verify" + + "github.com/ethereum-optimism/optimism/op-service/cliapp" + "github.com/urfave/cli/v2" +) + +// NewApp creates and configures a new CLI application +func NewApp(versionWithMeta string) *cli.App { + app := cli.NewApp() + app.Version = versionWithMeta + app.Name = "op-deployer" + app.Usage = "Tool to configure and deploy OP Chains." 
+ app.Flags = cliapp.ProtectFlags(deployer.GlobalFlags) + app.Before = func(context *cli.Context) error { + if err := deployer.CreateCacheDir(context.String(deployer.CacheDirFlagName)); err != nil { + return err + } + return nil + } + app.Commands = []*cli.Command{ + { + Name: "init", + Usage: "initializes a chain intent and state file", + Flags: cliapp.ProtectFlags(deployer.InitFlags), + Action: deployer.InitCLI(), + }, + { + Name: "apply", + Usage: "applies a chain intent to the chain", + Flags: cliapp.ProtectFlags(deployer.ApplyFlags), + Action: deployer.ApplyCLI(), + }, + { + Name: "upgrade", + Usage: "upgrades contracts by sending tx to OPCM.upgrade function", + Flags: cliapp.ProtectFlags(deployer.UpgradeFlags), + Subcommands: upgrade.Commands, + }, + { + Name: "bootstrap", + Usage: "bootstraps global contract instances", + Subcommands: bootstrap.Commands, + }, + { + Name: "inspect", + Usage: "inspects the state of a deployment", + Subcommands: inspect.Commands, + }, + { + Name: "clean", + Usage: "cleans up various things", + Subcommands: clean.Commands, + }, + { + Name: "verify", + Usage: "verifies deployed contracts on Etherscan", + Flags: cliapp.ProtectFlags(deployer.VerifyFlags), + Action: verify.VerifyCLI, + }, + { + Name: "manage", + Usage: "manages the chain", + Subcommands: manage.Commands, + }, + } + return app +} diff --git a/op-deployer/pkg/deployer/apply.go b/op-deployer/pkg/deployer/apply.go index ffb2d0d0cbcf9..9f0200aa301e3 100644 --- a/op-deployer/pkg/deployer/apply.go +++ b/op-deployer/pkg/deployer/apply.go @@ -7,6 +7,8 @@ import ( "math/big" "strings" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/devnet-sdk/proofs/prestate" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" "github.com/ethereum-optimism/optimism/op-chain-ops/script" @@ -170,7 +172,7 @@ func ApplyPipeline( } st := opts.State - l1ArtifactsFS, err := artifacts.Download(ctx, intent.L1ContractsLocator, 
artifacts.BarProgressor(), opts.CacheDir) + l1ArtifactsFS, err := artifacts.Download(ctx, intent.L1ContractsLocator, ioutil.BarProgressor(), opts.CacheDir) if err != nil { return fmt.Errorf("failed to download L1 artifacts: %w", err) } @@ -179,7 +181,7 @@ func ApplyPipeline( if intent.L1ContractsLocator.Equal(intent.L2ContractsLocator) { l2ArtifactsFS = l1ArtifactsFS } else { - l2Afs, err := artifacts.Download(ctx, intent.L2ContractsLocator, artifacts.BarProgressor(), opts.CacheDir) + l2Afs, err := artifacts.Download(ctx, intent.L2ContractsLocator, ioutil.BarProgressor(), opts.CacheDir) if err != nil { return fmt.Errorf("failed to download L2 artifacts: %w", err) } diff --git a/op-deployer/pkg/deployer/artifacts/download.go b/op-deployer/pkg/deployer/artifacts/download.go index 7fa543fe5ad6e..35fac6338ba2f 100644 --- a/op-deployer/pkg/deployer/artifacts/download.go +++ b/op-deployer/pkg/deployer/artifacts/download.go @@ -2,60 +2,43 @@ package artifacts import ( "archive/tar" - "bufio" "bytes" "compress/gzip" "context" "crypto/sha256" "errors" "fmt" - "io" "io/fs" - "net/http" "net/url" "os" "path" - "strings" "sync" + "github.com/ethereum-optimism/optimism/op-service/httputil" + + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" ) var ErrUnsupportedArtifactsScheme = errors.New("unsupported artifacts URL scheme") type Downloader interface { - Download(ctx context.Context, url string, progress DownloadProgressor, targetDir string) (string, error) + Download(ctx context.Context, url string, progress ioutil.Progressor, targetDir string) (string, error) } type Extractor interface { Extract(src string, dest string) (string, error) } -func Download(ctx context.Context, loc *Locator, progressor DownloadProgressor, targetDir string) (foundry.StatDirFs, error) { +func Download(ctx context.Context, loc *Locator, progressor 
ioutil.Progressor, targetDir string) (foundry.StatDirFs, error) { if progressor == nil { - progressor = NoopProgressor() + progressor = ioutil.NoopProgressor() } - var u *url.URL var err error - var checker integrityChecker - if loc.IsTag() { - u, err = standard.ArtifactsURLForTag(loc.Tag) - if err != nil { - return nil, fmt.Errorf("failed to get standard artifacts URL for tag %s: %w", loc.Tag, err) - } - - hash, err := standard.ArtifactsHashForTag(loc.Tag) - if err != nil { - return nil, fmt.Errorf("failed to get standard artifacts hash for tag %s: %w", loc.Tag, err) - } - - checker = &hashIntegrityChecker{hash: hash} - } else { - u = loc.URL - checker = new(noopIntegrityChecker) - } + u := loc.URL + checker := new(noopIntegrityChecker) var artifactsFS fs.FS switch u.Scheme { @@ -66,13 +49,18 @@ func Download(ctx context.Context, loc *Locator, progressor DownloadProgressor, } case "file": artifactsFS = os.DirFS(u.Path) + case "embedded": + artifactsFS, err = ExtractEmbedded(targetDir) + if err != nil { + return nil, fmt.Errorf("failed to extract embedded artifacts: %w", err) + } default: return nil, ErrUnsupportedArtifactsScheme } return artifactsFS.(foundry.StatDirFs), nil } -func downloadHTTP(ctx context.Context, u *url.URL, progressor DownloadProgressor, checker integrityChecker, targetDir string) (fs.FS, error) { +func downloadHTTP(ctx context.Context, u *url.URL, progressor ioutil.Progressor, checker integrityChecker, targetDir string) (fs.FS, error) { cacher := &CachingDownloader{ d: new(HTTPDownloader), } @@ -96,21 +84,7 @@ func downloadHTTP(ctx context.Context, u *url.URL, progressor DownloadProgressor type HTTPDownloader struct{} -func (d *HTTPDownloader) Download(ctx context.Context, url string, progress DownloadProgressor, targetDir string) (string, error) { - req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) - if err != nil { - return "", fmt.Errorf("failed to create request: %w", err) - } - - res, err := http.DefaultClient.Do(req) 
- if err != nil { - return "", fmt.Errorf("failed to download artifacts: %w", err) - } - if res.StatusCode != http.StatusOK { - return "", fmt.Errorf("failed to download artifacts: invalid status code %s", res.Status) - } - defer res.Body.Close() - +func (d *HTTPDownloader) Download(ctx context.Context, url string, progress ioutil.Progressor, targetDir string) (string, error) { if err := os.MkdirAll(targetDir, 0755); err != nil { return "", fmt.Errorf("failed to ensure cache directory '%s': %w", targetDir, err) } @@ -118,16 +92,12 @@ func (d *HTTPDownloader) Download(ctx context.Context, url string, progress Down if err != nil { return "", fmt.Errorf("failed to create temporary file: %w", err) } - - pr := &progressReader{ - r: res.Body, - progress: progress, - total: res.ContentLength, + downloader := &httputil.Downloader{ + Progressor: progress, } - if _, err := io.Copy(tmpFile, pr); err != nil { - return "", fmt.Errorf("failed to write to temporary file: %w", err) + if err := downloader.Download(ctx, url, tmpFile); err != nil { + return "", fmt.Errorf("failed to download: %w", err) } - return tmpFile.Name(), nil } @@ -136,7 +106,7 @@ type CachingDownloader struct { mtx sync.Mutex } -func (d *CachingDownloader) Download(ctx context.Context, url string, progress DownloadProgressor, targetDir string) (string, error) { +func (d *CachingDownloader) Download(ctx context.Context, url string, progress ioutil.Progressor, targetDir string) (string, error) { d.mtx.Lock() defer d.mtx.Unlock() @@ -175,47 +145,9 @@ func (e *TarballExtractor) Extract(src string, dest string) error { defer gzr.Close() tr := tar.NewReader(gzr) - if err := untar(dest, tr); err != nil { + if err := ioutil.Untar(dest, tr); err != nil { return fmt.Errorf("failed to untar: %w", err) } return nil } - -func untar(dir string, tr *tar.Reader) error { - for { - hdr, err := tr.Next() - if err == io.EOF { - return nil - } - if err != nil { - return fmt.Errorf("failed to read tar header: %w", err) - } - - 
cleanedName := path.Clean(hdr.Name) - if strings.Contains(cleanedName, "..") { - return fmt.Errorf("invalid file path: %s", hdr.Name) - } - dst := path.Join(dir, cleanedName) - if hdr.FileInfo().IsDir() { - if err := os.MkdirAll(dst, 0o755); err != nil { - return fmt.Errorf("failed to create directory: %w", err) - } - continue - } - - f, err := os.Create(dst) - buf := bufio.NewWriter(f) - if err != nil { - return fmt.Errorf("failed to create file: %w", err) - } - if _, err := io.Copy(buf, tr); err != nil { - _ = f.Close() - return fmt.Errorf("failed to write file: %w", err) - } - if err := buf.Flush(); err != nil { - return fmt.Errorf("failed to flush buffer: %w", err) - } - _ = f.Close() - } -} diff --git a/op-deployer/pkg/deployer/artifacts/download_test.go b/op-deployer/pkg/deployer/artifacts/download_test.go index e8349c1e84655..19cd08c8e0186 100644 --- a/op-deployer/pkg/deployer/artifacts/download_test.go +++ b/op-deployer/pkg/deployer/artifacts/download_test.go @@ -111,19 +111,3 @@ func TestDownloadArtifacts_MockArtifacts(t *testing.T) { require.ErrorContains(t, err, "integrity check failed") }) } - -func TestDownloadArtifacts_TaggedVersions(t *testing.T) { - tags := []string{ - "op-contracts/v1.6.0", - "op-contracts/v1.7.0-beta.1+l2-contracts", - } - testCacheDir := testutils.IsolatedTestDirWithAutoCleanup(t) - for _, tag := range tags { - t.Run(tag, func(t *testing.T) { - t.Parallel() - loc := MustNewLocatorFromTag(tag) - _, err := Download(context.Background(), loc, nil, testCacheDir) - require.NoError(t, err) - }) - } -} diff --git a/op-deployer/pkg/deployer/artifacts/embedded.go b/op-deployer/pkg/deployer/artifacts/embedded.go new file mode 100644 index 0000000000000..c5b0d9878d5f9 --- /dev/null +++ b/op-deployer/pkg/deployer/artifacts/embedded.go @@ -0,0 +1,39 @@ +package artifacts + +import ( + "archive/tar" + "compress/gzip" + "embed" + "fmt" + "os" + "path/filepath" + + "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" + 
"github.com/ethereum-optimism/optimism/op-service/ioutil" +) + +//go:embed forge-artifacts +var embedDir embed.FS + +const embeddedArtifactsFile = "artifacts.tgz" + +func ExtractEmbedded(dir string) (foundry.StatDirFs, error) { + f, err := embedDir.Open(filepath.Join("forge-artifacts", embeddedArtifactsFile)) + if err != nil { + return nil, fmt.Errorf("could not open embedded artifacts: %w", err) + } + defer f.Close() + + gzr, err := gzip.NewReader(f) + if err != nil { + return nil, fmt.Errorf("could not create gzip reader: %w", err) + } + defer gzr.Close() + + tr := tar.NewReader(gzr) + if err := ioutil.Untar(dir, tr); err != nil { + return nil, fmt.Errorf("failed to untar embedded artifacts: %w", err) + } + + return os.DirFS(dir).(foundry.StatDirFs), nil +} diff --git a/op-deployer/pkg/deployer/artifacts/forge-artifacts/README.md b/op-deployer/pkg/deployer/artifacts/forge-artifacts/README.md new file mode 100644 index 0000000000000..9ee790272e519 --- /dev/null +++ b/op-deployer/pkg/deployer/artifacts/forge-artifacts/README.md @@ -0,0 +1,2 @@ +Artifacts in this directory will be embedded inside the `op-deployer` binary. The directory can be populated by running +`just copy-contract-artifacts`. 
\ No newline at end of file diff --git a/op-deployer/pkg/deployer/artifacts/locator.go b/op-deployer/pkg/deployer/artifacts/locator.go index 47d2ac1089003..5c2c00e9789a7 100644 --- a/op-deployer/pkg/deployer/artifacts/locator.go +++ b/op-deployer/pkg/deployer/artifacts/locator.go @@ -1,50 +1,35 @@ package artifacts import ( + "errors" "fmt" "net/url" "strings" - - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" ) type schemeUnmarshaler func(string) (*Locator, error) var schemeUnmarshalerDispatch = map[string]schemeUnmarshaler{ - "tag": unmarshalTag, "file": unmarshalURL, "http": unmarshalURL, "https": unmarshalURL, } -var DefaultL1ContractsLocator = &Locator{ - Tag: standard.DefaultL1ContractsTag, -} +const EmbeddedLocatorString = "embedded" -var DefaultL2ContractsLocator = &Locator{ - Tag: standard.DefaultL2ContractsTag, +var embeddedURL = &url.URL{ + Scheme: EmbeddedLocatorString, } -func NewLocatorFromTag(tag string) (*Locator, error) { - loc := new(Locator) - if err := loc.UnmarshalText([]byte("tag://" + tag)); err != nil { - return nil, fmt.Errorf("failed to unmarshal tag: %w", err) - } - return loc, nil +var EmbeddedLocator = &Locator{ + URL: embeddedURL, } -func MustNewLocatorFromTag(tag string) *Locator { - loc, err := NewLocatorFromTag(tag) - if err != nil { - panic(err) - } - return loc -} +var DefaultL1ContractsLocator = EmbeddedLocator + +var DefaultL2ContractsLocator = EmbeddedLocator func NewLocatorFromURL(u string) (*Locator, error) { - if strings.HasPrefix(u, "tag://") { - return NewLocatorFromTag(strings.TrimPrefix(u, "tag://")) - } parsedURL, err := url.Parse(u) if err != nil { return nil, fmt.Errorf("failed to parse URL: %w", err) @@ -72,7 +57,6 @@ func MustNewFileLocator(path string) *Locator { type Locator struct { URL *url.URL - Tag string } func NewFileLocator(path string) (*Locator, error) { @@ -87,8 +71,17 @@ func NewFileLocator(path string) (*Locator, error) { func (a *Locator) UnmarshalText(text []byte) error { 
str := string(text) + if strings.HasPrefix(str, "tag://") { + return errors.New("tag:// locators are no longer supported - use embedded artifacts instead") + } + + if str == "embedded" { + *a = *EmbeddedLocator + return nil + } + for scheme, unmarshaler := range schemeUnmarshalerDispatch { - if !strings.HasPrefix(str, scheme+"://") { + if !strings.HasPrefix(str, scheme+":") { continue } @@ -101,19 +94,33 @@ func (a *Locator) UnmarshalText(text []byte) error { return nil } - return fmt.Errorf("unsupported scheme") + return fmt.Errorf("unsupported scheme %s", str) } func (a *Locator) MarshalText() ([]byte, error) { - if a.URL != nil { - return []byte(a.URL.String()), nil + if a.URL.String() == embeddedURL.String() || a.URL.String() == "" { + return []byte("embedded"), nil } - return []byte("tag://" + a.Tag), nil + return []byte(a.URL.String()), nil +} + +func (a *Locator) MarshalTOML() ([]byte, error) { + if a.URL.String() == embeddedURL.String() || a.URL.String() == "" { + return []byte(`"embedded"`), nil + } + return []byte(`"` + a.URL.String() + `"`), nil } -func (a *Locator) IsTag() bool { - return a.Tag != "" +func (a *Locator) UnmarshalTOML(i interface{}) error { + switch v := i.(type) { + case string: + return a.UnmarshalText([]byte(v)) + case []byte: + return a.UnmarshalText(v) + default: + return fmt.Errorf("unsupported type for TOML unmarshaling: %T", i) + } } func (a *Locator) Equal(b *Locator) bool { @@ -122,13 +129,8 @@ func (a *Locator) Equal(b *Locator) bool { return string(aStr) == string(bStr) } -func unmarshalTag(tag string) (*Locator, error) { - tag = strings.TrimPrefix(tag, "tag://") - if !strings.HasPrefix(tag, "op-contracts/") { - return nil, fmt.Errorf("invalid tag: %s", tag) - } - - return &Locator{Tag: tag}, nil +func (a *Locator) IsEmbedded() bool { + return a.URL.String() == embeddedURL.String() } func unmarshalURL(text string) (*Locator, error) { diff --git a/op-deployer/pkg/deployer/artifacts/locator_test.go 
b/op-deployer/pkg/deployer/artifacts/locator_test.go index e58205d4390b8..e8433b9408257 100644 --- a/op-deployer/pkg/deployer/artifacts/locator_test.go +++ b/op-deployer/pkg/deployer/artifacts/locator_test.go @@ -14,20 +14,6 @@ func TestLocator_Marshaling(t *testing.T) { out *Locator err bool }{ - { - name: "valid tag", - in: "tag://op-contracts/v1.6.0", - out: &Locator{ - Tag: "op-contracts/v1.6.0", - }, - err: false, - }, - { - name: "mal-formed tag", - in: "tag://honk", - out: nil, - err: true, - }, { name: "valid HTTPS URL", in: "https://example.com", @@ -70,6 +56,14 @@ func TestLocator_Marshaling(t *testing.T) { out: nil, err: true, }, + { + name: "embedded", + in: "embedded", + out: &Locator{ + URL: embeddedURL, + }, + err: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -101,21 +95,6 @@ func TestLocator_Equal(t *testing.T) { b *Locator equal bool }{ - { - MustNewLocatorFromTag("op-contracts/v1.6.0"), - MustNewLocatorFromTag("op-contracts/v1.8.0-rc.4"), - false, - }, - { - MustNewLocatorFromTag("op-contracts/v1.6.0"), - MustNewLocatorFromTag("op-contracts/v1.6.0"), - true, - }, - { - MustNewLocatorFromURL("http://www.example.com"), - MustNewLocatorFromTag("op-contracts/v1.6.0"), - false, - }, { MustNewLocatorFromURL("https://www.example.com"), MustNewLocatorFromURL("http://www.example.com"), @@ -126,11 +105,6 @@ func TestLocator_Equal(t *testing.T) { MustNewLocatorFromURL("http://www.example.com"), true, }, - { - MustNewLocatorFromTag("op-contracts/v1.6.0"), - MustNewFileLocator("/foo/bar"), - false, - }, { MustNewFileLocator("/foo/bar"), MustNewFileLocator("/foo/bar"), diff --git a/op-deployer/pkg/deployer/artifacts/progress.go b/op-deployer/pkg/deployer/artifacts/progress.go deleted file mode 100644 index d6a3eed57509f..0000000000000 --- a/op-deployer/pkg/deployer/artifacts/progress.go +++ /dev/null @@ -1,48 +0,0 @@ -package artifacts - -import ( - "io" - "sync" - - "github.com/ethereum/go-ethereum/log" - 
"github.com/schollz/progressbar/v3" -) - -type DownloadProgressor func(current, total int64) - -func BarProgressor() DownloadProgressor { - var bar *progressbar.ProgressBar - var init sync.Once - return func(curr, total int64) { - init.Do(func() { - bar = progressbar.DefaultBytes(total) - }) - _ = bar.Set64(curr) - } -} - -func NoopProgressor() DownloadProgressor { - return func(curr, total int64) {} -} - -func LogProgressor(lgr log.Logger) DownloadProgressor { - return func(curr, total int64) { - lgr.Info("artifacts download progress", "current", curr, "total", total) - } -} - -type progressReader struct { - r io.Reader - progress DownloadProgressor - curr int64 - total int64 -} - -func (pr *progressReader) Read(p []byte) (int, error) { - n, err := pr.r.Read(p) - pr.curr += int64(n) - if pr.progress != nil { - pr.progress(pr.curr, pr.total) - } - return n, err -} diff --git a/op-deployer/pkg/deployer/bootstrap/flags.go b/op-deployer/pkg/deployer/bootstrap/flags.go index d96d58f11e18b..dbf4e0625e34e 100644 --- a/op-deployer/pkg/deployer/bootstrap/flags.go +++ b/op-deployer/pkg/deployer/bootstrap/flags.go @@ -16,9 +16,9 @@ const ( ProofMaturityDelaySecondsFlagName = "proof-maturity-delay-seconds" DisputeGameFinalityDelaySecondsFlagName = "dispute-game-finality-delay-seconds" MIPSVersionFlagName = "mips-version" + DevFeatureBitmapFlagName = "dev-feature-bitmap" ProxyOwnerFlagName = "proxy-owner" SuperchainProxyAdminOwnerFlagName = "superchain-proxy-admin-owner" - L1ContractsReleaseFlagName = "l1-contracts-release" ProtocolVersionsOwnerFlagName = "protocol-versions-owner" GuardianFlagName = "guardian" PausedFlagName = "paused" @@ -69,6 +69,12 @@ var ( EnvVars: deployer.PrefixEnvVar("MIPS_VERSION"), Value: standard.MIPSVersion, } + DevFeatureBitmapFlag = &cli.StringFlag{ + Name: DevFeatureBitmapFlagName, + Usage: "Development feature bitmap.", + EnvVars: deployer.PrefixEnvVar("DEV_FEATURE_BITMAP"), + Value: common.Hash{}.Hex(), + } ProxyOwnerFlag = &cli.StringFlag{ 
Name: ProxyOwnerFlagName, Usage: "Proxy owner address.", @@ -81,12 +87,6 @@ var ( EnvVars: deployer.PrefixEnvVar("SUPERCHAIN_PROXY_ADMIN_OWNER"), Value: common.Address{}.Hex(), } - L1ContractsReleaseFlag = &cli.StringFlag{ - Name: L1ContractsReleaseFlagName, - Usage: "L1 contracts release", - EnvVars: deployer.PrefixEnvVar("L1_CONTRACTS_RELEASE"), - Value: "dev", - } ProtocolVersionsOwnerFlag = &cli.StringFlag{ Name: ProtocolVersionsOwnerFlagName, Usage: "Owner address for protocol versions", @@ -124,10 +124,11 @@ var ( Usage: "Protocol versions proxy.", EnvVars: deployer.PrefixEnvVar("PROTOCOL_VERSIONS_PROXY"), } - UpgradeControllerFlag = &cli.StringFlag{ - Name: "upgrade-controller", - Usage: "Upgrade controller.", - EnvVars: deployer.PrefixEnvVar("UPGRADE_CONTROLLER"), + L1ProxyAdminOwnerFlag = &cli.StringFlag{ + Name: "l1-proxy-admin-owner", + Aliases: []string{"upgrade-controller"}, + Usage: "L1 ProxyAdmin Owner.", + EnvVars: append(deployer.PrefixEnvVar("L1_PROXY_ADMIN_OWNER"), deployer.PrefixEnvVar("UPGRADE_CONTROLLER")...), } SuperchainProxyAdminFlag = &cli.StringFlag{ Name: "superchain-proxy-admin", @@ -139,6 +140,11 @@ var ( Usage: "Path to a JSON file", EnvVars: deployer.PrefixEnvVar("CONFIG"), } + ChallengerFlag = &cli.StringFlag{ + Name: "challenger", + Usage: "Challenger.", + EnvVars: deployer.PrefixEnvVar("CHALLENGER"), + } ) var ImplementationsFlags = []cli.Flag{ @@ -147,16 +153,17 @@ var ImplementationsFlags = []cli.Flag{ OutfileFlag, deployer.ArtifactsLocatorFlag, MIPSVersionFlag, + DevFeatureBitmapFlag, WithdrawalDelaySecondsFlag, MinProposalSizeBytesFlag, ChallengePeriodSecondsFlag, ProofMaturityDelaySecondsFlag, DisputeGameFinalityDelaySecondsFlag, - L1ContractsReleaseFlag, SuperchainConfigProxyFlag, ProtocolVersionsProxyFlag, - UpgradeControllerFlag, + L1ProxyAdminOwnerFlag, SuperchainProxyAdminFlag, + ChallengerFlag, } var ProxyFlags = []cli.Flag{ diff --git a/op-deployer/pkg/deployer/bootstrap/implementations.go 
b/op-deployer/pkg/deployer/bootstrap/implementations.go index d56e8b61f536a..f6e6d17e17254 100644 --- a/op-deployer/pkg/deployer/bootstrap/implementations.go +++ b/op-deployer/pkg/deployer/bootstrap/implementations.go @@ -9,10 +9,10 @@ import ( "strings" mipsVersion "github.com/ethereum-optimism/optimism/cannon/mipsevm/versions" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/broadcaster" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/opcm" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" "github.com/ethereum-optimism/optimism/op-deployer/pkg/env" "github.com/ethereum-optimism/optimism/op-service/cliutil" opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto" @@ -32,16 +32,20 @@ type ImplementationsConfig struct { L1RPCUrl string `cli:"l1-rpc-url"` PrivateKey string `cli:"private-key"` ArtifactsLocator *artifacts.Locator `cli:"artifacts-locator"` - L1ContractsRelease string `cli:"l1-contracts-release"` MIPSVersion int `cli:"mips-version"` WithdrawalDelaySeconds uint64 `cli:"withdrawal-delay-seconds"` MinProposalSizeBytes uint64 `cli:"min-proposal-size-bytes"` ChallengePeriodSeconds uint64 `cli:"challenge-period-seconds"` ProofMaturityDelaySeconds uint64 `cli:"proof-maturity-delay-seconds"` DisputeGameFinalityDelaySeconds uint64 `cli:"dispute-game-finality-delay-seconds"` + DevFeatureBitmap common.Hash `cli:"dev-feature-bitmap"` + FaultGameMaxGameDepth uint64 `cli:"fault-game-max-game-depth"` + FaultGameSplitDepth uint64 `cli:"fault-game-split-depth"` + FaultGameClockExtension uint64 `cli:"fault-game-clock-extension"` + FaultGameMaxClockDuration uint64 `cli:"fault-game-max-clock-duration"` SuperchainConfigProxy common.Address `cli:"superchain-config-proxy"` ProtocolVersionsProxy common.Address `cli:"protocol-versions-proxy"` - UpgradeController 
common.Address `cli:"upgrade-controller"` + L1ProxyAdminOwner common.Address `cli:"l1-proxy-admin-owner"` SuperchainProxyAdmin common.Address `cli:"superchain-proxy-admin"` Challenger common.Address `cli:"challenger"` CacheDir string `cli:"cache-dir"` @@ -71,15 +75,6 @@ func (c *ImplementationsConfig) Check() error { if c.ArtifactsLocator == nil { return errors.New("artifacts locator must be specified") } - if c.ArtifactsLocator.IsTag() { - if c.L1ContractsRelease != "" { - return errors.New("l1 contracts release cannot be specified if using an artifacts tag") - } - - c.L1ContractsRelease = c.ArtifactsLocator.Tag - } else if c.L1ContractsRelease == "" { - return errors.New("l1 contracts release must be specified if not using an artifacts tag") - } if !mipsVersion.IsSupported(c.MIPSVersion) { return errors.New("MIPS version is not supported") } @@ -98,14 +93,30 @@ func (c *ImplementationsConfig) Check() error { if c.DisputeGameFinalityDelaySeconds == 0 { return errors.New("dispute game finality delay in seconds must be specified") } + // Check V2 fault game parameters only if V2 dispute games feature is enabled + deployV2Games := deployer.IsDevFeatureEnabled(c.DevFeatureBitmap, deployer.DeployV2DisputeGamesDevFlag) + if deployV2Games { + if c.FaultGameMaxGameDepth == 0 { + return errors.New("fault game max game depth must be specified when V2 dispute games feature is enabled") + } + if c.FaultGameSplitDepth == 0 { + return errors.New("fault game split depth must be specified when V2 dispute games feature is enabled") + } + if c.FaultGameClockExtension == 0 { + return errors.New("fault game clock extension must be specified when V2 dispute games feature is enabled") + } + if c.FaultGameMaxClockDuration == 0 { + return errors.New("fault game max clock duration must be specified when V2 dispute games feature is enabled") + } + } if c.SuperchainConfigProxy == (common.Address{}) { return errors.New("superchain config proxy must be specified") } if c.ProtocolVersionsProxy 
== (common.Address{}) { return errors.New("protocol versions proxy must be specified") } - if c.UpgradeController == (common.Address{}) { - return errors.New("upgrade controller must be specified") + if c.L1ProxyAdminOwner == (common.Address{}) { + return errors.New("l1 proxy admin owner must be specified") } if c.SuperchainProxyAdmin == (common.Address{}) { return errors.New("superchain proxy admin must be specified") @@ -127,6 +138,13 @@ func ImplementationsCLI(cliCtx *cli.Context) error { } cfg.Logger = l + artifactsURLStr := cliCtx.String(deployer.ArtifactsLocatorFlagName) + artifactsLocator := new(artifacts.Locator) + if err := artifactsLocator.UnmarshalText([]byte(artifactsURLStr)); err != nil { + return fmt.Errorf("failed to parse artifacts URL: %w", err) + } + cfg.ArtifactsLocator = artifactsLocator + ctx := ctxinterrupt.WithCancelOnInterrupt(cliCtx.Context) outfile := cliCtx.String(OutfileFlagName) dio, err := Implementations(ctx, cfg) @@ -147,11 +165,7 @@ func Implementations(ctx context.Context, cfg ImplementationsConfig) (opcm.Deplo lgr := cfg.Logger - if cfg.ArtifactsLocator.IsTag() && !standard.IsSupportedL1Version(cfg.ArtifactsLocator.Tag) { - return dio, fmt.Errorf("unsupported L1 version: %s", cfg.ArtifactsLocator.Tag) - } - - artifactsFS, err := artifacts.Download(ctx, cfg.ArtifactsLocator, artifacts.BarProgressor(), cfg.CacheDir) + artifactsFS, err := artifacts.Download(ctx, cfg.ArtifactsLocator, ioutil.BarProgressor(), cfg.CacheDir) if err != nil { return dio, fmt.Errorf("failed to download artifacts: %w", err) } @@ -210,11 +224,15 @@ func Implementations(ctx context.Context, cfg ImplementationsConfig) (opcm.Deplo ProofMaturityDelaySeconds: new(big.Int).SetUint64(cfg.ProofMaturityDelaySeconds), DisputeGameFinalityDelaySeconds: new(big.Int).SetUint64(cfg.DisputeGameFinalityDelaySeconds), MipsVersion: new(big.Int).SetUint64(uint64(cfg.MIPSVersion)), - L1ContractsRelease: cfg.L1ContractsRelease, + DevFeatureBitmap: cfg.DevFeatureBitmap, + 
FaultGameV2MaxGameDepth: new(big.Int).SetUint64(cfg.FaultGameMaxGameDepth), + FaultGameV2SplitDepth: new(big.Int).SetUint64(cfg.FaultGameSplitDepth), + FaultGameV2ClockExtension: new(big.Int).SetUint64(cfg.FaultGameClockExtension), + FaultGameV2MaxClockDuration: new(big.Int).SetUint64(cfg.FaultGameMaxClockDuration), SuperchainConfigProxy: cfg.SuperchainConfigProxy, ProtocolVersionsProxy: cfg.ProtocolVersionsProxy, SuperchainProxyAdmin: cfg.SuperchainProxyAdmin, - UpgradeController: cfg.UpgradeController, + L1ProxyAdminOwner: cfg.L1ProxyAdminOwner, Challenger: cfg.Challenger, }, ); err != nil { diff --git a/op-deployer/pkg/deployer/bootstrap/implementations_test.go b/op-deployer/pkg/deployer/bootstrap/implementations_test.go index a873abae8ad00..bb08d8a3bad27 100644 --- a/op-deployer/pkg/deployer/bootstrap/implementations_test.go +++ b/op-deployer/pkg/deployer/bootstrap/implementations_test.go @@ -20,6 +20,8 @@ import ( "github.com/stretchr/testify/require" ) +var networks = []string{"mainnet", "sepolia"} + func TestImplementations(t *testing.T) { testCacheDir := testutils.IsolatedTestDirWithAutoCleanup(t) @@ -71,17 +73,17 @@ func testImplementations(t *testing.T, forkRPCURL string, cacheDir string) { PrivateKey: "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80", ArtifactsLocator: loc, Logger: lgr, - L1ContractsRelease: "dev", WithdrawalDelaySeconds: standard.WithdrawalDelaySeconds, MinProposalSizeBytes: standard.MinProposalSizeBytes, ChallengePeriodSeconds: standard.ChallengePeriodSeconds, ProofMaturityDelaySeconds: standard.ProofMaturityDelaySeconds, DisputeGameFinalityDelaySeconds: standard.DisputeGameFinalityDelaySeconds, MIPSVersion: int(standard.MIPSVersion), + DevFeatureBitmap: common.Hash{}, SuperchainConfigProxy: superchain.SuperchainConfigAddr, ProtocolVersionsProxy: superchain.ProtocolVersionsAddr, SuperchainProxyAdmin: proxyAdminOwner, - UpgradeController: proxyAdminOwner, + L1ProxyAdminOwner: proxyAdminOwner, Challenger: 
common.Address{'C'}, CacheDir: cacheDir, }) diff --git a/op-deployer/pkg/deployer/bootstrap/proxy.go b/op-deployer/pkg/deployer/bootstrap/proxy.go index 865dfe5b8a7ee..e0922d4cb56ba 100644 --- a/op-deployer/pkg/deployer/bootstrap/proxy.go +++ b/op-deployer/pkg/deployer/bootstrap/proxy.go @@ -114,7 +114,7 @@ func Proxy(ctx context.Context, cfg ProxyConfig) (opcm.DeployProxyOutput, error) } lgr := cfg.Logger - artifactsFS, err := artifacts.Download(ctx, cfg.ArtifactsLocator, artifacts.BarProgressor(), cfg.CacheDir) + artifactsFS, err := artifacts.Download(ctx, cfg.ArtifactsLocator, ioutil.BarProgressor(), cfg.CacheDir) if err != nil { return dpo, fmt.Errorf("failed to download artifacts: %w", err) } diff --git a/op-deployer/pkg/deployer/bootstrap/superchain.go b/op-deployer/pkg/deployer/bootstrap/superchain.go index 49c5e8f890f04..454d09a231fa8 100644 --- a/op-deployer/pkg/deployer/bootstrap/superchain.go +++ b/op-deployer/pkg/deployer/bootstrap/superchain.go @@ -151,7 +151,7 @@ func Superchain(ctx context.Context, cfg SuperchainConfig) (opcm.DeploySuperchai lgr := cfg.Logger cacheDir := cfg.CacheDir - artifactsFS, err := artifacts.Download(ctx, cfg.ArtifactsLocator, artifacts.BarProgressor(), cacheDir) + artifactsFS, err := artifacts.Download(ctx, cfg.ArtifactsLocator, ioutil.BarProgressor(), cacheDir) if err != nil { return dso, fmt.Errorf("failed to download artifacts: %w", err) } diff --git a/op-deployer/pkg/deployer/bootstrap/superchain_test.go b/op-deployer/pkg/deployer/bootstrap/superchain_test.go index 4b4cf0d1727de..a48e9263c6428 100644 --- a/op-deployer/pkg/deployer/bootstrap/superchain_test.go +++ b/op-deployer/pkg/deployer/bootstrap/superchain_test.go @@ -19,26 +19,18 @@ import ( "github.com/stretchr/testify/require" ) -var networks = []string{"mainnet", "sepolia"} - -var versions = []string{"v1.6.0", "v1.8.0-rc.4"} - func TestSuperchain(t *testing.T) { - t.Skipf("The regression tests for the legacy artifacts have been disabled until new artifacts are 
released") - for _, network := range networks { - for _, version := range versions { - t.Run(network+"-"+version, func(t *testing.T) { - envVar := strings.ToUpper(network) + "_RPC_URL" - rpcURL := os.Getenv(envVar) - require.NotEmpty(t, rpcURL, "must specify RPC url via %s env var", envVar) - testSuperchain(t, rpcURL, version) - }) - } + t.Run(network, func(t *testing.T) { + envVar := strings.ToUpper(network) + "_RPC_URL" + rpcURL := os.Getenv(envVar) + require.NotEmpty(t, rpcURL, "must specify RPC url via %s env var", envVar) + testSuperchain(t, rpcURL) + }) } } -func testSuperchain(t *testing.T, forkRPCURL string, version string) { +func testSuperchain(t *testing.T, forkRPCURL string) { t.Parallel() if forkRPCURL == "" { @@ -62,7 +54,7 @@ func testSuperchain(t *testing.T, forkRPCURL string, version string) { out, err := Superchain(ctx, SuperchainConfig{ L1RPCUrl: l1RPC, PrivateKey: "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80", - ArtifactsLocator: artifacts.MustNewLocatorFromTag("op-contracts/" + version), + ArtifactsLocator: artifacts.EmbeddedLocator, Logger: lgr, SuperchainProxyAdminOwner: common.Address{'S'}, diff --git a/op-deployer/pkg/deployer/broadcaster/calldata.go b/op-deployer/pkg/deployer/broadcaster/calldata.go index 10b4b8d493b4a..38c551f788bdf 100644 --- a/op-deployer/pkg/deployer/broadcaster/calldata.go +++ b/op-deployer/pkg/deployer/broadcaster/calldata.go @@ -39,7 +39,7 @@ func (d *CalldataBroadcaster) Dump() ([]CalldataDump, error) { d.mtx.Lock() defer d.mtx.Unlock() - var out []CalldataDump + out := make([]CalldataDump, 0, len(d.txs)) for _, tx := range d.txs { out = append(out, CalldataDump{ To: tx.To, diff --git a/op-deployer/pkg/deployer/broadcaster/keyed.go b/op-deployer/pkg/deployer/broadcaster/keyed.go index 833fe2b0e176b..b856490a7be49 100644 --- a/op-deployer/pkg/deployer/broadcaster/keyed.go +++ b/op-deployer/pkg/deployer/broadcaster/keyed.go @@ -23,7 +23,7 @@ import ( ) const ( - GasPadFactor = 2.0 + 
GasPadFactor = 1.2 ) type KeyedBroadcaster struct { diff --git a/op-deployer/pkg/deployer/devfeatures.go b/op-deployer/pkg/deployer/devfeatures.go new file mode 100644 index 0000000000000..8e62bb1a42fd4 --- /dev/null +++ b/op-deployer/pkg/deployer/devfeatures.go @@ -0,0 +1,32 @@ +package deployer + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +// Development feature flag constants that mirror the solidity DevFeatures library. +// These use a 32 byte bitmap for easy integration between op-deployer and contracts. +var ( + // OptimismPortalInteropDevFlag enables the OptimismPortalInterop contract. + OptimismPortalInteropDevFlag = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001") + + // CannonKonaDevFlag enables Kona as the default cannon prover. + CannonKonaDevFlag = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000010") + + // DeployV2DisputeGamesDevFlag enables deployment of V2 dispute game contracts. + DeployV2DisputeGamesDevFlag = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000100") +) + +// IsDevFeatureEnabled checks if a specific development feature is enabled in a feature bitmap. +// It performs a bitwise AND operation between the bitmap and the feature flag to determine +// if the feature is enabled. This follows the same pattern as the solidity DevFeatures library. 
+func IsDevFeatureEnabled(bitmap, flag common.Hash) bool { + b := new(big.Int).SetBytes(bitmap[:]) + f := new(big.Int).SetBytes(flag[:]) + + featuresIsNonZero := f.Cmp(big.NewInt(0)) != 0 + bitmapContainsFeatures := new(big.Int).And(b, f).Cmp(f) == 0 + return featuresIsNonZero && bitmapContainsFeatures +} diff --git a/op-deployer/pkg/deployer/devfeatures_test.go b/op-deployer/pkg/deployer/devfeatures_test.go new file mode 100644 index 0000000000000..826d8a8908334 --- /dev/null +++ b/op-deployer/pkg/deployer/devfeatures_test.go @@ -0,0 +1,103 @@ +package deployer + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +var ( + FEATURE_A = common.HexToHash("0000000000000000000000000000000000000000000000000000000000000001") + FEATURE_B = common.HexToHash("0000000000000000000000000000000000000000000000000000000000000100") + FEATURE_C = common.HexToHash("1000000000000000000000000000000000000000000000000000000000000000") + FEATURES_AB = or(FEATURE_A, FEATURE_B) + FEATURES_ABC = or(FEATURE_A, FEATURE_B, FEATURE_C) + FEATURES_AB_INVERTED = not(FEATURES_AB) + EMPTY_FEATURES = [32]byte{} + ALL_FEATURES = common.HexToHash("1111111111111111111111111111111111111111111111111111111111111111") +) + +func TestIsDevFeatureEnabled(t *testing.T) { + t.Run("single feature exact match", func(t *testing.T) { + require.True(t, IsDevFeatureEnabled(FEATURE_A, FEATURE_A)) + require.True(t, IsDevFeatureEnabled(FEATURE_B, FEATURE_B)) + }) + + t.Run("single feature against superset", func(t *testing.T) { + require.True(t, IsDevFeatureEnabled(FEATURES_AB, FEATURE_A)) + require.True(t, IsDevFeatureEnabled(FEATURES_AB, FEATURE_B)) + require.True(t, IsDevFeatureEnabled(FEATURES_ABC, FEATURE_A)) + }) + + t.Run("single feature against all", func(t *testing.T) { + require.True(t, IsDevFeatureEnabled(ALL_FEATURES, FEATURE_A)) + require.True(t, IsDevFeatureEnabled(ALL_FEATURES, FEATURE_B)) + }) + + t.Run("single feature against mismatched 
bitmap", func(t *testing.T) { + require.False(t, IsDevFeatureEnabled(FEATURE_B, FEATURE_A)) + require.False(t, IsDevFeatureEnabled(FEATURE_A, FEATURE_B)) + require.False(t, IsDevFeatureEnabled(FEATURES_AB_INVERTED, FEATURE_A)) + require.False(t, IsDevFeatureEnabled(FEATURES_AB_INVERTED, FEATURE_B)) + }) + + t.Run("single feature against empty", func(t *testing.T) { + require.False(t, IsDevFeatureEnabled(EMPTY_FEATURES, FEATURE_A)) + require.False(t, IsDevFeatureEnabled(EMPTY_FEATURES, FEATURE_B)) + }) + + t.Run("combined features exact match", func(t *testing.T) { + require.True(t, IsDevFeatureEnabled(FEATURES_AB, FEATURES_AB)) + }) + + t.Run("combined features against superset", func(t *testing.T) { + require.True(t, IsDevFeatureEnabled(ALL_FEATURES, FEATURES_AB)) + require.True(t, IsDevFeatureEnabled(FEATURES_ABC, FEATURES_AB)) + }) + + t.Run("combined features against subset", func(t *testing.T) { + require.False(t, IsDevFeatureEnabled(FEATURE_A, FEATURES_AB)) + require.False(t, IsDevFeatureEnabled(FEATURE_B, FEATURES_AB)) + }) + + t.Run("combined features against mismatched bitmap", func(t *testing.T) { + require.False(t, IsDevFeatureEnabled(FEATURES_AB_INVERTED, FEATURES_AB)) + require.False(t, IsDevFeatureEnabled(EMPTY_FEATURES, FEATURES_AB)) + require.False(t, IsDevFeatureEnabled(FEATURE_C, FEATURES_AB)) + }) + + t.Run("empty vs empty", func(t *testing.T) { + require.False(t, IsDevFeatureEnabled(EMPTY_FEATURES, EMPTY_FEATURES)) + }) + + t.Run("all vs all", func(t *testing.T) { + require.True(t, IsDevFeatureEnabled(ALL_FEATURES, ALL_FEATURES)) + }) + + t.Run("empty against all", func(t *testing.T) { + require.False(t, IsDevFeatureEnabled(ALL_FEATURES, EMPTY_FEATURES)) + }) + + t.Run("all against empty", func(t *testing.T) { + require.False(t, IsDevFeatureEnabled(EMPTY_FEATURES, ALL_FEATURES)) + }) +} + +func or(values ...[32]byte) [32]byte { + var out [32]byte + for i := 0; i < 32; i++ { + for _, v := range values { + out[i] |= v[i] + } + } + return out +} + 
+func not(a [32]byte) [32]byte { + var out [32]byte + for i := 0; i < 32; i++ { + out[i] = ^a[i] + } + return out +} diff --git a/op-deployer/pkg/deployer/flags.go b/op-deployer/pkg/deployer/flags.go index 2f24c38402ae9..c4518c88fa1a5 100644 --- a/op-deployer/pkg/deployer/flags.go +++ b/op-deployer/pkg/deployer/flags.go @@ -2,9 +2,8 @@ package deployer import ( "fmt" - "log" - "os" - "path" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/state" @@ -29,40 +28,6 @@ const ( ContractNameFlagName = "contract-name" ) -type DeploymentTarget string - -const ( - DeploymentTargetLive DeploymentTarget = "live" - DeploymentTargetGenesis DeploymentTarget = "genesis" - DeploymentTargetCalldata DeploymentTarget = "calldata" - DeploymentTargetNoop DeploymentTarget = "noop" -) - -func NewDeploymentTarget(s string) (DeploymentTarget, error) { - switch s { - case string(DeploymentTargetLive): - return DeploymentTargetLive, nil - case string(DeploymentTargetGenesis): - return DeploymentTargetGenesis, nil - case string(DeploymentTargetCalldata): - return DeploymentTargetCalldata, nil - case string(DeploymentTargetNoop): - return DeploymentTargetNoop, nil - default: - return "", fmt.Errorf("invalid deployment target: %s", s) - } -} - -func GetDefaultCacheDir() string { - homeDir, err := os.UserHomeDir() - if err != nil { - fallbackDir := ".op-deployer/cache" - log.Printf("error getting user home directory: %v, using fallback directory: %s\n", err, fallbackDir) - return fallbackDir - } - return path.Join(homeDir, ".op-deployer/cache") -} - var ( L1RPCURLFlag = &cli.StringFlag{ Name: L1RPCURLFlagName, @@ -76,13 +41,14 @@ var ( Name: ArtifactsLocatorFlagName, Usage: "Locator for artifacts.", EnvVars: PrefixEnvVar("ARTIFACTS_LOCATOR"), + Value: artifacts.EmbeddedLocatorString, } CacheDirFlag = &cli.StringFlag{ Name: CacheDirFlagName, Usage: "Cache directory. 
" + "If set, the deployer will attempt to cache downloaded artifacts in the specified directory.", EnvVars: PrefixEnvVar("CACHE_DIR"), - Value: GetDefaultCacheDir(), + Value: DefaultCacheDir(), } L1ChainIDFlag = &cli.Uint64Flag{ Name: L1ChainIDFlagName, @@ -184,11 +150,3 @@ var VerifyFlags = []cli.Flag{ func PrefixEnvVar(name string) []string { return op_service.PrefixEnvVar(EnvVarPrefix, name) } - -func cwd() string { - dir, err := os.Getwd() - if err != nil { - return "" - } - return dir -} diff --git a/op-deployer/pkg/deployer/forge/binary.go b/op-deployer/pkg/deployer/forge/binary.go new file mode 100644 index 0000000000000..aac8f86b62c21 --- /dev/null +++ b/op-deployer/pkg/deployer/forge/binary.go @@ -0,0 +1,279 @@ +package forge + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "context" + "crypto/sha256" + _ "embed" + "encoding/json" + "fmt" + "io" + "os" + "os/exec" + "path" + "regexp" + "runtime" + + "github.com/ethereum-optimism/optimism/op-service/httputil" + "github.com/ethereum-optimism/optimism/op-service/ioutil" +) + +//go:embed version.json +var versionJSON []byte + +type VersionConfig struct { + Forge string `json:"forge"` + Checksums map[string]string `json:"checksums"` +} + +// StandardVersion is the Foundry version that op-deployer will download if it's not found on PATH. +var StandardVersion string + +// checksums map the OS/architecture to the expected checksum of the binary. +var checksums = map[string]string{} + +func init() { + var versionConfig VersionConfig + if err := json.Unmarshal(versionJSON, &versionConfig); err != nil { + panic(err) + } + StandardVersion = versionConfig.Forge + checksums = versionConfig.Checksums +} + +// maxDownloadSize is the maximum size of the Foundry tarball that will be downloaded. It's typically ~60MB so +// this should be more than enough. 
+const maxDownloadSize = 100 * 1024 * 1024 + +func getOS() string { + if os.Getenv("FORGE_ENV") == "alpine" { + return "alpine" + } + sysOS := runtime.GOOS + if runtime.GOOS == "windows" { + sysOS = "win32" + } + return sysOS +} + +func binaryURL(sysOS, sysArch string) string { + return fmt.Sprintf("https://github.com/foundry-rs/foundry/releases/download/%s/foundry_%s_%s_%s.tar.gz", StandardVersion, StandardVersion, sysOS, sysArch) +} + +type Binary interface { + Ensure(ctx context.Context) error + Path() string +} + +type Bin struct { + path string +} + +func StaticBinary(path string) Binary { + return &Bin{path: path} +} + +func (b *Bin) Ensure(ctx context.Context) error { + return nil +} + +func (b *Bin) Path() string { + return b.path +} + +type PathBin struct { + path string +} + +func PathBinary() Binary { + return new(PathBin) +} + +func (b *PathBin) Ensure(ctx context.Context) error { + var err error + b.path, err = exec.LookPath("forge") + if err != nil { + return fmt.Errorf("could not find binary: %w", err) + } + return nil +} + +func (b *PathBin) Path() string { + return b.path +} + +// StandardBin forces the use of the standard forge binary version by +// first checking for the version locally, then downloading from github +// if needed +type StandardBin struct { + progressor ioutil.Progressor + + cachePather func() (string, error) + checksummer func(r io.Reader) error + url string + path string +} + +type StandardBinOpt func(s *StandardBin) + +func WithProgressor(p ioutil.Progressor) StandardBinOpt { + return func(s *StandardBin) { + s.progressor = p + } +} + +func WithURL(url string) StandardBinOpt { + return func(s *StandardBin) { + s.url = url + } +} + +func WithCachePather(pather func() (string, error)) StandardBinOpt { + return func(s *StandardBin) { + s.cachePather = pather + } +} + +func WithChecksummer(checksummer func(r io.Reader) error) StandardBinOpt { + return func(s *StandardBin) { + s.checksummer = checksummer + } +} + +func 
homedirCachePather() (string, error) { + homeDir, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("could not find home directory: %w", err) + } + return path.Join(homeDir, ".op-deployer", "cache"), nil +} + +func staticChecksummer(expChecksum string) func(r io.Reader) error { + return func(r io.Reader) error { + h := sha256.New() + if _, err := io.Copy(h, r); err != nil { + return fmt.Errorf("could not calculate checksum: %w", err) + } + gotChecksum := fmt.Sprintf("%x", h.Sum(nil)) + if gotChecksum != expChecksum { + return fmt.Errorf("checksum mismatch: expected %s, got %s", expChecksum, gotChecksum) + } + return nil + } +} + +func githubChecksummer(r io.Reader) error { + expChecksum := checksums[getOS()+"_"+runtime.GOARCH] + if expChecksum == "" { + return fmt.Errorf("could not find checksum for %s_%s", getOS(), runtime.GOARCH) + } + return staticChecksummer(expChecksum)(r) +} + +func NewStandardBinary(opts ...StandardBinOpt) (*StandardBin, error) { + bin := &StandardBin{ + url: binaryURL(getOS(), runtime.GOARCH), + cachePather: homedirCachePather, + checksummer: githubChecksummer, + } + for _, opt := range opts { + opt(bin) + } + return bin, nil +} + +func (b *StandardBin) Ensure(ctx context.Context) error { + // 1) Exit early if b.path already set (via previous Ensure call) + if b.path != "" { + return nil + } + + // 2) PATH: use if version matches the pinned Version + if forgePath, err := exec.LookPath("forge"); err == nil { + if ver, err := getForgeVersion(ctx, forgePath); err == nil && ver == StandardVersion { + b.path = forgePath + return nil + } + } + + // 3) Cache: use if version matches; otherwise replace it + binDir, err := b.cachePather() + if err != nil { + return fmt.Errorf("could not provide cache dir: %w", err) + } + binPath := path.Join(binDir, "forge") + if st, err := os.Stat(binPath); err == nil && !st.IsDir() { + // forge binary exists in cache; check version + if ver, err := getForgeVersion(ctx, binPath); err == nil && ver == 
StandardVersion { + b.path = binPath + return nil + } + } else if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("could not stat %s: %w", binPath, err) + } + + // 4) Download expected version for this OS/arch and verify checksum + if err := b.downloadBinary(ctx, binDir); err != nil { + return fmt.Errorf("could not download binary: %w", err) + } + b.path = binPath + return nil +} + +func (b *StandardBin) Path() string { + return b.path +} + +func (b *StandardBin) downloadBinary(ctx context.Context, dest string) error { + tmpDir, err := os.MkdirTemp("", "op-deployer-forge-*") + if err != nil { + return fmt.Errorf("failed to create temp file: %w", err) + } + defer func() { + _ = os.RemoveAll(tmpDir) + }() + downloader := &httputil.Downloader{ + Progressor: b.progressor, + MaxSize: maxDownloadSize, + } + buf := new(bytes.Buffer) + if err := downloader.Download(ctx, b.url, buf); err != nil { + return fmt.Errorf("failed to download binary: %w", err) + } + data := buf.Bytes() + if err := b.checksummer(bytes.NewReader(data)); err != nil { + return fmt.Errorf("checksum mismatch: %w", err) + } + gzr, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + return fmt.Errorf("failed to create gzip reader: %w", err) + } + tr := tar.NewReader(gzr) + if err := ioutil.Untar(tmpDir, tr); err != nil { + return fmt.Errorf("failed to untar: %w", err) + } + if err := os.Rename(path.Join(tmpDir, "forge"), path.Join(dest, "forge")); err != nil { + return fmt.Errorf("failed to move binary: %w", err) + } + if err := os.Chmod(path.Join(dest, "forge"), 0o755); err != nil { + return fmt.Errorf("failed to set executable bit: %w", err) + } + return nil +} + +func getForgeVersion(ctx context.Context, forgePath string) (string, error) { + cmd := exec.CommandContext(ctx, forgePath, "--version") + out, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("exec %s --version failed: %w", forgePath, err) + } + // Example output: "forge Version: 1.3.1-v1.3.1" -> capture 
initial "1.3.1" + re := regexp.MustCompile(`(\d+\.\d+\.\d+)`) + m := re.FindStringSubmatch(string(out)) + if len(m) < 2 { + return "", fmt.Errorf("could not parse version tag from: %q", out) + } + return "v" + m[1], nil +} diff --git a/op-deployer/pkg/deployer/forge/binary_test.go b/op-deployer/pkg/deployer/forge/binary_test.go new file mode 100644 index 0000000000000..799a7fa76a973 --- /dev/null +++ b/op-deployer/pkg/deployer/forge/binary_test.go @@ -0,0 +1,176 @@ +package forge + +import ( + "context" + "fmt" + "log/slog" + "net/http" + "net/http/httptest" + "os" + "path" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-service/testlog" + + "github.com/stretchr/testify/require" +) + +// TestStandardBinary_ForgeBins tests that the binary can be downloaded from the +// official release channel, and that their checksums are correct. +func TestStandardBinary_ForgeBins(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in -short mode") + } + + // Clear out the PATH env var so it forces a download. 
+ t.Setenv("PATH", "") + + for target, checksum := range checksums { + t.Run(target, func(t *testing.T) { + lgr := testlog.Logger(t, slog.LevelInfo) + split := strings.Split(target, "_") + tgtOS, tgtArch := split[0], split[1] + + cacheDir := t.TempDir() + bin, err := NewStandardBinary( + WithURL(binaryURL(tgtOS, tgtArch)), + WithCachePather(func() (string, error) { return cacheDir, nil }), + WithProgressor(ioutil.NewLogProgressor(lgr, "downloading").Progressor), + WithChecksummer(staticChecksummer(checksum)), + ) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) + defer cancel() + require.NoError(t, bin.Ensure(ctx)) + }) + } +} + +func TestStandardBinary_Downloads(t *testing.T) { + expChecksum, err := os.ReadFile("testdata/foundry.tgz.sha256") + require.NoError(t, err) + + // Serve the tar archive via an HTTP test server. + ts := httptest.NewServer(http.FileServer(http.Dir("testdata"))) + defer ts.Close() + + // Prepare a cache directory within the test's temporary directory. 
+ cacheDir := t.TempDir() + + t.Run("download OK", func(t *testing.T) { + var progressed atomic.Bool + + bin, err := NewStandardBinary( + WithURL(ts.URL+"/foundry.tgz"), + WithCachePather(func() (string, error) { return cacheDir, nil }), + WithProgressor(func(curr, total int64) { + progressed.Store(true) + }), + WithChecksummer(staticChecksummer(string(expChecksum))), + ) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + require.NoError(t, bin.Ensure(ctx)) + require.Equal(t, path.Join(cacheDir, "forge"), bin.Path()) + require.FileExists(t, bin.Path()) + require.True(t, progressed.Load()) + }) + + t.Run("invalid checksum", func(t *testing.T) { + bin, err := NewStandardBinary( + WithURL(ts.URL+"/foundry.tgz"), + WithCachePather(func() (string, error) { return "not-a-path", nil }), + WithChecksummer(staticChecksummer("beep beep")), + ) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + require.ErrorContains(t, bin.Ensure(ctx), "checksum mismatch") + }) +} + +func TestStandardBinary_OnPath(t *testing.T) { + expChecksum, err := os.ReadFile("testdata/foundry.tgz.sha256") + require.NoError(t, err) + + // Serve the test tarball so we can force the download path. 
+ ts := httptest.NewServer(http.FileServer(http.Dir("testdata"))) + defer ts.Close() + + makeForge := func(dir, versionLine string) string { + fp := path.Join(dir, "forge") + script := fmt.Sprintf(`#!/bin/sh +if [ "$1" = "--version" ]; then + echo "%s" + exit 0 +fi +exit 1 +`, versionLine) + require.NoError(t, os.WriteFile(fp, []byte(script), 0o777)) + require.NoError(t, os.Chmod(fp, 0o777)) + return fp + } + + cases := []struct { + name string + versionLine string + expectUsePath bool + }{ + { + name: "match_tag", + versionLine: fmt.Sprintf("forge Version: %s-%s", strings.TrimPrefix(StandardVersion, "v"), StandardVersion), + expectUsePath: true, + }, + { + name: "dev_tag", + versionLine: fmt.Sprintf("forge Version: %s-dev", strings.TrimPrefix(StandardVersion, "v")), + expectUsePath: true, + }, + { + name: "non_standard_tag", + versionLine: "forge Version: 0.0.0-v0.0.0", + expectUsePath: false, + }, + { + name: "garbage_output", + versionLine: "forge something unexpected", + expectUsePath: false, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + forgeDir := t.TempDir() + forgePath := makeForge(forgeDir, tc.versionLine) + t.Setenv("PATH", forgeDir) + + cacheDir := t.TempDir() + bin, err := NewStandardBinary( + WithURL(ts.URL+"/foundry.tgz"), + WithCachePather(func() (string, error) { return cacheDir, nil }), + WithChecksummer(staticChecksummer(string(expChecksum))), + ) + require.NoError(t, err) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + require.NoError(t, bin.Ensure(ctx)) + + if tc.expectUsePath { + require.Equal(t, forgePath, bin.Path()) + require.NoFileExists(t, path.Join(cacheDir, "forge")) + } else { + require.Equal(t, path.Join(cacheDir, "forge"), bin.Path()) + require.FileExists(t, bin.Path()) + } + }) + } +} diff --git a/op-deployer/pkg/deployer/forge/client.go b/op-deployer/pkg/deployer/forge/client.go new file mode 100644 index 0000000000000..f9f6ef9be6e17 --- /dev/null +++ 
b/op-deployer/pkg/deployer/forge/client.go @@ -0,0 +1,141 @@ +package forge + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "strings" +) + +var ( + versionRegexp = regexp.MustCompile(`(?i)forge version: (.*)\ncommit sha: ([a-f0-9]+)\n`) + sigilRegexp = regexp.MustCompile(`(?i)== Return ==\n0: bytes 0x([a-f0-9]+)\n`) +) + +type VersionInfo struct { + Semver string + SHA string +} + +type Client struct { + Binary Binary + Stdout io.Writer + Stderr io.Writer + Wd string +} + +func NewClient(binary Binary) *Client { + return &Client{ + Binary: binary, + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +func (c *Client) Version(ctx context.Context) (VersionInfo, error) { + buf := new(bytes.Buffer) + if err := c.execCmd(ctx, buf, io.Discard, "--version"); err != nil { + return VersionInfo{}, fmt.Errorf("failed to run forge version command: %w", err) + } + outputStr := buf.String() + matches := versionRegexp.FindAllStringSubmatch(outputStr, -1) + if len(matches) != 1 || len(matches[0]) != 3 { + return VersionInfo{}, fmt.Errorf("failed to find forge version in output:\n%s", outputStr) + } + return VersionInfo{ + Semver: matches[0][1], + SHA: matches[0][2], + }, nil +} + +func (c *Client) Build(ctx context.Context, opts ...string) error { + return c.execCmd(ctx, io.Discard, io.Discard, append([]string{"build"}, opts...)...) +} + +func (c *Client) Clean(ctx context.Context, opts ...string) error { + return c.execCmd(ctx, io.Discard, io.Discard, append([]string{"clean"}, opts...)...) +} + +func (c *Client) RunScript(ctx context.Context, script string, sig string, args []byte, opts ...string) (string, error) { + buf := new(bytes.Buffer) + cliOpts := []string{"script"} + cliOpts = append(cliOpts, opts...) 
+ cliOpts = append(cliOpts, "--sig", sig, script, "0x"+hex.EncodeToString(args)) + if err := c.execCmd(ctx, buf, io.Discard, cliOpts...); err != nil { + return "", fmt.Errorf("failed to execute forge script: %w", err) + } + return buf.String(), nil +} + +func (c *Client) execCmd(ctx context.Context, stdout io.Writer, stderr io.Writer, args ...string) error { + if err := c.Binary.Ensure(ctx); err != nil { + return fmt.Errorf("failed to ensure binary: %w", err) + } + + cmd := exec.CommandContext(ctx, c.Binary.Path(), args...) + cStdout := c.Stdout + if cStdout == nil { + cStdout = os.Stdout + } + cStderr := c.Stderr + if cStderr == nil { + cStderr = os.Stderr + } + + mwStdout := io.MultiWriter(cStdout, stdout) + mwStderr := io.MultiWriter(cStderr, stderr) + cmd.Stdout = mwStdout + cmd.Stderr = mwStderr + cmd.Dir = c.Wd + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to run forge command: %w", err) + } + return nil +} + +type ScriptCallEncoder[I any] interface { + Encode(I) ([]byte, error) +} + +type ScriptCallDecoder[O any] interface { + Decode(raw []byte) (O, error) +} + +// ScriptCaller is a function that calls a forge script +// Ouputs: +// - Return value of the script (decoded into go type) +// - Bool indicating if the script was recompiled (mostly used for testing) +// - Error if the script fails to run +type ScriptCaller[I any, O any] func(ctx context.Context, input I, opts ...string) (O, bool, error) + +func NewScriptCaller[I any, O any](client *Client, script string, sig string, encoder ScriptCallEncoder[I], decoder ScriptCallDecoder[O]) ScriptCaller[I, O] { + return func(ctx context.Context, input I, opts ...string) (O, bool, error) { + var out O + encArgs, err := encoder.Encode(input) + if err != nil { + return out, false, fmt.Errorf("failed to encode forge args: %w", err) + } + rawOut, err := client.RunScript(ctx, script, sig, encArgs, opts...) 
+ if err != nil { + return out, false, fmt.Errorf("failed to run forge script: %w", err) + } + sigilMatches := sigilRegexp.FindAllStringSubmatch(rawOut, -1) + if len(sigilMatches) != 1 || len(sigilMatches[0]) != 2 { + return out, false, fmt.Errorf("failed to find forge return value in output:\n%s", rawOut) + } + decoded, err := hex.DecodeString(sigilMatches[0][1]) + if err != nil { + return out, false, fmt.Errorf("failed to decode forge return value %s: %w", sigilMatches[0][1], err) + } + out, err = decoder.Decode(decoded) + if err != nil { + return out, false, fmt.Errorf("failed to decode forge output: %w", err) + } + return out, strings.Contains(rawOut, "Compiler run successful!"), nil + } +} diff --git a/op-deployer/pkg/deployer/forge/client_test.go b/op-deployer/pkg/deployer/forge/client_test.go new file mode 100644 index 0000000000000..4ef02955f52ca --- /dev/null +++ b/op-deployer/pkg/deployer/forge/client_test.go @@ -0,0 +1,178 @@ +package forge + +import ( + "bytes" + "context" + "io" + "os" + "path" + "path/filepath" + "regexp" + "runtime" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +type ioStruct struct { + ID uint8 + Data []byte + Slice []uint32 + Array [3]uint64 +} + +func TestMinimalSources(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + cl := NewClient(PathBinary()) + cl.Wd = projDir(t) + + // Build artifacts + require.NoError(t, cl.Build(ctx)) + + // Then copy them somewhere else + tmpDir := t.TempDir() + require.NoError(t, copyDir("testdata/testproject/out", path.Join(tmpDir, "out"))) + require.NoError(t, copyDir("testdata/testproject/cache", path.Join(tmpDir, "cache"))) + require.NoError(t, copyDir("testdata/testproject/script", path.Join(tmpDir, "script"))) + require.NoError(t, copyDir("testdata/testproject/foundry.toml", path.Join(tmpDir, "foundry.toml"))) + + // Then see if we can successfully run a script + cl.Wd = tmpDir + caller := 
NewScriptCaller( + cl, + "script/Test.s.sol:TestScript", + "runWithBytes(bytes)", + &BytesScriptEncoder[ioStruct]{TypeName: "ioStruct"}, + &BytesScriptDecoder[ioStruct]{TypeName: "ioStruct"}, + ) + // It should not recompile since we included the cache. + in := ioStruct{ + ID: 1, + Data: []byte{0x01, 0x02, 0x03, 0x04}, + Slice: []uint32{0x01, 0x02, 0x03, 0x04}, + Array: [3]uint64{0x01, 0x02, 0x03}, + } + out, changed, err := caller(ctx, in) + require.NoError(t, err) + require.False(t, changed) + require.EqualValues(t, ioStruct{ + ID: 2, + Data: in.Data, + Slice: in.Slice, + Array: in.Array, + }, out) +} + +// TestClient_Smoke smoke tests the Client by running the Version command on it. +func TestClient_Smoke(t *testing.T) { + bin := PathBinary() + cl := NewClient(bin) + + version, err := cl.Version(context.Background()) + require.NoError(t, err) + require.Regexp(t, regexp.MustCompile(`\d+\.\d+\.\d+`), version.Semver) + require.Regexp(t, regexp.MustCompile(`^[a-f0-9]+$`), version.SHA) +} + +func TestClient_OutputRedirection(t *testing.T) { + bin := PathBinary() + cl := NewClient(bin) + cl.Stdout = new(bytes.Buffer) + + _, err := cl.Version(context.Background()) + require.NoError(t, err) + require.True(t, strings.HasPrefix(cl.Stdout.(*bytes.Buffer).String(), "forge Version")) +} + +func TestScriptCaller(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + bin := PathBinary() + cl := NewClient(bin) + cl.Wd = projDir(t) + + require.NoError(t, cl.Clean(ctx)) + caller := NewScriptCaller( + cl, + "script/Test.s.sol:TestScript", + "runWithBytes(bytes)", + &BytesScriptEncoder[ioStruct]{TypeName: "ioStruct"}, + &BytesScriptDecoder[ioStruct]{TypeName: "ioStruct"}, + ) + + in := ioStruct{ + ID: 1, + Data: []byte{0x01, 0x02}, + Slice: []uint32{0x01, 0x02, 0x03, 0x04}, + Array: [3]uint64{0x01, 0x02, 0x03}, + } + out, recompiled, err := caller(context.Background(), in) + require.NoError(t, err) + require.True(t, 
recompiled) + require.EqualValues(t, ioStruct{ + ID: 2, + Data: in.Data, + Slice: in.Slice, + Array: in.Array, + }, out) + out, recompiled, err = caller(context.Background(), in) + require.NoError(t, err) + require.False(t, recompiled) + require.EqualValues(t, ioStruct{ + ID: 2, + Data: in.Data, + Slice: in.Slice, + Array: in.Array, + }, out) +} + +func copyDir(src, dst string) error { + return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + relPath, err := filepath.Rel(src, path) + if err != nil { + return err + } + targetPath := filepath.Join(dst, relPath) + + if info.IsDir() { + return os.MkdirAll(targetPath, 0755) + } + + return copyFile(path, targetPath) + }) +} + +func copyFile(src, dst string) error { + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + + _, err = io.Copy(out, in) + return err +} + +func projDir(t *testing.T) string { + _, testFilename, _, ok := runtime.Caller(0) + require.True(t, ok) + dir := filepath.Join(filepath.Dir(testFilename), "testdata", "testproject") + absProjDir, err := filepath.Abs(dir) + require.NoError(t, err) + return absProjDir +} diff --git a/op-deployer/pkg/deployer/forge/encoding.go b/op-deployer/pkg/deployer/forge/encoding.go new file mode 100644 index 0000000000000..4776d9dfbe378 --- /dev/null +++ b/op-deployer/pkg/deployer/forge/encoding.go @@ -0,0 +1,170 @@ +package forge + +import ( + "fmt" + "math/big" + "reflect" + + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" +) + +func GoStructToABITuple(structType reflect.Type, tupleName string) (abi.Type, error) { + var components []abi.ArgumentMarshaling + + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + + abiType, err := GoTypeToABIType(field.Type) + if err != nil { + return 
abi.Type{}, fmt.Errorf("unsupported field type %s: %w", field.Type, err) + } + + components = append(components, abi.ArgumentMarshaling{ + Name: field.Name, + Type: abiType, + }) + } + + return abi.NewType("tuple", tupleName, components) +} + +func GoTypeToABIType(goType reflect.Type) (string, error) { + // handle pointers by dereferencing + if goType.Kind() == reflect.Ptr { + goType = goType.Elem() + } + + // non-standard go types (need to catch these first) + switch goType { + case reflect.TypeOf(common.Address{}): + return "address", nil + case reflect.TypeOf(common.Hash{}): + return "bytes32", nil + case reflect.TypeOf(params.ProtocolVersion{}): + return "bytes32", nil + case reflect.TypeOf(big.NewInt(0)).Elem(): + return "uint256", nil + } + + // standard go types + switch goType.Kind() { + case reflect.Slice: + elemType := goType.Elem() + + // special case: []byte -> "bytes" + if elemType.Kind() == reflect.Uint8 { + return "bytes", nil + } + + // recursive: []T -> "T[]" + elemABI, err := GoTypeToABIType(elemType) + if err != nil { + return "", fmt.Errorf("unsupported slice element type: %w", err) + } + return elemABI + "[]", nil + + case reflect.Array: + elemType := goType.Elem() + arrayLen := goType.Len() + + // recursive: [N]T -> "T[N]" + elemABI, err := GoTypeToABIType(elemType) + if err != nil { + return "", fmt.Errorf("unsupported array element type: %w", err) + } + return fmt.Sprintf("%s[%d]", elemABI, arrayLen), nil + + case reflect.String: + return "string", nil + case reflect.Bool: + return "bool", nil + case reflect.Uint8: + return "uint8", nil + case reflect.Uint16: + return "uint16", nil + case reflect.Uint32: + return "uint32", nil + case reflect.Uint64, reflect.Uint: + return "uint64", nil + case reflect.Int8: + return "int8", nil + case reflect.Int16: + return "int16", nil + case reflect.Int32: + return "int32", nil + case reflect.Int64, reflect.Int: + return "int64", nil + } + + return "", fmt.Errorf("unable to convert go type to abi type: 
%s", goType) +} + +func ConvertAnonStructToTyped[T any](anonStruct interface{}) (T, error) { + var result T + + srcVal := reflect.ValueOf(anonStruct) + destVal := reflect.ValueOf(&result).Elem() + + // Ensure both are structs + if srcVal.Kind() != reflect.Struct || destVal.Kind() != reflect.Struct { + return result, fmt.Errorf("both source and destination must be structs") + } + + // Check field count matches + if srcVal.NumField() != destVal.NumField() { + return result, fmt.Errorf("field count mismatch: source has %d, destination has %d", + srcVal.NumField(), destVal.NumField()) + } + + // Copy fields by index (assumes same field order) + for i := 0; i < srcVal.NumField(); i++ { + srcField := srcVal.Field(i) + destField := destVal.Field(i) + + if destField.CanSet() { + destField.Set(srcField) + } + } + + return result, nil +} + +type BytesScriptEncoder[T any] struct { + TypeName string // e.g., "DeploySuperchainInput" +} + +func (e *BytesScriptEncoder[T]) Encode(input T) ([]byte, error) { + inputType, err := GoStructToABITuple(reflect.TypeOf(input), e.TypeName) + if err != nil { + return nil, fmt.Errorf("failed to create input type: %w", err) + } + + args := abi.Arguments{{Type: inputType}} + return args.Pack(input) +} + +type BytesScriptDecoder[T any] struct { + TypeName string // e.g., "DeploySuperchainOutput" +} + +func (d *BytesScriptDecoder[T]) Decode(rawOutput []byte) (T, error) { + var zero T + outputType, err := GoStructToABITuple(reflect.TypeOf(zero), d.TypeName) + if err != nil { + return zero, fmt.Errorf("failed to create output type: %w", err) + } + + args := abi.Arguments{{Type: outputType}} + unpacked, err := args.Unpack(rawOutput) + if err != nil { + return zero, fmt.Errorf("failed to unpack output: %w", err) + } + + if len(unpacked) != 1 { + return zero, fmt.Errorf("expected 1 unpacked value, got %d", len(unpacked)) + } + + return ConvertAnonStructToTyped[T](unpacked[0]) +} diff --git a/op-deployer/pkg/deployer/forge/testdata/foundry.tgz 
b/op-deployer/pkg/deployer/forge/testdata/foundry.tgz new file mode 100644 index 0000000000000..6362fc6e9a220 Binary files /dev/null and b/op-deployer/pkg/deployer/forge/testdata/foundry.tgz differ diff --git a/op-deployer/pkg/deployer/forge/testdata/foundry.tgz.sha256 b/op-deployer/pkg/deployer/forge/testdata/foundry.tgz.sha256 new file mode 100644 index 0000000000000..585f02f27347b --- /dev/null +++ b/op-deployer/pkg/deployer/forge/testdata/foundry.tgz.sha256 @@ -0,0 +1 @@ +15cb653675d5af82c3f540f85a330bf7e6edb6a142b199246409cab99610419e \ No newline at end of file diff --git a/op-deployer/pkg/deployer/forge/testdata/testproject/.gitignore b/op-deployer/pkg/deployer/forge/testdata/testproject/.gitignore new file mode 100644 index 0000000000000..85198aaa55b84 --- /dev/null +++ b/op-deployer/pkg/deployer/forge/testdata/testproject/.gitignore @@ -0,0 +1,14 @@ +# Compiler files +cache/ +out/ + +# Ignores development broadcast logs +!/broadcast +/broadcast/*/31337/ +/broadcast/**/dry-run/ + +# Docs +docs/ + +# Dotenv file +.env diff --git a/op-deployer/pkg/deployer/forge/testdata/testproject/README.md b/op-deployer/pkg/deployer/forge/testdata/testproject/README.md new file mode 100644 index 0000000000000..9a04181257f98 --- /dev/null +++ b/op-deployer/pkg/deployer/forge/testdata/testproject/README.md @@ -0,0 +1,3 @@ +# testproject + +This project is used to test the Forge Go client. 
\ No newline at end of file diff --git a/op-deployer/pkg/deployer/forge/testdata/testproject/foundry.toml b/op-deployer/pkg/deployer/forge/testdata/testproject/foundry.toml new file mode 100644 index 0000000000000..d7c0a71b2eae4 --- /dev/null +++ b/op-deployer/pkg/deployer/forge/testdata/testproject/foundry.toml @@ -0,0 +1,9 @@ +[profile.default] +src = "src" +out = "out" +libs = [] +extra_output = ['devdoc', 'userdoc', 'metadata', 'storageLayout'] +ast = true +bytecode_hash = 'none' +evm_version = 'cancun' +use_literal_content = true \ No newline at end of file diff --git a/op-deployer/pkg/deployer/forge/testdata/testproject/script/Test.s.sol b/op-deployer/pkg/deployer/forge/testdata/testproject/script/Test.s.sol new file mode 100644 index 0000000000000..186feb2704b54 --- /dev/null +++ b/op-deployer/pkg/deployer/forge/testdata/testproject/script/Test.s.sol @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity ^0.8.13; + +contract TestScript { + struct Input { + uint8 id; + bytes data; + uint32[] slice; + uint256[3] array; + } + + struct Output { + uint8 id; + bytes data; + uint32[] slice; + uint256[3] array; + } + + function _run(Input memory _input) public pure returns (Output memory) { + return Output({ id: 0x02, data: _input.data, slice: _input.slice, array: _input.array }); + } + + function runWithBytes(bytes memory _input) public pure returns (bytes memory) { + Input memory input = abi.decode(_input, (Input)); + Output memory output = _run(input); + return abi.encode(output); + } +} diff --git a/op-deployer/pkg/deployer/forge/version.json b/op-deployer/pkg/deployer/forge/version.json new file mode 100644 index 0000000000000..26457bb396ea5 --- /dev/null +++ b/op-deployer/pkg/deployer/forge/version.json @@ -0,0 +1,11 @@ +{ + "forge": "v1.1.0", + "checksums": { + "darwin_amd64": "e82be5f4a57afb538188b4c41c610ec8261dcf58454140aaf7a2d74e6851cc68", + "darwin_arm64": "47efbcbcbe703f090e4fea8a98cd8b07446f5b4a9d01a55167e0b1585d661c95", + 
"linux_amd64": "22112a68962e0d1238aad3a33d888f399dfd9b6d9e981d801ffd09cb5357a9a4", + "linux_arm64": "ddcde0cd99cc71080d86f44a5d193d4dad089b33902e7278ae0a9dc7ad380d42", + "alpine_amd64": "d1867b1337be564421ace8f7f9ef2343f901d615b5e675ef6535595ab91cea02", + "alpine_arm64": "dbc69fe667c37272fd72df1a0d6d9976e590c264829b887af291603e31dd4350" + } +} diff --git a/op-deployer/pkg/deployer/inspect/semvers.go b/op-deployer/pkg/deployer/inspect/semvers.go index 05524dbb788e4..232f1d4bd8554 100644 --- a/op-deployer/pkg/deployer/inspect/semvers.go +++ b/op-deployer/pkg/deployer/inspect/semvers.go @@ -59,7 +59,7 @@ func L2SemversCLI(cliCtx *cli.Context) error { return fmt.Errorf("chain state does not have allocs") } - artifactsFS, err := artifacts.Download(ctx, intent.L2ContractsLocator, artifacts.BarProgressor(), cliCfg.CacheDir) + artifactsFS, err := artifacts.Download(ctx, intent.L2ContractsLocator, ioutil.BarProgressor(), cliCfg.CacheDir) if err != nil { return fmt.Errorf("failed to download L2 artifacts: %w", err) } diff --git a/op-deployer/pkg/deployer/integration_test/apply_test.go b/op-deployer/pkg/deployer/integration_test/apply_test.go index 67e024fab36b7..390c37de14492 100644 --- a/op-deployer/pkg/deployer/integration_test/apply_test.go +++ b/op-deployer/pkg/deployer/integration_test/apply_test.go @@ -3,16 +3,16 @@ package integration_test import ( "bytes" "context" - "crypto/ecdsa" "encoding/hex" "log/slog" "math/big" - "regexp" "strings" "testing" "time" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/bootstrap" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/inspect" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/integration_test/shared" "github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-service/testutils" @@ -35,7 +35,6 @@ import ( "github.com/holiman/uint256" - "github.com/ethereum-optimism/optimism/op-chain-ops/addresses" 
"github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" "github.com/ethereum-optimism/optimism/op-service/predeploys" @@ -46,6 +45,8 @@ import ( "github.com/stretchr/testify/require" ) +const testCustomGasLimit = uint64(90_123_456) + type deployerKey struct{} func (d *deployerKey) HDPath() string { @@ -56,17 +57,6 @@ func (d *deployerKey) String() string { return "deployer-key" } -func defaultPrivkey(t *testing.T) (string, *ecdsa.PrivateKey, *devkeys.MnemonicDevKeys) { - pkHex := "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" - pk, err := crypto.HexToECDSA(pkHex) - require.NoError(t, err) - - dk, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) - require.NoError(t, err) - - return pkHex, pk, dk -} - // TestEndToEndBootstrapApply tests that a system can be fully bootstrapped and applied, both from // local artifacts and the default tagged artifacts. The tagged artifacts test only runs on proposal // or backports branches, since those are the only branches with an SLA to support tagged artifacts. 
@@ -75,7 +65,7 @@ func TestEndToEndBootstrapApply(t *testing.T) { lgr := testlog.Logger(t, slog.LevelDebug) l1RPC, l1Client := devnet.DefaultAnvilRPC(t, lgr) - pkHex, pk, dk := defaultPrivkey(t) + pkHex, pk, dk := shared.DefaultPrivkey(t) l1ChainID := new(big.Int).SetUint64(devnet.DefaultChainID) l2ChainID := uint256.NewInt(1) testCacheDir := testutils.IsolatedTestDirWithAutoCleanup(t) @@ -100,25 +90,20 @@ func TestEndToEndBootstrapApply(t *testing.T) { }) require.NoError(t, err) - var release string - if !loc.IsTag() { - release = "dev" - } - impls, err := bootstrap.Implementations(ctx, bootstrap.ImplementationsConfig{ L1RPCUrl: l1RPC, PrivateKey: pkHex, ArtifactsLocator: loc, - L1ContractsRelease: release, MIPSVersion: int(standard.MIPSVersion), WithdrawalDelaySeconds: standard.WithdrawalDelaySeconds, MinProposalSizeBytes: standard.MinProposalSizeBytes, ChallengePeriodSeconds: standard.ChallengePeriodSeconds, ProofMaturityDelaySeconds: standard.ProofMaturityDelaySeconds, DisputeGameFinalityDelaySeconds: standard.DisputeGameFinalityDelaySeconds, + DevFeatureBitmap: common.Hash{}, SuperchainConfigProxy: bstrap.SuperchainConfigProxy, ProtocolVersionsProxy: bstrap.ProtocolVersionsProxy, - UpgradeController: superchainPAO, + L1ProxyAdminOwner: superchainPAO, SuperchainProxyAdmin: bstrap.SuperchainProxyAdmin, CacheDir: testCacheDir, Logger: lgr, @@ -126,7 +111,7 @@ func TestEndToEndBootstrapApply(t *testing.T) { }) require.NoError(t, err) - intent, st := newIntent(t, l1ChainID, dk, l2ChainID, loc, loc) + intent, st := shared.NewIntent(t, l1ChainID, dk, l2ChainID, loc, loc, testCustomGasLimit) intent.SuperchainRoles = nil intent.OPCMAddress = &impls.Opcm @@ -149,13 +134,10 @@ func TestEndToEndBootstrapApply(t *testing.T) { } t.Run("default tagged artifacts", func(t *testing.T) { - op_e2e.InitParallel(t) - testutils.RunOnBranch(t, regexp.MustCompile(`^(backports/op-deployer|proposal/op-contracts)/*`)) apply(t, artifacts.DefaultL1ContractsLocator) }) t.Run("local 
artifacts", func(t *testing.T) { - op_e2e.InitParallel(t) loc, _ := testutil.LocalArtifacts(t) apply(t, loc) }) @@ -166,7 +148,7 @@ func TestEndToEndApply(t *testing.T) { lgr := testlog.Logger(t, slog.LevelDebug) l1RPC, l1Client := devnet.DefaultAnvilRPC(t, lgr) - _, pk, dk := defaultPrivkey(t) + _, pk, dk := shared.DefaultPrivkey(t) l1ChainID := new(big.Int).SetUint64(devnet.DefaultChainID) l2ChainID1 := uint256.NewInt(1) l2ChainID2 := uint256.NewInt(2) @@ -177,7 +159,7 @@ func TestEndToEndApply(t *testing.T) { defer cancel() t.Run("two chains one after another", func(t *testing.T) { - intent, st := newIntent(t, l1ChainID, dk, l2ChainID1, loc, loc) + intent, st := shared.NewIntent(t, l1ChainID, dk, l2ChainID1, loc, loc, testCustomGasLimit) cg := ethClientCodeGetter(ctx, l1Client) require.NoError(t, deployer.ApplyPipeline( @@ -196,7 +178,7 @@ func TestEndToEndApply(t *testing.T) { // create a new environment with wiped state to ensure we can continue using the // state from the previous deployment - intent.Chains = append(intent.Chains, newChainIntent(t, dk, l1ChainID, l2ChainID2)) + intent.Chains = append(intent.Chains, shared.NewChainIntent(t, dk, l1ChainID, l2ChainID2, testCustomGasLimit)) require.NoError(t, deployer.ApplyPipeline( ctx, @@ -217,7 +199,7 @@ func TestEndToEndApply(t *testing.T) { }) t.Run("with calldata broadcasts and prestate generation", func(t *testing.T) { - intent, st := newIntent(t, l1ChainID, dk, l2ChainID1, loc, loc) + intent, st := shared.NewIntent(t, l1ChainID, dk, l2ChainID1, loc, loc, testCustomGasLimit) mockPreStateBuilder := devnet.NewMockPreStateBuilder() require.NoError(t, deployer.ApplyPipeline( @@ -253,7 +235,6 @@ func TestGlobalOverrides(t *testing.T) { defer cancel() opts, intent, st := setupGenesisChain(t, devnet.DefaultChainID) - expectedGasLimit := strings.ToLower("0x1C9C380") expectedBaseFeeVaultRecipient := common.HexToAddress("0x0000000000000000000000000000000000000001") expectedL1FeeVaultRecipient := 
common.HexToAddress("0x0000000000000000000000000000000000000002") expectedSequencerFeeVaultRecipient := common.HexToAddress("0x0000000000000000000000000000000000000003") @@ -265,7 +246,6 @@ func TestGlobalOverrides(t *testing.T) { expectedUseFaultProofs := false intent.GlobalDeployOverrides = map[string]interface{}{ "l2BlockTime": float64(3), - "l2GenesisBlockGasLimit": expectedGasLimit, "baseFeeVaultRecipient": expectedBaseFeeVaultRecipient, "l1FeeVaultRecipient": expectedL1FeeVaultRecipient, "sequencerFeeVaultRecipient": expectedSequencerFeeVaultRecipient, @@ -282,7 +262,6 @@ func TestGlobalOverrides(t *testing.T) { cfg, err := state.CombineDeployConfig(intent, intent.Chains[0], st, st.Chains[0]) require.NoError(t, err) require.Equal(t, uint64(3), cfg.L2InitializationConfig.L2CoreDeployConfig.L2BlockTime, "L2 block time should be 3 seconds") - require.Equal(t, expectedGasLimit, strings.ToLower(cfg.L2InitializationConfig.L2GenesisBlockDeployConfig.L2GenesisBlockGasLimit.String()), "L2 Genesis Block Gas Limit should be 30_000_000") require.Equal(t, expectedBaseFeeVaultRecipient, cfg.L2InitializationConfig.L2VaultsDeployConfig.BaseFeeVaultRecipient, "Base Fee Vault Recipient should be the expected address") require.Equal(t, expectedL1FeeVaultRecipient, cfg.L2InitializationConfig.L2VaultsDeployConfig.L1FeeVaultRecipient, "L1 Fee Vault Recipient should be the expected address") require.Equal(t, expectedSequencerFeeVaultRecipient, cfg.L2InitializationConfig.L2VaultsDeployConfig.SequencerFeeVaultRecipient, "Sequencer Fee Vault Recipient should be the expected address") @@ -648,7 +627,7 @@ func setupGenesisChain(t *testing.T, l1ChainID uint64) (deployer.ApplyPipelineOp loc, _ := testutil.LocalArtifacts(t) - intent, st := newIntent(t, l1ChainIDBig, dk, l2ChainID1, loc, loc) + intent, st := shared.NewIntent(t, l1ChainIDBig, dk, l2ChainID1, loc, loc, testCustomGasLimit) testCacheDir := testutils.IsolatedTestDirWithAutoCleanup(t) @@ -665,63 +644,6 @@ func setupGenesisChain(t 
*testing.T, l1ChainID uint64) (deployer.ApplyPipelineOp return opts, intent, st } -func addrFor(t *testing.T, dk *devkeys.MnemonicDevKeys, key devkeys.Key) common.Address { - addr, err := dk.Address(key) - require.NoError(t, err) - return addr -} - -func newIntent( - t *testing.T, - l1ChainID *big.Int, - dk *devkeys.MnemonicDevKeys, - l2ChainID *uint256.Int, - l1Loc *artifacts.Locator, - l2Loc *artifacts.Locator, -) (*state.Intent, *state.State) { - intent := &state.Intent{ - ConfigType: state.IntentTypeCustom, - L1ChainID: l1ChainID.Uint64(), - SuperchainRoles: &addresses.SuperchainRoles{ - SuperchainProxyAdminOwner: addrFor(t, dk, devkeys.L1ProxyAdminOwnerRole.Key(l1ChainID)), - ProtocolVersionsOwner: addrFor(t, dk, devkeys.SuperchainDeployerKey.Key(l1ChainID)), - SuperchainGuardian: addrFor(t, dk, devkeys.SuperchainConfigGuardianKey.Key(l1ChainID)), - Challenger: addrFor(t, dk, devkeys.ChallengerRole.Key(l1ChainID)), - }, - FundDevAccounts: false, - L1ContractsLocator: l1Loc, - L2ContractsLocator: l2Loc, - Chains: []*state.ChainIntent{ - newChainIntent(t, dk, l1ChainID, l2ChainID), - }, - } - st := &state.State{ - Version: 1, - } - return intent, st -} - -func newChainIntent(t *testing.T, dk *devkeys.MnemonicDevKeys, l1ChainID *big.Int, l2ChainID *uint256.Int) *state.ChainIntent { - return &state.ChainIntent{ - ID: l2ChainID.Bytes32(), - BaseFeeVaultRecipient: addrFor(t, dk, devkeys.BaseFeeVaultRecipientRole.Key(l1ChainID)), - L1FeeVaultRecipient: addrFor(t, dk, devkeys.L1FeeVaultRecipientRole.Key(l1ChainID)), - SequencerFeeVaultRecipient: addrFor(t, dk, devkeys.SequencerFeeVaultRecipientRole.Key(l1ChainID)), - Eip1559DenominatorCanyon: standard.Eip1559DenominatorCanyon, - Eip1559Denominator: standard.Eip1559Denominator, - Eip1559Elasticity: standard.Eip1559Elasticity, - Roles: state.ChainRoles{ - L1ProxyAdminOwner: addrFor(t, dk, devkeys.L2ProxyAdminOwnerRole.Key(l1ChainID)), - L2ProxyAdminOwner: addrFor(t, dk, devkeys.L2ProxyAdminOwnerRole.Key(l1ChainID)), - 
SystemConfigOwner: addrFor(t, dk, devkeys.SystemConfigOwner.Key(l1ChainID)), - UnsafeBlockSigner: addrFor(t, dk, devkeys.SequencerP2PRole.Key(l1ChainID)), - Batcher: addrFor(t, dk, devkeys.BatcherRole.Key(l1ChainID)), - Proposer: addrFor(t, dk, devkeys.ProposerRole.Key(l1ChainID)), - Challenger: addrFor(t, dk, devkeys.ChallengerRole.Key(l1ChainID)), - }, - } -} - type codeGetter func(t *testing.T, addr common.Address) []byte func ethClientCodeGetter(ctx context.Context, client *ethclient.Client) codeGetter { @@ -777,6 +699,7 @@ func validateOPChainDeployment(t *testing.T, cg codeGetter, st *state.State, int implAddrs := []addrTuple{ {"DelayedWethImpl", st.ImplementationsDeployment.DelayedWethImpl}, {"OptimismPortalImpl", st.ImplementationsDeployment.OptimismPortalImpl}, + {"OptimismPortalInteropImpl", st.ImplementationsDeployment.OptimismPortalInteropImpl}, {"SystemConfigImpl", st.ImplementationsDeployment.SystemConfigImpl}, {"L1CrossDomainMessengerImpl", st.ImplementationsDeployment.L1CrossDomainMessengerImpl}, {"L1ERC721BridgeImpl", st.ImplementationsDeployment.L1Erc721BridgeImpl}, @@ -787,10 +710,6 @@ func validateOPChainDeployment(t *testing.T, cg codeGetter, st *state.State, int {"PreimageOracleImpl", st.ImplementationsDeployment.PreimageOracleImpl}, } - if !intent.L1ContractsLocator.IsTag() { - implAddrs = append(implAddrs, addrTuple{"EthLockboxImpl", st.ImplementationsDeployment.EthLockboxImpl}) - } - for _, addr := range implAddrs { require.NotEmpty(t, addr.addr, "%s should be set", addr.name) code := cg(t, addr.addr) @@ -850,6 +769,12 @@ func validateOPChainDeployment(t *testing.T, cg codeGetter, st *state.State, int require.False(t, ok, "governance token should not be deployed by default") } + genesis, rollup, err := inspect.GenesisAndRollup(st, chainState.ID) + require.NoError(t, err) + require.Equal(t, rollup.Genesis.SystemConfig.GasLimit, testCustomGasLimit, "rollup gasLimit") + require.Equal(t, genesis.GasLimit, testCustomGasLimit, "genesis gasLimit") 
+ + require.Equal(t, chainIntent.GasLimit, testCustomGasLimit, "chainIntent gasLimit") require.Equal(t, int(chainIntent.Eip1559Denominator), 50, "EIP1559Denominator should be set") require.Equal(t, int(chainIntent.Eip1559Elasticity), 6, "EIP1559Elasticity should be set") } diff --git a/op-deployer/pkg/deployer/integration_test/cli/cli_runner.go b/op-deployer/pkg/deployer/integration_test/cli/cli_runner.go new file mode 100644 index 0000000000000..573c16482e0fa --- /dev/null +++ b/op-deployer/pkg/deployer/integration_test/cli/cli_runner.go @@ -0,0 +1,151 @@ +package cli + +import ( + "bytes" + "context" + "log/slog" + "os" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/integration_test/shared" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-service/testutils/devnet" + "github.com/stretchr/testify/require" +) + +// CLITestRunner provides utilities for running op-deployer CLI commands in tests +type CLITestRunner struct { + workDir string + l1RPC string + privateKeyHex string +} + +// NewCLITestRunner creates a new CLI test runner +func NewCLITestRunner(t *testing.T) *CLITestRunner { + // Create a temporary working directory for tests + workDir := t.TempDir() + + return &CLITestRunner{ + workDir: workDir, + } +} + +// NewCLITestRunnerWithNetwork creates a new CLI test runner with network setup +func NewCLITestRunnerWithNetwork(t *testing.T) *CLITestRunner { + workDir := t.TempDir() + + lgr := testlog.Logger(t, slog.LevelDebug) + l1RPC, _ := devnet.DefaultAnvilRPC(t, lgr) + + // Get private key + pkHex, _, _ := shared.DefaultPrivkey(t) + + return &CLITestRunner{ + workDir: workDir, + l1RPC: l1RPC, + privateKeyHex: pkHex, + } +} + +// GetWorkDir returns the working directory for this test runner +func (r *CLITestRunner) GetWorkDir() string { + return r.workDir +} + +// captureOutputWriter captures output written to it for testing +type captureOutputWriter struct { + buf 
*bytes.Buffer +} + +func (w *captureOutputWriter) Write(p []byte) (n int, err error) { + return w.buf.Write(p) +} + +func newCaptureOutputWriter() *captureOutputWriter { + return &captureOutputWriter{buf: &bytes.Buffer{}} +} + +// Run executes a CLI command and returns the output +func (r *CLITestRunner) Run(ctx context.Context, args []string, env map[string]string) (string, error) { + // Set up environment variables + for key, value := range env { + os.Setenv(key, value) + defer os.Unsetenv(key) + } + + // Change to the working directory for the test + originalDir, err := os.Getwd() + if err != nil { + return "", err + } + defer func() { + _ = os.Chdir(originalDir) + }() + + if err := os.Chdir(r.workDir); err != nil { + return "", err + } + + // Capture output + stdout := newCaptureOutputWriter() + stderr := newCaptureOutputWriter() + + // Add "op-deployer" as the first argument if not already present + fullArgs := args + if len(args) == 0 || args[0] != "op-deployer" { + fullArgs = append([]string{"op-deployer"}, args...) 
+ } + + // Run the CLI command using the testable interface + err = RunCLI(ctx, stdout, stderr, fullArgs) + output := stdout.buf.String() + stderr.buf.String() + if err != nil { + return output, err + } + + return output, nil +} + +// RunWithNetwork executes a CLI command with network parameters if available +func (r *CLITestRunner) RunWithNetwork(ctx context.Context, args []string, env map[string]string) (string, error) { + if r.l1RPC != "" { + args = append(args, "--l1-rpc-url", r.l1RPC) + } + if r.privateKeyHex != "" { + args = append(args, "--private-key", r.privateKeyHex) + } + + return r.Run(ctx, args, env) +} + +// ExpectSuccess runs a command expecting it to succeed +func (r *CLITestRunner) ExpectSuccess(t *testing.T, args []string, env map[string]string) string { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + output, err := r.Run(ctx, args, env) + require.NoError(t, err, "Command failed: %s", output) + return output +} + +// ExpectSuccessWithNetwork runs a command with network parameters expecting it to succeed +func (r *CLITestRunner) ExpectSuccessWithNetwork(t *testing.T, args []string, env map[string]string) string { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + output, err := r.RunWithNetwork(ctx, args, env) + require.NoError(t, err, "Command failed: %s", output) + return output +} + +// ExpectErrorContains runs a command expecting it to fail with specific error text +func (r *CLITestRunner) ExpectErrorContains(t *testing.T, args []string, env map[string]string, contains string) string { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + output, err := r.Run(ctx, args, env) + require.Error(t, err, "Expected command to fail but it succeeded") + require.Contains(t, output, contains, "Error message should contain expected text") + return output +} diff --git 
a/op-deployer/pkg/deployer/integration_test/cli/cli_runner_test.go b/op-deployer/pkg/deployer/integration_test/cli/cli_runner_test.go new file mode 100644 index 0000000000000..f82f901a67e5c --- /dev/null +++ b/op-deployer/pkg/deployer/integration_test/cli/cli_runner_test.go @@ -0,0 +1,17 @@ +package cli + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// TestCLITestRunnerSmoke tests the CLITestRunner itself +func TestCLITestRunnerSmoke(t *testing.T) { + runner := NewCLITestRunner(t) + + require.DirExists(t, runner.GetWorkDir()) + + // Test basic command execution + runner.ExpectSuccess(t, []string{"--help"}, nil) +} diff --git a/op-deployer/pkg/deployer/integration_test/cli/cli_testable.go b/op-deployer/pkg/deployer/integration_test/cli/cli_testable.go new file mode 100644 index 0000000000000..d8509aeb5fc85 --- /dev/null +++ b/op-deployer/pkg/deployer/integration_test/cli/cli_testable.go @@ -0,0 +1,21 @@ +package cli + +import ( + "context" + "fmt" + "io" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/cli" +) + +// RunCLI provides a testable interface for running the op-deployer CLI +func RunCLI(ctx context.Context, w io.Writer, ew io.Writer, args []string) error { + app := cli.NewApp("v0.0.0-test") + app.Writer = w + app.ErrWriter = ew + err := app.RunContext(ctx, args) + if err != nil { + _, _ = fmt.Fprintf(ew, "Application failed: %v\n", err) + } + return err +} diff --git a/op-deployer/pkg/deployer/integration_test/cli/command_test.go b/op-deployer/pkg/deployer/integration_test/cli/command_test.go new file mode 100644 index 0000000000000..d568a6663f98f --- /dev/null +++ b/op-deployer/pkg/deployer/integration_test/cli/command_test.go @@ -0,0 +1,255 @@ +package cli + +import ( + "path/filepath" + "strings" + "testing" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/state" + "github.com/stretchr/testify/require" +) + +func TestInitCommandFlagValidation(t *testing.T) { + runner := NewCLITestRunner(t) + + tests 
:= []struct { + name string + args []string + expectError bool + expectContains []string + }{ + { + name: "valid init command", + args: []string{"init", "--l1-chain-id", "11155111", "--l2-chain-ids", "123"}, + expectError: false, + expectContains: []string{}, // Success is validated by lack of error + }, + { + name: "missing required l2-chain-ids", + args: []string{"init", "--l1-chain-id", "11155111"}, + expectError: true, + expectContains: []string{"must specify at least one L2 chain ID"}, + }, + { + name: "invalid l1-chain-id format", + args: []string{"init", "--l1-chain-id", "invalid", "--l2-chain-ids", "123"}, + expectError: true, + expectContains: []string{"invalid value \"invalid\" for flag -l1-chain-id: parse error"}, + }, + { + name: "invalid intent-type", + args: []string{"init", "--l1-chain-id", "11155111", "--l2-chain-ids", "123", "--intent-type", "invalid"}, + expectError: true, + expectContains: []string{"intent type not supported: invalid"}, + }, + { + name: "valid intent-type custom", + args: []string{"init", "--l1-chain-id", "11155111", "--l2-chain-ids", "123", "--intent-type", "custom"}, + expectError: false, + expectContains: []string{}, // Success is validated by lack of error + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + workDir := runner.GetWorkDir() + args := append(tt.args, "--workdir", workDir) + + if tt.expectError { + runner.ExpectErrorContains(t, args, nil, strings.Join(tt.expectContains, " ")) + } else { + runner.ExpectSuccess(t, args, nil) + // For successful cases, validate output if expectContains is not empty + if len(tt.expectContains) > 0 { + output := runner.ExpectSuccess(t, args, nil) + for _, expected := range tt.expectContains { + require.Contains(t, output, expected, "Output should contain expected string: %s", expected) + } + } + } + }) + } +} + +func TestApplyCommandFlagValidation(t *testing.T) { + runner := NewCLITestRunner(t) + + tests := []struct { + name string + args []string + 
expectError bool + expectContains []string + }{ + { + name: "invalid deployment target", + args: []string{"apply", "--deployment-target", "invalid"}, + expectError: true, + expectContains: []string{"failed to parse deployment target: invalid deployment target: invalid"}, + }, + { + name: "valid deployment targets", + args: []string{"apply", "--deployment-target", "live"}, + expectError: true, // Will fail because no RPC URL, but tests flag parsing + expectContains: []string{"l1 RPC URL must be specified"}, + }, + { + name: "valid noop deployment target", + args: []string{"apply", "--deployment-target", "noop"}, + expectError: true, // Will fail because no intent file, but tests flag parsing + expectContains: []string{"failed to read intent file"}, + }, + { + name: "valid calldata deployment target", + args: []string{"apply", "--deployment-target", "calldata"}, + expectError: true, // Will fail because no intent file, but tests flag parsing + expectContains: []string{"failed to read intent file"}, + }, + { + name: "valid genesis deployment target", + args: []string{"apply", "--deployment-target", "genesis"}, + expectError: true, // Will fail because no intent file, but tests flag parsing + expectContains: []string{"failed to read intent file"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + workDir := runner.GetWorkDir() + args := append(tt.args, "--workdir", workDir) + + if tt.expectError { + runner.ExpectErrorContains(t, args, nil, strings.Join(tt.expectContains, " ")) + } else { + runner.ExpectSuccess(t, args, nil) + // For successful cases, validate output if expectContains is not empty + if len(tt.expectContains) > 0 { + output := runner.ExpectSuccess(t, args, nil) + for _, expected := range tt.expectContains { + require.Contains(t, output, expected, "Output should contain expected string: %s", expected) + } + } + } + }) + } +} + +func TestGlobalFlagValidation(t *testing.T) { + runner := NewCLITestRunner(t) + + tests := []struct 
{ + name string + args []string + expectError bool + expectContains []string + }{ + { + name: "invalid global flag", + args: []string{"--invalid-global-flag", "init", "--l1-chain-id", "11155111", "--l2-chain-ids", "123"}, + expectError: true, + expectContains: []string{"flag provided but not defined: -invalid-global-flag"}, + }, + { + name: "valid cache dir flag", + args: []string{"--cache-dir", "/tmp/cache", "init", "--l1-chain-id", "11155111", "--l2-chain-ids", "123"}, + expectError: false, + expectContains: []string{}, // Success is validated by lack of error + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + workDir := runner.GetWorkDir() + args := append(tt.args, "--workdir", workDir) + + if tt.expectError { + runner.ExpectErrorContains(t, args, nil, strings.Join(tt.expectContains, " ")) + } else { + runner.ExpectSuccess(t, args, nil) + // For successful cases, validate output if expectContains is not empty + if len(tt.expectContains) > 0 { + output := runner.ExpectSuccess(t, args, nil) + for _, expected := range tt.expectContains { + require.Contains(t, output, expected, "Output should contain expected string: %s", expected) + } + } + } + }) + } +} + +func TestCommandParsing(t *testing.T) { + runner := NewCLITestRunner(t) + + tests := []struct { + name string + args []string + expectError bool + expectContains []string + }{ + { + name: "help command", + args: []string{"--help"}, + expectError: false, + expectContains: []string{"op-deployer", "Tool to configure and deploy OP Chains"}, // From cli/app.go app.Usage + }, + { + name: "version command", + args: []string{"--version"}, + expectError: false, + expectContains: []string{"op-deployer", "v0.0.0"}, // Version should contain app name and version format + }, + { + name: "no command", + args: []string{}, + expectError: false, // Shows help + expectContains: []string{"op-deployer", "Tool to configure and deploy OP Chains"}, // From cli/app.go app.Usage + }, + } + + for _, tt := 
range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.expectError { + runner.ExpectErrorContains(t, tt.args, nil, strings.Join(tt.expectContains, " ")) + } else { + output := runner.ExpectSuccess(t, tt.args, nil) + // Verify expected content in output + for _, expected := range tt.expectContains { + require.Contains(t, output, expected, "Output should contain expected string: %s", expected) + } + } + }) + } +} + +// TestCLIApplyMissingIntent tests apply when intent.toml is missing (uses real CLI binary) +func TestCLIApplyMissingIntent(t *testing.T) { + runner := NewCLITestRunner(t) + + workDir := runner.GetWorkDir() + + runner.ExpectErrorContains(t, []string{ + "apply", + "--deployment-target", "noop", + "--workdir", workDir, + }, nil, "failed to read intent file") +} + +// TestCLIApplyMissingState tests apply when state.json is missing (uses real CLI binary) +func TestCLIApplyMissingState(t *testing.T) { + runner := NewCLITestRunner(t) + + workDir := runner.GetWorkDir() + + // Create intent.toml but not state.json + intent := &state.Intent{ + ConfigType: state.IntentTypeCustom, + L1ChainID: 11155111, + } + require.NoError(t, intent.WriteToFile(filepath.Join(workDir, "intent.toml"))) + + runner.ExpectErrorContains(t, []string{ + "apply", + "--deployment-target", "noop", + "--workdir", workDir, + }, nil, "failed to read state file") +} diff --git a/op-deployer/pkg/deployer/integration_test/cli/e2e_apply_test.go b/op-deployer/pkg/deployer/integration_test/cli/e2e_apply_test.go new file mode 100644 index 0000000000000..49485751fa2e8 --- /dev/null +++ b/op-deployer/pkg/deployer/integration_test/cli/e2e_apply_test.go @@ -0,0 +1,100 @@ +package cli + +import ( + "math/big" + "path/filepath" + "testing" + + "github.com/ethereum-optimism/optimism/op-chain-ops/addresses" + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/integration_test/shared" + 
"github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/pipeline" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" + "github.com/ethereum-optimism/optimism/op-service/testutils/devnet" + "github.com/ethereum/go-ethereum/common" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" +) + +// TestCLIEndToEndApply tests the full end-to-end apply workflow via CLI +func TestCLIEndToEndApply(t *testing.T) { + runner := NewCLITestRunnerWithNetwork(t) + + workDir := runner.GetWorkDir() + // Use the same chain ID that anvil runs on + l1ChainID := uint64(devnet.DefaultChainID) + l2ChainID1 := uint256.NewInt(1) + l2ChainID2 := uint256.NewInt(2) + + dk, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) + require.NoError(t, err) + + t.Run("two chains one after another", func(t *testing.T) { + intent, _ := cliInitIntent(t, runner, l1ChainID, []common.Hash{l2ChainID1.Bytes32()}) + + if intent.SuperchainRoles == nil { + t.Log("SuperchainRoles is nil, initializing...") + intent.SuperchainRoles = &addresses.SuperchainRoles{} + } + + l1ChainIDBig := big.NewInt(int64(l1ChainID)) + intent.SuperchainRoles.SuperchainProxyAdminOwner = shared.AddrFor(t, dk, devkeys.L1ProxyAdminOwnerRole.Key(l1ChainIDBig)) + intent.SuperchainRoles.SuperchainGuardian = shared.AddrFor(t, dk, devkeys.SuperchainConfigGuardianKey.Key(l1ChainIDBig)) + intent.SuperchainRoles.ProtocolVersionsOwner = shared.AddrFor(t, dk, devkeys.SuperchainDeployerKey.Key(l1ChainIDBig)) + intent.SuperchainRoles.Challenger = shared.AddrFor(t, dk, devkeys.ChallengerRole.Key(l1ChainIDBig)) + + for _, chain := range intent.Chains { + chain.Roles.L1ProxyAdminOwner = shared.AddrFor(t, dk, devkeys.L2ProxyAdminOwnerRole.Key(l1ChainIDBig)) + chain.Roles.L2ProxyAdminOwner = shared.AddrFor(t, dk, devkeys.L2ProxyAdminOwnerRole.Key(l1ChainIDBig)) + chain.Roles.SystemConfigOwner = shared.AddrFor(t, dk, devkeys.SystemConfigOwner.Key(l1ChainIDBig)) + chain.Roles.UnsafeBlockSigner = 
shared.AddrFor(t, dk, devkeys.SequencerP2PRole.Key(l1ChainIDBig)) + chain.Roles.Batcher = shared.AddrFor(t, dk, devkeys.BatcherRole.Key(l1ChainIDBig)) + chain.Roles.Proposer = shared.AddrFor(t, dk, devkeys.ProposerRole.Key(l1ChainIDBig)) + chain.Roles.Challenger = shared.AddrFor(t, dk, devkeys.ChallengerRole.Key(l1ChainIDBig)) + + chain.BaseFeeVaultRecipient = shared.AddrFor(t, dk, devkeys.BaseFeeVaultRecipientRole.Key(l1ChainIDBig)) + chain.L1FeeVaultRecipient = shared.AddrFor(t, dk, devkeys.L1FeeVaultRecipientRole.Key(l1ChainIDBig)) + chain.SequencerFeeVaultRecipient = shared.AddrFor(t, dk, devkeys.SequencerFeeVaultRecipientRole.Key(l1ChainIDBig)) + + chain.Eip1559DenominatorCanyon = standard.Eip1559DenominatorCanyon + chain.Eip1559Denominator = standard.Eip1559Denominator + chain.Eip1559Elasticity = standard.Eip1559Elasticity + } + require.NoError(t, intent.WriteToFile(filepath.Join(workDir, "intent.toml"))) + + // Apply first chain with live deployment + runner.ExpectSuccessWithNetwork(t, []string{ + "apply", + "--deployment-target", "live", + "--workdir", workDir, + }, nil) + + // Add second chain to intent + intent, err := pipeline.ReadIntent(workDir) + require.NoError(t, err) + + secondChain := shared.NewChainIntent(t, dk, new(big.Int).SetUint64(l1ChainID), l2ChainID2, 60_000_000) + secondChain.Eip1559DenominatorCanyon = standard.Eip1559DenominatorCanyon + secondChain.Eip1559Denominator = standard.Eip1559Denominator + secondChain.Eip1559Elasticity = standard.Eip1559Elasticity + intent.Chains = append(intent.Chains, secondChain) + + require.NoError(t, intent.WriteToFile(filepath.Join(workDir, "intent.toml"))) + + // Apply again with both chains + runner.ExpectSuccessWithNetwork(t, []string{ + "apply", + "--deployment-target", "live", + "--workdir", workDir, + }, nil) + + // Verify final state + finalState, err := pipeline.ReadState(workDir) + require.NoError(t, err) + require.Len(t, finalState.Chains, 2) + require.Equal(t, common.Hash(l2ChainID1.Bytes32()), 
finalState.Chains[0].ID) + require.Equal(t, common.Hash(l2ChainID2.Bytes32()), finalState.Chains[1].ID) + + require.NotNil(t, finalState.AppliedIntent) + }) +} diff --git a/op-deployer/pkg/deployer/integration_test/cli/init_test.go b/op-deployer/pkg/deployer/integration_test/cli/init_test.go new file mode 100644 index 0000000000000..d7dddb274444c --- /dev/null +++ b/op-deployer/pkg/deployer/integration_test/cli/init_test.go @@ -0,0 +1,90 @@ +package cli + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/pipeline" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/state" + "github.com/ethereum/go-ethereum/common" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" +) + +// TestCLIInit tests basic init command and file creation +func TestCLIInit(t *testing.T) { + runner := NewCLITestRunner(t) + + workDir := runner.GetWorkDir() + + runner.ExpectSuccess(t, []string{ + "init", + "--l1-chain-id", "11155111", + "--l2-chain-ids", "1", + "--workdir", workDir, + }, nil) + + // Verify intent.toml was created and has correct content + intent, err := pipeline.ReadIntent(workDir) + require.NoError(t, err) + require.Equal(t, uint64(11155111), intent.L1ChainID) + require.Len(t, intent.Chains, 1) + require.Equal(t, common.Hash(uint256.NewInt(1).Bytes32()), intent.Chains[0].ID) + + // Verify state.json was created (chains get populated during apply, not init) + st, err := pipeline.ReadState(workDir) + require.NoError(t, err) + // State starts empty and gets populated during apply + require.Len(t, st.Chains, 0) +} + +// TestCLIInitMultipleChains tests init with multiple L2 chain IDs +func TestCLIInitMultipleChains(t *testing.T) { + runner := NewCLITestRunner(t) + + workDir := runner.GetWorkDir() + + runner.ExpectSuccess(t, []string{ + "init", + "--l1-chain-id", "11155111", + "--l2-chain-ids", "1,2", + "--workdir", workDir, + }, nil) + + intent, err := pipeline.ReadIntent(workDir) + require.NoError(t, err) + 
require.Equal(t, uint64(11155111), intent.L1ChainID) + require.Len(t, intent.Chains, 2) + require.Equal(t, common.Hash(uint256.NewInt(1).Bytes32()), intent.Chains[0].ID) + require.Equal(t, common.Hash(uint256.NewInt(2).Bytes32()), intent.Chains[1].ID) + + // State starts empty and gets populated during apply + st, err := pipeline.ReadState(workDir) + require.NoError(t, err) + require.Len(t, st.Chains, 0) +} + +// TestCLIInitCustomIntentType tests init with custom intent type +func TestCLIInitCustomIntentType(t *testing.T) { + runner := NewCLITestRunner(t) + + workDir := runner.GetWorkDir() + + runner.ExpectSuccess(t, []string{ + "init", + "--l1-chain-id", "11155111", + "--l2-chain-ids", "1", + "--intent-type", "custom", + "--workdir", workDir, + }, nil) + + intent, err := pipeline.ReadIntent(workDir) + require.NoError(t, err) + require.Equal(t, state.IntentTypeCustom, intent.ConfigType) + require.Equal(t, uint64(11155111), intent.L1ChainID) + require.Len(t, intent.Chains, 1) + + // State starts empty and gets populated during apply + st, err := pipeline.ReadState(workDir) + require.NoError(t, err) + require.Len(t, st.Chains, 0) +} diff --git a/op-deployer/pkg/deployer/integration_test/cli/noop_test.go b/op-deployer/pkg/deployer/integration_test/cli/noop_test.go new file mode 100644 index 0000000000000..5aee016f02351 --- /dev/null +++ b/op-deployer/pkg/deployer/integration_test/cli/noop_test.go @@ -0,0 +1,65 @@ +package cli + +import ( + "math/big" + "path/filepath" + "testing" + + "github.com/ethereum-optimism/optimism/op-chain-ops/addresses" + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/integration_test/shared" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" + "github.com/ethereum-optimism/optimism/op-service/testutils/devnet" + "github.com/ethereum/go-ethereum/common" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" +) + +// 
TestCLIApplyNoOp tests apply with noop target +func TestCLIApplyNoOp(t *testing.T) { + runner := NewCLITestRunnerWithNetwork(t) + + workDir := runner.GetWorkDir() + + intent, _ := cliInitIntent(t, runner, devnet.DefaultChainID, []common.Hash{uint256.NewInt(1).Bytes32()}) + + if intent.SuperchainRoles == nil { + t.Log("SuperchainRoles is nil, initializing...") + intent.SuperchainRoles = &addresses.SuperchainRoles{} + } + + dk, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) + require.NoError(t, err) + + l1ChainIDBig := big.NewInt(devnet.DefaultChainID) + intent.SuperchainRoles.SuperchainProxyAdminOwner = shared.AddrFor(t, dk, devkeys.L1ProxyAdminOwnerRole.Key(l1ChainIDBig)) + intent.SuperchainRoles.SuperchainGuardian = shared.AddrFor(t, dk, devkeys.SuperchainConfigGuardianKey.Key(l1ChainIDBig)) + intent.SuperchainRoles.ProtocolVersionsOwner = shared.AddrFor(t, dk, devkeys.SuperchainDeployerKey.Key(l1ChainIDBig)) + intent.SuperchainRoles.Challenger = shared.AddrFor(t, dk, devkeys.ChallengerRole.Key(l1ChainIDBig)) + + // Set chain-specific addresses + for _, chain := range intent.Chains { + chain.Roles.L1ProxyAdminOwner = shared.AddrFor(t, dk, devkeys.L2ProxyAdminOwnerRole.Key(l1ChainIDBig)) + chain.Roles.L2ProxyAdminOwner = shared.AddrFor(t, dk, devkeys.L2ProxyAdminOwnerRole.Key(l1ChainIDBig)) + chain.Roles.SystemConfigOwner = shared.AddrFor(t, dk, devkeys.SystemConfigOwner.Key(l1ChainIDBig)) + chain.Roles.UnsafeBlockSigner = shared.AddrFor(t, dk, devkeys.SequencerP2PRole.Key(l1ChainIDBig)) + chain.Roles.Batcher = shared.AddrFor(t, dk, devkeys.BatcherRole.Key(l1ChainIDBig)) + chain.Roles.Proposer = shared.AddrFor(t, dk, devkeys.ProposerRole.Key(l1ChainIDBig)) + chain.Roles.Challenger = shared.AddrFor(t, dk, devkeys.ChallengerRole.Key(l1ChainIDBig)) + + chain.BaseFeeVaultRecipient = shared.AddrFor(t, dk, devkeys.BaseFeeVaultRecipientRole.Key(l1ChainIDBig)) + chain.L1FeeVaultRecipient = shared.AddrFor(t, dk, devkeys.L1FeeVaultRecipientRole.Key(l1ChainIDBig)) + 
chain.SequencerFeeVaultRecipient = shared.AddrFor(t, dk, devkeys.SequencerFeeVaultRecipientRole.Key(l1ChainIDBig)) + + chain.Eip1559DenominatorCanyon = standard.Eip1559DenominatorCanyon + chain.Eip1559Denominator = standard.Eip1559Denominator + chain.Eip1559Elasticity = standard.Eip1559Elasticity + } + require.NoError(t, intent.WriteToFile(filepath.Join(workDir, "intent.toml"))) + + runner.ExpectSuccessWithNetwork(t, []string{ + "apply", + "--deployment-target", "noop", + "--workdir", workDir, + }, nil) +} diff --git a/op-deployer/pkg/deployer/integration_test/cli/shared.go b/op-deployer/pkg/deployer/integration_test/cli/shared.go new file mode 100644 index 0000000000000..fca87a08daabd --- /dev/null +++ b/op-deployer/pkg/deployer/integration_test/cli/shared.go @@ -0,0 +1,40 @@ +package cli + +import ( + "fmt" + "strings" + "testing" + + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/pipeline" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/state" + "github.com/ethereum/go-ethereum/common" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" +) + +// cliInitIntent creates an intent using the CLI init command +func cliInitIntent(t *testing.T, runner *CLITestRunner, l1ChainID uint64, l2ChainIDs []common.Hash) (*state.Intent, *state.State) { + workDir := runner.GetWorkDir() + + chainIDStrings := make([]string, len(l2ChainIDs)) + for i, id := range l2ChainIDs { + chainIDStrings[i] = new(uint256.Int).SetBytes32(id[:]).String() + } + l2ChainIDsStr := strings.Join(chainIDStrings, ",") + + runner.ExpectSuccess(t, []string{ + "init", + "--l1-chain-id", fmt.Sprintf("%d", l1ChainID), + "--l2-chain-ids", l2ChainIDsStr, + "--intent-type", "custom", + "--workdir", workDir, + }, nil) + + intent, err := pipeline.ReadIntent(workDir) + require.NoError(t, err) + + st, err := pipeline.ReadState(workDir) + require.NoError(t, err) + + return intent, st +} diff --git a/op-deployer/pkg/deployer/integration_test/shared/shared.go 
b/op-deployer/pkg/deployer/integration_test/shared/shared.go new file mode 100644 index 0000000000000..296d56104a717 --- /dev/null +++ b/op-deployer/pkg/deployer/integration_test/shared/shared.go @@ -0,0 +1,89 @@ +package shared + +import ( + "crypto/ecdsa" + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-chain-ops/addresses" + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/state" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/holiman/uint256" + "github.com/stretchr/testify/require" +) + +// AddrFor generates an address for a given role +func AddrFor(t *testing.T, dk *devkeys.MnemonicDevKeys, key devkeys.Key) common.Address { + addr, err := dk.Address(key) + require.NoError(t, err) + return addr +} + +func NewChainIntent(t *testing.T, dk *devkeys.MnemonicDevKeys, l1ChainID *big.Int, l2ChainID *uint256.Int, gasLimit uint64) *state.ChainIntent { + return &state.ChainIntent{ + ID: l2ChainID.Bytes32(), + BaseFeeVaultRecipient: AddrFor(t, dk, devkeys.BaseFeeVaultRecipientRole.Key(l1ChainID)), + L1FeeVaultRecipient: AddrFor(t, dk, devkeys.L1FeeVaultRecipientRole.Key(l1ChainID)), + SequencerFeeVaultRecipient: AddrFor(t, dk, devkeys.SequencerFeeVaultRecipientRole.Key(l1ChainID)), + Eip1559DenominatorCanyon: standard.Eip1559DenominatorCanyon, + Eip1559Denominator: standard.Eip1559Denominator, + Eip1559Elasticity: standard.Eip1559Elasticity, + GasLimit: gasLimit, + Roles: state.ChainRoles{ + L1ProxyAdminOwner: AddrFor(t, dk, devkeys.L2ProxyAdminOwnerRole.Key(l1ChainID)), + L2ProxyAdminOwner: AddrFor(t, dk, devkeys.L2ProxyAdminOwnerRole.Key(l1ChainID)), + SystemConfigOwner: AddrFor(t, dk, devkeys.SystemConfigOwner.Key(l1ChainID)), + UnsafeBlockSigner: AddrFor(t, dk, 
devkeys.SequencerP2PRole.Key(l1ChainID)), + Batcher: AddrFor(t, dk, devkeys.BatcherRole.Key(l1ChainID)), + Proposer: AddrFor(t, dk, devkeys.ProposerRole.Key(l1ChainID)), + Challenger: AddrFor(t, dk, devkeys.ChallengerRole.Key(l1ChainID)), + }, + } +} + +func NewIntent( + t *testing.T, + l1ChainID *big.Int, + dk *devkeys.MnemonicDevKeys, + l2ChainID *uint256.Int, + l1Loc *artifacts.Locator, + l2Loc *artifacts.Locator, + gasLimit uint64, +) (*state.Intent, *state.State) { + intent := &state.Intent{ + ConfigType: state.IntentTypeCustom, + L1ChainID: l1ChainID.Uint64(), + SuperchainRoles: &addresses.SuperchainRoles{ + SuperchainProxyAdminOwner: AddrFor(t, dk, devkeys.L1ProxyAdminOwnerRole.Key(l1ChainID)), + ProtocolVersionsOwner: AddrFor(t, dk, devkeys.SuperchainDeployerKey.Key(l1ChainID)), + SuperchainGuardian: AddrFor(t, dk, devkeys.SuperchainConfigGuardianKey.Key(l1ChainID)), + Challenger: AddrFor(t, dk, devkeys.ChallengerRole.Key(l1ChainID)), + }, + FundDevAccounts: false, + L1ContractsLocator: l1Loc, + L2ContractsLocator: l2Loc, + Chains: []*state.ChainIntent{ + NewChainIntent(t, dk, l1ChainID, l2ChainID, gasLimit), + }, + } + st := &state.State{ + Version: 1, + } + return intent, st +} + +// DefaultPrivkey returns the default private key for testing +func DefaultPrivkey(t *testing.T) (string, *ecdsa.PrivateKey, *devkeys.MnemonicDevKeys) { + pkHex := "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" + pk, err := crypto.HexToECDSA(pkHex) + require.NoError(t, err) + + dk, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) + require.NoError(t, err) + + return pkHex, pk, dk +} diff --git a/op-deployer/pkg/deployer/integration_test/standard_test.go b/op-deployer/pkg/deployer/integration_test/standard_test.go deleted file mode 100644 index e03f7533fbf10..0000000000000 --- a/op-deployer/pkg/deployer/integration_test/standard_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package integration - -import ( - "context" - "net/http" - "testing" - "time" - - 
"github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" - "github.com/stretchr/testify/require" -) - -// TestContractArtifactsIntegrity checks that the artifacts exist on GCP and are valid. Since the -// artifacts are large, this test is skipped in short mode to preserve bandwidth. -func TestContractArtifactsIntegrity(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode") - } - - for _, tag := range standard.AllTags() { - t.Run(tag, func(t *testing.T) { - t.Parallel() - - _, err := artifacts.Download( - context.Background(), - &artifacts.Locator{Tag: tag}, - artifacts.NoopProgressor(), - t.TempDir(), - ) - require.NoError(t, err) - }) - } -} - -// TestContractArtifactsExistence checks that the artifacts exist on GCP. It does not download them. -// As a result, this test does not validate the integrity of the artifacts. -func TestContractArtifactsExistence(t *testing.T) { - for _, tag := range standard.AllTags() { - t.Run(tag, func(t *testing.T) { - t.Parallel() - - url, err := standard.ArtifactsURLForTag(tag) - require.NoError(t, err) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - req, err := http.NewRequestWithContext(ctx, http.MethodHead, url.String(), nil) - require.NoError(t, err) - - resp, err := http.DefaultClient.Do(req) - require.NoError(t, err) - defer resp.Body.Close() - require.Equal(t, http.StatusOK, resp.StatusCode) - }) - } -} diff --git a/op-deployer/pkg/deployer/manage/add_game_type.go b/op-deployer/pkg/deployer/manage/add_game_type.go index 7ff617dbdadbc..a7280b20fbe65 100644 --- a/op-deployer/pkg/deployer/manage/add_game_type.go +++ b/op-deployer/pkg/deployer/manage/add_game_type.go @@ -7,6 +7,8 @@ import ( "math/big" "os" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-service/cliutil" 
"github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/pipeline" @@ -246,7 +248,7 @@ func AddGameType(ctx context.Context, cfg AddGameTypeConfig) (opcm.AddGameTypeOu lgr := cfg.Logger - artifactsFS, err := artifacts.Download(ctx, cfg.ArtifactsLocator, artifacts.BarProgressor(), cfg.CacheDir) + artifactsFS, err := artifacts.Download(ctx, cfg.ArtifactsLocator, ioutil.BarProgressor(), cfg.CacheDir) if err != nil { return output, nil, fmt.Errorf("failed to download artifacts: %w", err) } diff --git a/op-deployer/pkg/deployer/manage/add_game_type_test.go b/op-deployer/pkg/deployer/manage/add_game_type_test.go index b474b07c02b03..b19d51ae6ee32 100644 --- a/op-deployer/pkg/deployer/manage/add_game_type_test.go +++ b/op-deployer/pkg/deployer/manage/add_game_type_test.go @@ -12,6 +12,8 @@ import ( "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testutils" + "github.com/lmittmann/w3" + "github.com/lmittmann/w3/module/eth" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" "github.com/ethereum/go-ethereum/superchain" @@ -23,6 +25,44 @@ import ( "github.com/urfave/cli/v2" ) +// getAddressesOnchain reads addresses from on-chain contracts (using chainConfig to get entrypoints) +func getAddressesOnchain(ctx context.Context, rpcURL string, chainConfig *superchain.ChainConfig) (opChainProxyAdmin, delayedWETHProxy common.Address, err error) { + var proxyAdminFn = w3.MustNewFunc("proxyAdmin()", "address") + var gameImplsFn = w3.MustNewFunc("gameImpls(uint32)", "address") + var wethFn = w3.MustNewFunc("weth()", "address") + + client, err := w3.Dial(rpcURL) + if err != nil { + return common.Address{}, common.Address{}, fmt.Errorf("failed to connect to RPC: %w", err) + } + defer client.Close() + + systemConfigProxy := *chainConfig.Addresses.SystemConfigProxy + disputeGameFactoryProxy := *chainConfig.Addresses.DisputeGameFactoryProxy + + // Read OPChainProxyAdmin from 
systemConfigProxy.proxyAdmin() + err = client.CallCtx(ctx, eth.CallFunc(systemConfigProxy, proxyAdminFn).Returns(&opChainProxyAdmin)) + if err != nil { + return common.Address{}, common.Address{}, fmt.Errorf("failed to read proxyAdmin from SystemConfig: %w", err) + } + + // Read permissionless dispute game address from disputeGameFactoryProxy.gameImpls(0) + // GameTypes.CANNON = 0 (permissionless) + var permissionlessDisputeGame common.Address + err = client.CallCtx(ctx, eth.CallFunc(disputeGameFactoryProxy, gameImplsFn, uint32(0)).Returns(&permissionlessDisputeGame)) + if err != nil { + return common.Address{}, common.Address{}, fmt.Errorf("failed to read gameImpls(0) from DisputeGameFactory: %w", err) + } + + // Read DelayedWETHProxy from permissionlessDisputeGame.weth() + err = client.CallCtx(ctx, eth.CallFunc(permissionlessDisputeGame, wethFn).Returns(&delayedWETHProxy)) + if err != nil { + return common.Address{}, common.Address{}, fmt.Errorf("failed to read weth from permissionless dispute game: %w", err) + } + + return opChainProxyAdmin, delayedWETHProxy, nil +} + func TestAddGameType(t *testing.T) { rpcURL := os.Getenv("SEPOLIA_RPC_URL") require.NotEmpty(t, rpcURL, "must specify RPC url via SEPOLIA_RPC_URL env var") @@ -31,20 +71,26 @@ func TestAddGameType(t *testing.T) { v200SepoliaAddrs := validation.StandardVersionsSepolia[standard.ContractsV200Tag] testCacheDir := testutils.IsolatedTestDirWithAutoCleanup(t) - supChain, err := superchain.GetChain(11155420) + chain, err := superchain.GetChain(11155420) require.NoError(t, err) - supChainConfig, err := supChain.Config() + chainConfig, err := chain.Config() require.NoError(t, err) + readCtx, readCancel := context.WithTimeout(context.Background(), 10*time.Second) + defer readCancel() + + opChainProxyAdmin, delayedWETHProxy, err := getAddressesOnchain(readCtx, rpcURL, chainConfig) + require.NoError(t, err, "failed to read addresses from chain") + cfg := AddGameTypeConfig{ L1RPCUrl: rpcURL, Logger: 
testlog.Logger(t, slog.LevelInfo), ArtifactsLocator: afacts, SaltMixer: "foo", // The values below were pulled from the Superchain Registry for OP Sepolia. - SystemConfigProxy: *supChainConfig.Addresses.SystemConfigProxy, - OPChainProxyAdmin: *supChainConfig.Addresses.ProxyAdmin, - DelayedWETHProxy: *supChainConfig.Addresses.DelayedWETHProxy, + SystemConfigProxy: *chainConfig.Addresses.SystemConfigProxy, + OPChainProxyAdmin: opChainProxyAdmin, + DelayedWETHProxy: delayedWETHProxy, DisputeGameType: 999, DisputeAbsolutePrestate: common.HexToHash("0x1234"), DisputeMaxGameDepth: big.NewInt(73), @@ -54,14 +100,15 @@ func TestAddGameType(t *testing.T) { InitialBond: big.NewInt(1), VM: common.Address(*v200SepoliaAddrs.Mips.Address), Permissionless: false, - L1ProxyAdminOwner: *supChainConfig.Roles.ProxyAdminOwner, + L1ProxyAdminOwner: *chainConfig.Roles.ProxyAdminOwner, OPCMImpl: common.Address(*v200SepoliaAddrs.OPContractsManager.Address), CacheDir: testCacheDir, } - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - output, broadcasts, err := AddGameType(ctx, cfg) + addCtx, addCancel := context.WithTimeout(context.Background(), 30*time.Second) + defer addCancel() + + output, broadcasts, err := AddGameType(addCtx, cfg) require.NoError(t, err) require.Equal(t, 1, len(broadcasts)) diff --git a/op-deployer/pkg/deployer/manage/migrate.go b/op-deployer/pkg/deployer/manage/migrate.go index c9a7636b52137..af6c074478775 100644 --- a/op-deployer/pkg/deployer/manage/migrate.go +++ b/op-deployer/pkg/deployer/manage/migrate.go @@ -8,6 +8,8 @@ import ( "os" "strings" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-chain-ops/script" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" @@ -123,7 +125,7 @@ func MigrateCLI(cliCtx *cli.Context) error { } cacheDir := cliCtx.String(deployer.CacheDirFlag.Name) 
- artifactsFS, err := artifacts.Download(ctx, artifactsLocator, artifacts.BarProgressor(), cacheDir) + artifactsFS, err := artifacts.Download(ctx, artifactsLocator, ioutil.BarProgressor(), cacheDir) if err != nil { return fmt.Errorf("failed to download artifacts: %w", err) } diff --git a/op-deployer/pkg/deployer/manage/testdata/state.json b/op-deployer/pkg/deployer/manage/testdata/state.json index a0341fa0aa671..c99554e1e9731 100755 --- a/op-deployer/pkg/deployer/manage/testdata/state.json +++ b/op-deployer/pkg/deployer/manage/testdata/state.json @@ -12,8 +12,8 @@ }, "fundDevAccounts": false, "useInterop": false, - "l1ContractsLocator": "tag://op-contracts/v3.0.0-rc.2", - "l2ContractsLocator": "tag://op-contracts/v3.0.0-rc.2", + "l1ContractsLocator": "embedded", + "l2ContractsLocator": "embedded", "chains": [ { "id": "0x00000000000000000000000000000000000000000000000000000000000004d2", diff --git a/op-deployer/pkg/deployer/opcm/dispute_game_factory_test.go b/op-deployer/pkg/deployer/opcm/dispute_game_factory_test.go index c2003cb365caf..b699d40d62aaa 100644 --- a/op-deployer/pkg/deployer/opcm/dispute_game_factory_test.go +++ b/op-deployer/pkg/deployer/opcm/dispute_game_factory_test.go @@ -47,9 +47,10 @@ func TestSetDisputeGameImpl(t *testing.T) { factoryAddr := common.HexToAddress("0x05F9613aDB30026FFd634f38e5C4dFd30a197Fa1") input := SetDisputeGameImplInput{ - Factory: factoryAddr, - Impl: common.Address{'I'}, - GameType: 999, + Factory: factoryAddr, + Impl: common.Address{'I'}, + GameType: 999, + AnchorStateRegistry: common.Address{}, // Do not set as respected game type as we aren't authorized } require.NoError(t, SetDisputeGameImpl(host, input)) } diff --git a/op-deployer/pkg/deployer/opcm/implementations.go b/op-deployer/pkg/deployer/opcm/implementations.go index 938e679b944f5..538df8618980d 100644 --- a/op-deployer/pkg/deployer/opcm/implementations.go +++ b/op-deployer/pkg/deployer/opcm/implementations.go @@ -4,6 +4,7 @@ import ( "math/big" 
"github.com/ethereum-optimism/optimism/op-chain-ops/script" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/forge" "github.com/ethereum/go-ethereum/common" ) @@ -14,13 +15,16 @@ type DeployImplementationsInput struct { ProofMaturityDelaySeconds *big.Int DisputeGameFinalityDelaySeconds *big.Int MipsVersion *big.Int - // Release version to set OPCM implementations for, of the format `op-contracts/vX.Y.Z`. - L1ContractsRelease string - SuperchainConfigProxy common.Address - ProtocolVersionsProxy common.Address - SuperchainProxyAdmin common.Address - UpgradeController common.Address - Challenger common.Address + DevFeatureBitmap common.Hash + FaultGameV2MaxGameDepth *big.Int + FaultGameV2SplitDepth *big.Int + FaultGameV2ClockExtension *big.Int + FaultGameV2MaxClockDuration *big.Int + SuperchainConfigProxy common.Address + ProtocolVersionsProxy common.Address + SuperchainProxyAdmin common.Address + L1ProxyAdminOwner common.Address + Challenger common.Address } type DeployImplementationsOutput struct { @@ -33,6 +37,7 @@ type DeployImplementationsOutput struct { OpcmStandardValidator common.Address `json:"opcmStandardValidatorAddress"` DelayedWETHImpl common.Address `json:"delayedWETHImplAddress"` OptimismPortalImpl common.Address `json:"optimismPortalImplAddress"` + OptimismPortalInteropImpl common.Address `json:"optimismPortalInteropImplAddress"` ETHLockboxImpl common.Address `json:"ethLockboxImplAddress" abi:"ethLockboxImpl"` PreimageOracleSingleton common.Address `json:"preimageOracleSingletonAddress"` MipsSingleton common.Address `json:"mipsSingletonAddress"` @@ -45,6 +50,8 @@ type DeployImplementationsOutput struct { AnchorStateRegistryImpl common.Address `json:"anchorStateRegistryImplAddress"` SuperchainConfigImpl common.Address `json:"superchainConfigImplAddress"` ProtocolVersionsImpl common.Address `json:"protocolVersionsImplAddress"` + FaultDisputeGameV2Impl common.Address `json:"faultDisputeGameV2ImplAddress"` + PermissionedDisputeGameV2Impl 
common.Address `json:"permissionedDisputeGameV2ImplAddress"` } type DeployImplementationsScript script.DeployScriptWithOutput[DeployImplementationsInput, DeployImplementationsOutput] @@ -53,3 +60,13 @@ type DeployImplementationsScript script.DeployScriptWithOutput[DeployImplementat func NewDeployImplementationsScript(host *script.Host) (DeployImplementationsScript, error) { return script.NewDeployScriptWithOutputFromFile[DeployImplementationsInput, DeployImplementationsOutput](host, "DeployImplementations.s.sol", "DeployImplementations") } + +func NewDeployImplementationsForgeCaller(client *forge.Client) forge.ScriptCaller[DeployImplementationsInput, DeployImplementationsOutput] { + return forge.NewScriptCaller( + client, + "scripts/deploy/DeployImplementations.s.sol:DeployImplementations", + "runWithBytes(bytes)", + &forge.BytesScriptEncoder[DeployImplementationsInput]{TypeName: "DeployImplementationsInput"}, + &forge.BytesScriptDecoder[DeployImplementationsOutput]{TypeName: "DeployImplementationsOutput"}, + ) +} diff --git a/op-deployer/pkg/deployer/opcm/implementations_test.go b/op-deployer/pkg/deployer/opcm/implementations_test.go index dc16615557720..f42fd6a97146b 100644 --- a/op-deployer/pkg/deployer/opcm/implementations_test.go +++ b/op-deployer/pkg/deployer/opcm/implementations_test.go @@ -65,13 +65,16 @@ func TestNewDeployImplementationsScript(t *testing.T) { ProofMaturityDelaySeconds: big.NewInt(4), DisputeGameFinalityDelaySeconds: big.NewInt(5), MipsVersion: big.NewInt(mipsVersion), - // Release version to set OPCM implementations for, of the format `op-contracts/vX.Y.Z`. 
- L1ContractsRelease: "dev-release", - SuperchainConfigProxy: proxyAddress, - ProtocolVersionsProxy: protocolVersionsAddress, - SuperchainProxyAdmin: proxyAdminAddress, - UpgradeController: common.BigToAddress(big.NewInt(13)), - Challenger: common.BigToAddress(big.NewInt(14)), + DevFeatureBitmap: common.Hash{}, + FaultGameV2MaxGameDepth: big.NewInt(73), + FaultGameV2SplitDepth: big.NewInt(30), + FaultGameV2ClockExtension: big.NewInt(10800), + FaultGameV2MaxClockDuration: big.NewInt(302400), + SuperchainConfigProxy: proxyAddress, + ProtocolVersionsProxy: protocolVersionsAddress, + SuperchainProxyAdmin: proxyAdminAddress, + L1ProxyAdminOwner: common.BigToAddress(big.NewInt(13)), + Challenger: common.BigToAddress(big.NewInt(14)), }) // And do some simple asserts diff --git a/op-deployer/pkg/deployer/opcm/opchain.go b/op-deployer/pkg/deployer/opcm/opchain.go index 9d87fb4979a4e..0ec607a5264ce 100644 --- a/op-deployer/pkg/deployer/opcm/opchain.go +++ b/op-deployer/pkg/deployer/opcm/opchain.go @@ -2,10 +2,10 @@ package opcm import ( _ "embed" - "fmt" "math/big" "github.com/ethereum-optimism/optimism/op-chain-ops/script" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/forge" "github.com/ethereum/go-ethereum/common" ) @@ -35,8 +35,8 @@ type DeployOPChainInput struct { DisputeGameType uint32 DisputeAbsolutePrestate common.Hash - DisputeMaxGameDepth uint64 - DisputeSplitDepth uint64 + DisputeMaxGameDepth *big.Int + DisputeSplitDepth *big.Int DisputeClockExtension uint64 DisputeMaxClockDuration uint64 AllowCustomDisputeParameters bool @@ -45,14 +45,6 @@ type DeployOPChainInput struct { OperatorFeeConstant uint64 } -func (input *DeployOPChainInput) InputSet() bool { - return true -} - -func (input *DeployOPChainInput) StartingAnchorRoot() []byte { - return PermissionedGameStartingAnchorRoot -} - type DeployOPChainOutput struct { OpChainProxyAdmin common.Address AddressManager common.Address @@ -63,7 +55,7 @@ type DeployOPChainOutput struct { 
L1CrossDomainMessengerProxy common.Address // Fault proof contracts below. OptimismPortalProxy common.Address - ETHLockboxProxy common.Address `evm:"ethLockboxProxy"` + EthLockboxProxy common.Address `evm:"ethLockboxProxy"` DisputeGameFactoryProxy common.Address AnchorStateRegistryProxy common.Address FaultDisputeGame common.Address @@ -72,28 +64,40 @@ type DeployOPChainOutput struct { DelayedWETHPermissionlessGameProxy common.Address } -func (output *DeployOPChainOutput) CheckOutput(input common.Address) error { - return nil -} +type DeployOPChainScript script.DeployScriptWithOutput[DeployOPChainInput, DeployOPChainOutput] -type DeployOPChainScript struct { - Run func(input, output common.Address) error +// NewDeployOPChainScript loads and validates the DeployOPChain script contract +func NewDeployOPChainScript(host *script.Host) (DeployOPChainScript, error) { + return script.NewDeployScriptWithOutputFromFile[DeployOPChainInput, DeployOPChainOutput](host, "DeployOPChain.s.sol", "DeployOPChain") } -func DeployOPChain(host *script.Host, input DeployOPChainInput) (DeployOPChainOutput, error) { - return RunScriptSingle[DeployOPChainInput, DeployOPChainOutput](host, input, "DeployOPChain.s.sol", "DeployOPChain") +func NewDeployOPChainForgeCaller(client *forge.Client) forge.ScriptCaller[DeployOPChainInput, DeployOPChainOutput] { + return forge.NewScriptCaller( + client, + "scripts/deploy/DeployOPChain.s.sol:DeployOPChain", + "runWithBytes(bytes)", + &forge.BytesScriptEncoder[DeployOPChainInput]{TypeName: "DeployOPChainInput"}, + &forge.BytesScriptDecoder[DeployOPChainOutput]{TypeName: "DeployOPChainOutput"}, + ) } type ReadImplementationAddressesInput struct { - DeployOPChainOutput - Opcm common.Address - Release string + AddressManager common.Address + L1ERC721BridgeProxy common.Address + SystemConfigProxy common.Address + OptimismMintableERC20FactoryProxy common.Address + L1StandardBridgeProxy common.Address + OptimismPortalProxy common.Address + 
DisputeGameFactoryProxy common.Address + DelayedWETHPermissionedGameProxy common.Address + Opcm common.Address } type ReadImplementationAddressesOutput struct { DelayedWETH common.Address OptimismPortal common.Address - ETHLockbox common.Address `evm:"ethLockbox"` + OptimismPortalInterop common.Address + EthLockbox common.Address `evm:"ethLockbox"` SystemConfig common.Address L1CrossDomainMessenger common.Address L1ERC721Bridge common.Address @@ -104,39 +108,19 @@ type ReadImplementationAddressesOutput struct { PreimageOracleSingleton common.Address } -type ReadImplementationAddressesScript struct { - Run func(input, output common.Address) error +type ReadImplementationAddressesScript script.DeployScriptWithOutput[ReadImplementationAddressesInput, ReadImplementationAddressesOutput] + +// NewReadImplementationAddressesScript loads and validates the ReadImplementationAddresses script contract +func NewReadImplementationAddressesScript(host *script.Host) (ReadImplementationAddressesScript, error) { + return script.NewDeployScriptWithOutputFromFile[ReadImplementationAddressesInput, ReadImplementationAddressesOutput](host, "ReadImplementationAddresses.s.sol", "ReadImplementationAddresses") } -func ReadImplementationAddresses(host *script.Host, input ReadImplementationAddressesInput) (ReadImplementationAddressesOutput, error) { - var rio ReadImplementationAddressesOutput - inputAddr := host.NewScriptAddress() - outputAddr := host.NewScriptAddress() - - cleanupInput, err := script.WithPrecompileAtAddress[*ReadImplementationAddressesInput](host, inputAddr, &input) - if err != nil { - return rio, fmt.Errorf("failed to insert ReadImplementationAddressesInput precompile: %w", err) - } - defer cleanupInput() - host.Label(inputAddr, "ReadImplementationAddressesInput") - - cleanupOutput, err := script.WithPrecompileAtAddress[*ReadImplementationAddressesOutput](host, outputAddr, &rio, - script.WithFieldSetter[*ReadImplementationAddressesOutput]) - if err != nil { - return rio, 
fmt.Errorf("failed to insert ReadImplementationAddressesOutput precompile: %w", err) - } - defer cleanupOutput() - host.Label(outputAddr, "ReadImplementationAddressesOutput") - - deployScript, cleanupDeploy, err := script.WithScript[ReadImplementationAddressesScript](host, "ReadImplementationAddresses.s.sol", "ReadImplementationAddresses") - if err != nil { - return rio, fmt.Errorf("failed to load ReadImplementationAddresses script: %w", err) - } - defer cleanupDeploy() - - if err := deployScript.Run(inputAddr, outputAddr); err != nil { - return rio, fmt.Errorf("failed to run ReadImplementationAddresses script: %w", err) - } - - return rio, nil +func NewReadImplementationAddressesForgeCaller(client *forge.Client) forge.ScriptCaller[ReadImplementationAddressesInput, ReadImplementationAddressesOutput] { + return forge.NewScriptCaller( + client, + "scripts/deploy/ReadImplementationAddresses.s.sol:ReadImplementationAddresses", + "runWithBytes(bytes)", + &forge.BytesScriptEncoder[ReadImplementationAddressesInput]{TypeName: "ReadImplementationAddressesInput"}, + &forge.BytesScriptDecoder[ReadImplementationAddressesOutput]{TypeName: "ReadImplementationAddressesOutput"}, + ) } diff --git a/op-deployer/pkg/deployer/opcm/scripts.go b/op-deployer/pkg/deployer/opcm/scripts.go index e8e884717a579..05e88cf989ee9 100644 --- a/op-deployer/pkg/deployer/opcm/scripts.go +++ b/op-deployer/pkg/deployer/opcm/scripts.go @@ -17,6 +17,7 @@ type Scripts struct { DeployPreimageOracle DeployPreimageOracleScript DeployProxy DeployProxyScript DeploySuperchain DeploySuperchainScript + DeployOPChain DeployOPChainScript } // NewScripts collects all the deployment scripts, raising exceptions if any of them @@ -67,6 +68,11 @@ func NewScripts(host *script.Host) (*Scripts, error) { return nil, fmt.Errorf("failed to load DeployProxy script: %w", err) } + deployOPChain, err := NewDeployOPChainScript(host) + if err != nil { + return nil, fmt.Errorf("failed to load DeployOPChain script: %w", err) + } + 
return &Scripts{ DeployAlphabetVM: deployAlphabetVM, DeployAltDA: deployAltDA, @@ -77,5 +83,6 @@ func NewScripts(host *script.Host) (*Scripts, error) { DeployProxy: deployProxy, DeployImplementations: deployImplementations, DeploySuperchain: deploySuperchain, + DeployOPChain: deployOPChain, }, nil } diff --git a/op-deployer/pkg/deployer/opcm/superchain.go b/op-deployer/pkg/deployer/opcm/superchain.go index 1cbb08a2bfa6f..479c96e96b880 100644 --- a/op-deployer/pkg/deployer/opcm/superchain.go +++ b/op-deployer/pkg/deployer/opcm/superchain.go @@ -2,6 +2,7 @@ package opcm import ( "github.com/ethereum-optimism/optimism/op-chain-ops/script" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/forge" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/params" ) @@ -29,3 +30,13 @@ type DeploySuperchainScript script.DeployScriptWithOutput[DeploySuperchainInput, func NewDeploySuperchainScript(host *script.Host) (DeploySuperchainScript, error) { return script.NewDeployScriptWithOutputFromFile[DeploySuperchainInput, DeploySuperchainOutput](host, "DeploySuperchain.s.sol", "DeploySuperchain") } + +func NewDeploySuperchainForgeCaller(client *forge.Client) forge.ScriptCaller[DeploySuperchainInput, DeploySuperchainOutput] { + return forge.NewScriptCaller( + client, + "scripts/deploy/DeploySuperchain.s.sol:DeploySuperchain", + "runWithBytes(bytes)", + &forge.BytesScriptEncoder[DeploySuperchainInput]{TypeName: "DeploySuperchainInput"}, + &forge.BytesScriptDecoder[DeploySuperchainOutput]{TypeName: "DeploySuperchainOutput"}, + ) +} diff --git a/op-deployer/pkg/deployer/pipeline/dispute_games.go b/op-deployer/pkg/deployer/pipeline/dispute_games.go index 550c9dba8b3c5..ab061fe050feb 100644 --- a/op-deployer/pkg/deployer/pipeline/dispute_games.go +++ b/op-deployer/pkg/deployer/pipeline/dispute_games.go @@ -127,9 +127,10 @@ func deployDisputeGame( lgr.Info("setting dispute game impl on factory", "respected", game.MakeRespected) sdgiInput := 
opcm.SetDisputeGameImplInput{ - Factory: thisState.OpChainContracts.DisputeGameFactoryProxy, - Impl: out.DisputeGameImpl, - GameType: game.DisputeGameType, + Factory: thisState.OpChainContracts.DisputeGameFactoryProxy, + Impl: out.DisputeGameImpl, + GameType: game.DisputeGameType, + AnchorStateRegistry: common.Address{}, } if game.MakeRespected { sdgiInput.AnchorStateRegistry = thisState.OpChainContracts.AnchorStateRegistryProxy diff --git a/op-deployer/pkg/deployer/pipeline/implementations.go b/op-deployer/pkg/deployer/pipeline/implementations.go index 307a98fa60857..c12413f3814ea 100644 --- a/op-deployer/pkg/deployer/pipeline/implementations.go +++ b/op-deployer/pkg/deployer/pipeline/implementations.go @@ -6,6 +6,7 @@ import ( "github.com/ethereum-optimism/optimism/op-chain-ops/addresses" "github.com/ethereum-optimism/optimism/op-service/jsonutil" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/opcm" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" @@ -22,14 +23,6 @@ func DeployImplementations(env *Env, intent *state.Intent, st *state.State) erro lgr.Info("deploying implementations") - var contractsRelease string - var err error - if intent.L1ContractsLocator.IsTag() { - contractsRelease = intent.L1ContractsLocator.Tag - } else { - contractsRelease = "dev" - } - proofParams, err := jsonutil.MergeJSON( state.SuperchainProofParams{ WithdrawalDelaySeconds: standard.WithdrawalDelaySeconds, @@ -38,6 +31,7 @@ func DeployImplementations(env *Env, intent *state.Intent, st *state.State) erro ProofMaturityDelaySeconds: standard.ProofMaturityDelaySeconds, DisputeGameFinalityDelaySeconds: standard.DisputeGameFinalityDelaySeconds, MIPSVersion: standard.MIPSVersion, + DevFeatureBitmap: common.Hash{}, }, intent.GlobalDeployOverrides, ) @@ -53,11 +47,15 @@ func DeployImplementations(env *Env, intent *state.Intent, st *state.State) erro ProofMaturityDelaySeconds: 
new(big.Int).SetUint64(proofParams.ProofMaturityDelaySeconds), DisputeGameFinalityDelaySeconds: new(big.Int).SetUint64(proofParams.DisputeGameFinalityDelaySeconds), MipsVersion: new(big.Int).SetUint64(proofParams.MIPSVersion), - L1ContractsRelease: contractsRelease, + DevFeatureBitmap: proofParams.DevFeatureBitmap, + FaultGameV2MaxGameDepth: new(big.Int).SetUint64(standard.DisputeMaxGameDepth), + FaultGameV2SplitDepth: new(big.Int).SetUint64(standard.DisputeSplitDepth), + FaultGameV2ClockExtension: new(big.Int).SetUint64(standard.DisputeClockExtension), + FaultGameV2MaxClockDuration: new(big.Int).SetUint64(standard.DisputeMaxClockDuration), SuperchainConfigProxy: st.SuperchainDeployment.SuperchainConfigProxy, ProtocolVersionsProxy: st.SuperchainDeployment.ProtocolVersionsProxy, SuperchainProxyAdmin: st.SuperchainDeployment.SuperchainProxyAdminImpl, - UpgradeController: st.SuperchainRoles.SuperchainProxyAdminOwner, + L1ProxyAdminOwner: st.SuperchainRoles.SuperchainProxyAdminOwner, Challenger: st.SuperchainRoles.Challenger, }, ) @@ -74,6 +72,7 @@ func DeployImplementations(env *Env, intent *state.Intent, st *state.State) erro OpcmStandardValidatorImpl: dio.OpcmStandardValidator, DelayedWethImpl: dio.DelayedWETHImpl, OptimismPortalImpl: dio.OptimismPortalImpl, + OptimismPortalInteropImpl: dio.OptimismPortalInteropImpl, EthLockboxImpl: dio.ETHLockboxImpl, PreimageOracleImpl: dio.PreimageOracleSingleton, MipsImpl: dio.MipsSingleton, diff --git a/op-deployer/pkg/deployer/pipeline/init.go b/op-deployer/pkg/deployer/pipeline/init.go index 6d5b8d7d38d20..f17fe61c00c58 100644 --- a/op-deployer/pkg/deployer/pipeline/init.go +++ b/op-deployer/pkg/deployer/pipeline/init.go @@ -6,7 +6,6 @@ import ( "fmt" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/opcm" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/state" "github.com/ethereum-optimism/optimism/op-chain-ops/addresses" 
@@ -28,16 +27,6 @@ func InitLiveStrategy(ctx context.Context, env *Env, intent *state.Intent, st *s } hasPredeployedOPCM := intent.OPCMAddress != nil - isL1Tag := intent.L1ContractsLocator.IsTag() - isL2Tag := intent.L2ContractsLocator.IsTag() - - if isL1Tag && !standard.IsSupportedL1Version(intent.L1ContractsLocator.Tag) { - return fmt.Errorf("unsupported L1 version: %s", intent.L1ContractsLocator.Tag) - } - - if isL2Tag && !standard.IsSupportedL2Version(intent.L2ContractsLocator.Tag) { - return fmt.Errorf("unsupported L2 version: %s", intent.L2ContractsLocator.Tag) - } if hasPredeployedOPCM { if intent.SuperchainConfigProxy != nil { diff --git a/op-deployer/pkg/deployer/pipeline/init_test.go b/op-deployer/pkg/deployer/pipeline/init_test.go index 8ed150d69e2c9..f532ed80f989f 100644 --- a/op-deployer/pkg/deployer/pipeline/init_test.go +++ b/op-deployer/pkg/deployer/pipeline/init_test.go @@ -69,7 +69,7 @@ func TestInitLiveStrategy_OPCMReuseLogicSepolia(t *testing.T) { require.Nil(t, st.ImplementationsDeployment) }) - t.Run("tagged L1 locator with standard intent types and standard roles", func(t *testing.T) { + t.Run("embedded L1 locator with standard intent types and standard roles", func(t *testing.T) { runTest := func(configType state.IntentType) { _, afacts := testutil.LocalArtifacts(t) host, err := env.DefaultForkedScriptHost( @@ -85,14 +85,14 @@ func TestInitLiveStrategy_OPCMReuseLogicSepolia(t *testing.T) { stdSuperchainRoles, err := state.GetStandardSuperchainRoles(l1ChainID) require.NoError(t, err) - opcmAddr, err := standard.OPCMImplAddressFor(l1ChainID, artifacts.DefaultL1ContractsLocator.Tag) + opcmAddr, err := standard.OPCMImplAddressFor(l1ChainID, standard.CurrentTag) require.NoError(t, err) intent := &state.Intent{ ConfigType: configType, L1ChainID: l1ChainID, - L1ContractsLocator: artifacts.DefaultL1ContractsLocator, - L2ContractsLocator: artifacts.DefaultL2ContractsLocator, + L1ContractsLocator: artifacts.EmbeddedLocator, + L2ContractsLocator: 
artifacts.EmbeddedLocator, OPCMAddress: &opcmAddr, } st := &state.State{ diff --git a/op-deployer/pkg/deployer/pipeline/l2genesis.go b/op-deployer/pkg/deployer/pipeline/l2genesis.go index 58e34d5205a5d..c977d0a903354 100644 --- a/op-deployer/pkg/deployer/pipeline/l2genesis.go +++ b/op-deployer/pkg/deployer/pipeline/l2genesis.go @@ -117,7 +117,7 @@ func GenerateL2Genesis(pEnv *Env, intent *state.Intent, bundle ArtifactsBundle, } func calculateL2GenesisOverrides(intent *state.Intent, thisIntent *state.ChainIntent) (l2GenesisOverrides, *genesis.UpgradeScheduleDeployConfig, error) { - schedule := standard.DefaultHardforkScheduleForTag(intent.L1ContractsLocator.Tag) + schedule := standard.DefaultHardforkScheduleForTag(standard.CurrentTag) overrides := defaultOverrides() // Special case for FundDevAccounts since it's both an intent value and an override. diff --git a/op-deployer/pkg/deployer/pipeline/opchain.go b/op-deployer/pkg/deployer/pipeline/opchain.go index b530782d0ae6f..b98b8dd69d5f0 100644 --- a/op-deployer/pkg/deployer/pipeline/opchain.go +++ b/op-deployer/pkg/deployer/pipeline/opchain.go @@ -2,6 +2,7 @@ package pipeline import ( "fmt" + "math/big" "github.com/ethereum-optimism/optimism/op-chain-ops/addresses" "github.com/ethereum-optimism/optimism/op-service/jsonutil" @@ -33,33 +34,39 @@ func DeployOPChain(env *Env, intent *state.Intent, st *state.State, chainID comm return fmt.Errorf("error making deploy OP chain input: %w", err) } - dco, err = opcm.DeployOPChain(env.L1ScriptHost, dci) + dco, err = env.Scripts.DeployOPChain.Run(dci) if err != nil { return fmt.Errorf("error deploying OP chain: %w", err) } st.Chains = append(st.Chains, makeChainState(chainID, dco)) - var release string - if intent.L1ContractsLocator.IsTag() { - release = intent.L1ContractsLocator.Tag - } else { - release = "dev" + readInput := opcm.ReadImplementationAddressesInput{ + AddressManager: dco.AddressManager, + L1ERC721BridgeProxy: dco.L1ERC721BridgeProxy, + SystemConfigProxy: 
dco.SystemConfigProxy, + OptimismMintableERC20FactoryProxy: dco.OptimismMintableERC20FactoryProxy, + L1StandardBridgeProxy: dco.L1StandardBridgeProxy, + OptimismPortalProxy: dco.OptimismPortalProxy, + DisputeGameFactoryProxy: dco.DisputeGameFactoryProxy, + DelayedWETHPermissionedGameProxy: dco.DelayedWETHPermissionedGameProxy, + Opcm: dci.Opcm, } - readInput := opcm.ReadImplementationAddressesInput{ - DeployOPChainOutput: dco, - Opcm: dci.Opcm, - Release: release, + readImplementations, err := opcm.NewReadImplementationAddressesScript(env.L1ScriptHost) + if err != nil { + return fmt.Errorf("failed to load ReadImplementationAddresses script: %w", err) } - impls, err := opcm.ReadImplementationAddresses(env.L1ScriptHost, readInput) + + impls, err := readImplementations.Run(readInput) if err != nil { - return fmt.Errorf("failed to read implementation addresses: %w", err) + return fmt.Errorf("failed to run ReadImplementationAddresses script: %w", err) } st.ImplementationsDeployment.DelayedWethImpl = impls.DelayedWETH st.ImplementationsDeployment.OptimismPortalImpl = impls.OptimismPortal - st.ImplementationsDeployment.EthLockboxImpl = impls.ETHLockbox + st.ImplementationsDeployment.OptimismPortalInteropImpl = impls.OptimismPortalInterop + st.ImplementationsDeployment.EthLockboxImpl = impls.EthLockbox st.ImplementationsDeployment.SystemConfigImpl = impls.SystemConfig st.ImplementationsDeployment.L1CrossDomainMessengerImpl = impls.L1CrossDomainMessenger st.ImplementationsDeployment.L1Erc721BridgeImpl = impls.L1ERC721Bridge @@ -101,11 +108,11 @@ func makeDCI(intent *state.Intent, thisIntent *state.ChainIntent, chainID common L2ChainId: chainID.Big(), Opcm: st.ImplementationsDeployment.OpcmImpl, SaltMixer: st.Create2Salt.String(), // passing through salt generated at state initialization - GasLimit: standard.GasLimit, + GasLimit: thisIntent.GasLimit, DisputeGameType: proofParams.DisputeGameType, DisputeAbsolutePrestate: proofParams.DisputeAbsolutePrestate, - 
DisputeMaxGameDepth: proofParams.DisputeMaxGameDepth, - DisputeSplitDepth: proofParams.DisputeSplitDepth, + DisputeMaxGameDepth: new(big.Int).SetUint64(proofParams.DisputeMaxGameDepth), + DisputeSplitDepth: new(big.Int).SetUint64(proofParams.DisputeSplitDepth), DisputeClockExtension: proofParams.DisputeClockExtension, // 3 hours (input in seconds) DisputeMaxClockDuration: proofParams.DisputeMaxClockDuration, // 3.5 days (input in seconds) AllowCustomDisputeParameters: proofParams.DangerouslyAllowCustomDisputeParameters, @@ -124,7 +131,7 @@ func makeChainState(chainID common.Hash, dco opcm.DeployOPChainOutput) *state.Ch opChainContracts.L1StandardBridgeProxy = dco.L1StandardBridgeProxy opChainContracts.L1CrossDomainMessengerProxy = dco.L1CrossDomainMessengerProxy opChainContracts.OptimismPortalProxy = dco.OptimismPortalProxy - opChainContracts.EthLockboxProxy = dco.ETHLockboxProxy + opChainContracts.EthLockboxProxy = dco.EthLockboxProxy opChainContracts.DisputeGameFactoryProxy = dco.DisputeGameFactoryProxy opChainContracts.AnchorStateRegistryProxy = dco.AnchorStateRegistryProxy opChainContracts.FaultDisputeGameImpl = dco.FaultDisputeGame diff --git a/op-deployer/pkg/deployer/pipeline/pre_state.go b/op-deployer/pkg/deployer/pipeline/pre_state.go index 37eaf86d9c24c..cb41f5041a2f0 100644 --- a/op-deployer/pkg/deployer/pipeline/pre_state.go +++ b/op-deployer/pkg/deployer/pipeline/pre_state.go @@ -18,9 +18,10 @@ func GeneratePreState(ctx context.Context, pEnv *Env, globalIntent *state.Intent lgr := pEnv.Logger.New("stage", "generate-pre-state") if preStateBuilder == nil { - lgr.Warn("preStateBuilder not found - skipping prestate generation") + lgr.Debug("preStateBuilder not found, skipping prestate generation") return nil } + lgr.Info("preStateBuilder found, proceeding with prestate generation") prestateBuilderOpts := []prestate.PrestateBuilderOption{} generateDepSet := false diff --git a/op-deployer/pkg/deployer/pipeline/seal_l1_dev_genesis.go 
b/op-deployer/pkg/deployer/pipeline/seal_l1_dev_genesis.go index dec3e742a1668..d267b841b1378 100644 --- a/op-deployer/pkg/deployer/pipeline/seal_l1_dev_genesis.go +++ b/op-deployer/pkg/deployer/pipeline/seal_l1_dev_genesis.go @@ -48,6 +48,9 @@ func SealL1DevGenesis(env *Env, intent *state.Intent, st *state.State) error { }, L1ChainID: eth.ChainIDFromUInt64(intent.L1ChainID), L1PragueTimeOffset: l1DevParams.PragueTimeOffset, + L1OsakaTimeOffset: l1DevParams.OsakaTimeOffset, + L1BPO1TimeOffset: l1DevParams.BPO1TimeOffset, + BlobScheduleConfig: l1DevParams.BlobSchedule, }) if err != nil { return fmt.Errorf("failed to create dev L1 genesis template: %w", err) diff --git a/op-deployer/pkg/deployer/standard/standard.go b/op-deployer/pkg/deployer/standard/standard.go index 8ba50b4fe70f3..ffcbe2eeadede 100644 --- a/op-deployer/pkg/deployer/standard/standard.go +++ b/op-deployer/pkg/deployer/standard/standard.go @@ -1,9 +1,7 @@ package standard import ( - "embed" "fmt" - "net/url" "github.com/ethereum/go-ethereum/common/hexutil" @@ -27,7 +25,7 @@ const ( ChallengePeriodSeconds uint64 = 86400 ProofMaturityDelaySeconds uint64 = 604800 DisputeGameFinalityDelaySeconds uint64 = 302400 - MIPSVersion uint64 = 7 + MIPSVersion uint64 = 8 DisputeGameType uint32 = 1 // PERMISSIONED game type DisputeMaxGameDepth uint64 = 73 DisputeSplitDepth uint64 = 30 @@ -43,72 +41,15 @@ const ( ContractsV200Tag = "op-contracts/v2.0.0" ContractsV300Tag = "op-contracts/v3.0.0" ContractsV400Tag = "op-contracts/v4.0.0-rc.7" + CurrentTag = ContractsV400Tag ) var DisputeAbsolutePrestate = common.HexToHash("0x038512e02c4c3f7bdaec27d00edf55b7155e0905301e1a88083e4e0a6764d54c") -var DefaultL1ContractsTag = ContractsV400Tag - -var DefaultL2ContractsTag = ContractsV400Tag - var VaultMinWithdrawalAmount = mustHexBigFromHex("0x8ac7230489e80000") var GovernanceTokenOwner = common.HexToAddress("0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAdDEad") -type TaggedRelease struct { - ArtifactsHash common.Hash - ContentHash 
common.Hash -} - -func (t TaggedRelease) URL() string { - return fmt.Sprintf("https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-%x.tar.gz", t.ContentHash) -} - -var taggedReleases = map[string]TaggedRelease{ - ContractsV160Tag: { - ArtifactsHash: common.HexToHash("d20a930cc0ff204c2d93b7aa60755ec7859ba4f328b881f5090c6a6a2a86dcba"), - ContentHash: common.HexToHash("e1f0c4020618c4a98972e7124c39686cab2e31d5d7846f9ce5e0d5eed0f5ff32"), - }, - ContractsV170Beta1L2Tag: { - ArtifactsHash: common.HexToHash("9e3ad322ec9b2775d59143ce6874892f9b04781742c603ad59165159e90b00b9"), - ContentHash: common.HexToHash("b0fb1f6f674519d637cff39a22187a5993d7f81a6d7b7be6507a0b50a5e38597"), - }, - ContractsV180Tag: { - ArtifactsHash: common.HexToHash("78f186df4e9a02a6421bd9c3641b281e297535140967faa428c938286923976a"), - ContentHash: common.HexToHash("361ebf1f520c20d932695b00babfff6923ce2530cd05b2776eb74e07038898a6"), - }, - ContractsV200Tag: { - ArtifactsHash: common.HexToHash("32e11c96e07b83619f419595facb273368dccfe2439287549e7b436c9b522204"), - ContentHash: common.HexToHash("1cec51ed629c0394b8fb17ff2c6fa45c406c30f94ebbd37d4c90ede6c29ad608"), - }, - ContractsV300Tag: { - ArtifactsHash: common.HexToHash("40661d078e6efe7106b95d6fc5c4fda8db144487d85a47abd246cb3afcb41ab2"), - ContentHash: common.HexToHash("147b9fae70608da2975a01be3d98948306f89ba1930af7c917eea41a54d87cdb"), - }, - ContractsV400Tag: { - ArtifactsHash: common.HexToHash("da1d9ca1a4ebf80c4842ee3414ef1d13db7d1bb9e1fbbded5a21f28479d7cdf4"), - ContentHash: common.HexToHash("67966a2cb9945e1d9ab40e9c61f499e73cdb31d21b8d29a5a5c909b2b13ecd70"), - }, -} - -func AllTags() []string { - allTags := make([]string, 0, len(taggedReleases)) - for tag := range taggedReleases { - allTags = append(allTags, tag) - } - return allTags -} - -var _ embed.FS - -func IsSupportedL1Version(tag string) bool { - return tag == ContractsV400Tag -} - -func IsSupportedL2Version(tag string) bool { - return tag == ContractsV400Tag -} - func 
L1VersionsFor(chainID uint64) (validation.Versions, error) { switch chainID { case 1: @@ -220,27 +161,6 @@ func ProtocolVersionsOwner(chainID uint64) (common.Address, error) { } } -func ArtifactsURLForTag(tag string) (*url.URL, error) { - release, ok := taggedReleases[tag] - if !ok { - var validTagsStr string - for t := range taggedReleases { - validTagsStr += fmt.Sprintf(" %s\n", t) - } - return nil, fmt.Errorf("unsupported tag: %s\nValid tags are:\n%s", tag, validTagsStr) - } - - return url.Parse(release.URL()) -} - -func ArtifactsHashForTag(tag string) (common.Hash, error) { - release, ok := taggedReleases[tag] - if !ok { - return common.Hash{}, fmt.Errorf("unsupported tag: %s", tag) - } - return release.ArtifactsHash, nil -} - // DefaultHardforkScheduleForTag is used to determine which hardforks should be activated by default given a // contract tag. For example, passing in v1.6.0 will return all hardforks up to and including Granite. This allows // OP Deployer to set sane defaults for hardforks. 
This is not an ideal solution, but it will have to work until we get diff --git a/op-deployer/pkg/deployer/state/chain_intent.go b/op-deployer/pkg/deployer/state/chain_intent.go index 3ad1a3dea3b00..819bc9e3f1f9f 100644 --- a/op-deployer/pkg/deployer/state/chain_intent.go +++ b/op-deployer/pkg/deployer/state/chain_intent.go @@ -64,6 +64,7 @@ type ChainIntent struct { Eip1559DenominatorCanyon uint64 `json:"eip1559DenominatorCanyon" toml:"eip1559DenominatorCanyon"` Eip1559Denominator uint64 `json:"eip1559Denominator" toml:"eip1559Denominator"` Eip1559Elasticity uint64 `json:"eip1559Elasticity" toml:"eip1559Elasticity"` + GasLimit uint64 `json:"gasLimit" toml:"gasLimit"` Roles ChainRoles `json:"roles" toml:"roles"` DeployOverrides map[string]any `json:"deployOverrides" toml:"deployOverrides"` DangerousAltDAConfig genesis.AltDADeployConfig `json:"dangerousAltDAConfig,omitempty" toml:"dangerousAltDAConfig,omitempty"` @@ -71,6 +72,7 @@ type ChainIntent struct { OperatorFeeScalar uint32 `json:"operatorFeeScalar,omitempty" toml:"operatorFeeScalar,omitempty"` OperatorFeeConstant uint64 `json:"operatorFeeConstant,omitempty" toml:"operatorFeeConstant,omitempty"` L1StartBlockHash *common.Hash `json:"l1StartBlockHash,omitempty" toml:"l1StartBlockHash,omitempty"` + MinBaseFee uint64 `json:"minBaseFee,omitempty" toml:"minBaseFee,omitempty"` // Optional. For development purposes only. Only enabled if the operation mode targets a genesis-file output. 
L2DevGenesisParams *L2DevGenesisParams `json:"l2DevGenesisParams,omitempty" toml:"l2DevGenesisParams,omitempty"` @@ -87,6 +89,7 @@ type ChainRoles struct { } var ErrFeeVaultZeroAddress = fmt.Errorf("chain has a fee vault set to zero address") +var ErrGasLimitZeroValue = fmt.Errorf("chain has a gas limit set to zero value") var ErrNonStandardValue = fmt.Errorf("chain contains non-standard config value") var ErrEip1559ZeroValue = fmt.Errorf("eip1559 param is set to zero value") var ErrIncompatibleValue = fmt.Errorf("chain contains incompatible config value") @@ -105,6 +108,11 @@ func (c *ChainIntent) Check() error { c.Eip1559Elasticity == 0 { return fmt.Errorf("%w: chainId=%s", ErrEip1559ZeroValue, c.ID) } + + if c.GasLimit == 0 { + return fmt.Errorf("%w: chainId=%s", ErrGasLimitZeroValue, c.ID) + } + if c.BaseFeeVaultRecipient == emptyAddress || c.L1FeeVaultRecipient == emptyAddress || c.SequencerFeeVaultRecipient == emptyAddress { diff --git a/op-deployer/pkg/deployer/state/deploy_config.go b/op-deployer/pkg/deployer/state/deploy_config.go index 495b7ce4d0da6..d497e0d08eb26 100644 --- a/op-deployer/pkg/deployer/state/deploy_config.go +++ b/op-deployer/pkg/deployer/state/deploy_config.go @@ -23,7 +23,7 @@ var ( ) func CombineDeployConfig(intent *Intent, chainIntent *ChainIntent, state *State, chainState *ChainState) (genesis.DeployConfig, error) { - upgradeSchedule := standard.DefaultHardforkScheduleForTag(intent.L1ContractsLocator.Tag) + upgradeSchedule := standard.DefaultHardforkScheduleForTag(standard.CurrentTag) cfg := genesis.DeployConfig{ L1DependenciesConfig: genesis.L1DependenciesConfig{ @@ -39,7 +39,7 @@ func CombineDeployConfig(intent *Intent, chainIntent *ChainIntent, state *State, FundDevAccounts: intent.FundDevAccounts, }, L2GenesisBlockDeployConfig: genesis.L2GenesisBlockDeployConfig{ - L2GenesisBlockGasLimit: 60_000_000, + L2GenesisBlockGasLimit: hexutil.Uint64(chainIntent.GasLimit), L2GenesisBlockBaseFeePerGas: &l2GenesisBlockBaseFeePerGas, }, 
L2VaultsDeployConfig: genesis.L2VaultsDeployConfig{ @@ -57,11 +57,11 @@ func CombineDeployConfig(intent *Intent, chainIntent *ChainIntent, state *State, EnableGovernance: false, GovernanceTokenSymbol: "OP", GovernanceTokenName: "Optimism", - GovernanceTokenOwner: common.HexToAddress("0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAdDEad"), + GovernanceTokenOwner: standard.GovernanceTokenOwner, }, GasPriceOracleDeployConfig: genesis.GasPriceOracleDeployConfig{ - GasPriceOracleBaseFeeScalar: 1368, - GasPriceOracleBlobBaseFeeScalar: 810949, + GasPriceOracleBaseFeeScalar: standard.BasefeeScalar, + GasPriceOracleBlobBaseFeeScalar: standard.BlobBaseFeeScalar, GasPriceOracleOperatorFeeScalar: chainIntent.OperatorFeeScalar, GasPriceOracleOperatorFeeConstant: chainIntent.OperatorFeeConstant, }, diff --git a/op-deployer/pkg/deployer/state/deploy_config_test.go b/op-deployer/pkg/deployer/state/deploy_config_test.go index 93ae5a0b57bba..a91a90a8cbcb8 100644 --- a/op-deployer/pkg/deployer/state/deploy_config_test.go +++ b/op-deployer/pkg/deployer/state/deploy_config_test.go @@ -6,7 +6,6 @@ import ( "github.com/ethereum-optimism/optimism/op-chain-ops/addresses" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/stretchr/testify/require" @@ -15,7 +14,7 @@ import ( func TestCombineDeployConfig(t *testing.T) { intent := Intent{ L1ChainID: 1, - L1ContractsLocator: artifacts.MustNewLocatorFromTag(standard.ContractsV170Beta1L2Tag), + L1ContractsLocator: artifacts.EmbeddedLocator, } chainState := ChainState{ ID: common.HexToHash("0x123"), @@ -23,6 +22,7 @@ func TestCombineDeployConfig(t *testing.T) { chainIntent := ChainIntent{ Eip1559Denominator: 1, Eip1559Elasticity: 2, + GasLimit: standard.GasLimit, BaseFeeVaultRecipient: common.HexToAddress("0x123"), L1FeeVaultRecipient: 
common.HexToAddress("0x456"), SequencerFeeVaultRecipient: common.HexToAddress("0x789"), @@ -40,13 +40,20 @@ func TestCombineDeployConfig(t *testing.T) { // apply hard fork overrides chainIntent.DeployOverrides = map[string]any{ - "l2GenesisGraniteTimeOffset": "0x8", - "l2GenesisHoloceneTimeOffset": "0x10", + "l2GenesisFjordTimeOffset": "0x1", + "l2GenesisGraniteTimeOffset": "0x2", + "l2GenesisHoloceneTimeOffset": "0x3", + "l2GenesisIsthmusTimeOffset": "0x4", + "l2GenesisJovianTimeOffset": "0x5", + "l2GenesisInteropTimeOffset": "0x6", } out, err := CombineDeployConfig(&intent, &chainIntent, &state, &chainState) require.NoError(t, err) - require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisFjordTimeOffset, hexutil.Uint64(0)) - require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisGraniteTimeOffset, hexutil.Uint64(8)) - require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisHoloceneTimeOffset, hexutil.Uint64(16)) + require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisFjordTimeOffset, hexutil.Uint64(1)) + require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisGraniteTimeOffset, hexutil.Uint64(2)) + require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisHoloceneTimeOffset, hexutil.Uint64(3)) + require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisIsthmusTimeOffset, hexutil.Uint64(4)) + require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisJovianTimeOffset, hexutil.Uint64(5)) + require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisInteropTimeOffset, hexutil.Uint64(6)) } diff --git a/op-deployer/pkg/deployer/state/intent.go b/op-deployer/pkg/deployer/state/intent.go index 29191ffb462c9..8c331e9f64296 100644 --- a/op-deployer/pkg/deployer/state/intent.go +++ b/op-deployer/pkg/deployer/state/intent.go @@ -8,6 +8,7 @@ import ( 
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-chain-ops/addresses" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" @@ -29,12 +30,13 @@ var emptyAddress common.Address var emptyHash common.Hash type SuperchainProofParams struct { - WithdrawalDelaySeconds uint64 `json:"faultGameWithdrawalDelay" toml:"faultGameWithdrawalDelay"` - MinProposalSizeBytes uint64 `json:"preimageOracleMinProposalSize" toml:"preimageOracleMinProposalSize"` - ChallengePeriodSeconds uint64 `json:"preimageOracleChallengePeriod" toml:"preimageOracleChallengePeriod"` - ProofMaturityDelaySeconds uint64 `json:"proofMaturityDelaySeconds" toml:"proofMaturityDelaySeconds"` - DisputeGameFinalityDelaySeconds uint64 `json:"disputeGameFinalityDelaySeconds" toml:"disputeGameFinalityDelaySeconds"` - MIPSVersion uint64 `json:"mipsVersion" toml:"mipsVersion"` + WithdrawalDelaySeconds uint64 `json:"faultGameWithdrawalDelay" toml:"faultGameWithdrawalDelay"` + MinProposalSizeBytes uint64 `json:"preimageOracleMinProposalSize" toml:"preimageOracleMinProposalSize"` + ChallengePeriodSeconds uint64 `json:"preimageOracleChallengePeriod" toml:"preimageOracleChallengePeriod"` + ProofMaturityDelaySeconds uint64 `json:"proofMaturityDelaySeconds" toml:"proofMaturityDelaySeconds"` + DisputeGameFinalityDelaySeconds uint64 `json:"disputeGameFinalityDelaySeconds" toml:"disputeGameFinalityDelaySeconds"` + MIPSVersion uint64 `json:"mipsVersion" toml:"mipsVersion"` + DevFeatureBitmap common.Hash `json:"devFeatureBitmap" toml:"devFeatureBitmap"` } type L1DevGenesisBlockParams struct { @@ -54,6 +56,16 @@ type L1DevGenesisParams struct { // PragueTimeOffset configures Prague (aka Pectra) to be activated at the given time after L1 dev genesis time. 
PragueTimeOffset *uint64 `json:"pragueTimeOffset" toml:"pragueTimeOffset"` + // OsakaTimeOffset configures Osaka (the EL changes in the Fusaka Ethereum fork) to be + // activated at the given time after L1 dev genesis time. + OsakaTimeOffset *uint64 `json:"osakaTimeOffset" toml:"osakaTimeOffset"` + + // BPO1TimeOffset configures the BPO1 fork to be activated at the given time after L1 dev + // genesis time. + BPO1TimeOffset *uint64 `json:"bpo1TimeOffset" toml:"bpo1TimeOffset"` + + BlobSchedule *params.BlobScheduleConfig `json:"blobSchedule"` + // Prefund is a map of addresses to balances (in wei), to prefund in the L1 dev genesis state. // This is independent of the "Prefund" functionality that may fund a default 20 test accounts. Prefund map[common.Address]*hexutil.U256 `json:"prefund" toml:"prefund"` @@ -84,13 +96,11 @@ func (c *Intent) L1ChainIDBig() *big.Int { } func (c *Intent) validateCustomConfig() error { - if c.L1ContractsLocator == nil || - (c.L1ContractsLocator.Tag == "" && c.L1ContractsLocator.URL == &url.URL{}) { + if c.L1ContractsLocator == nil { return ErrL1ContractsLocatorUndefined } - if c.L2ContractsLocator == nil || - (c.L2ContractsLocator.Tag == "" && c.L2ContractsLocator.URL == &url.URL{}) { + if c.L2ContractsLocator == nil { return ErrL2ContractsLocatorUndefined } @@ -127,9 +137,6 @@ func (c *Intent) validateStandardValues() error { if err := c.checkL1Prod(); err != nil { return err } - if err := c.checkL2Prod(); err != nil { - return err - } if c.SuperchainConfigProxy != nil { return ErrIncompatibleValue @@ -139,7 +146,7 @@ func (c *Intent) validateStandardValues() error { return ErrIncompatibleValue } - standardOPCM, err := standard.OPCMImplAddressFor(c.L1ChainID, c.L1ContractsLocator.Tag) + standardOPCM, err := standard.OPCMImplAddressFor(c.L1ChainID, standard.CurrentTag) if err != nil { return fmt.Errorf("error getting OPCM address: %w", err) } @@ -156,6 +163,9 @@ func (c *Intent) validateStandardValues() error { chain.Eip1559Elasticity != 
standard.Eip1559Elasticity { return fmt.Errorf("%w: chainId=%s", ErrNonStandardValue, chain.ID) } + if chain.GasLimit != standard.GasLimit { + return fmt.Errorf("%w: chainId=%s", ErrNonStandardValue, chain.ID) + } if len(chain.AdditionalDisputeGames) > 0 { return fmt.Errorf("%w: chainId=%s additionalDisputeGames must be nil", ErrNonStandardValue, chain.ID) } @@ -249,11 +259,11 @@ func (c *Intent) checkL1Prod() error { return err } - if _, ok := versions[validation.Semver(c.L1ContractsLocator.Tag)]; !ok { - return fmt.Errorf("tag '%s' not found in standard versions", c.L1ContractsLocator.Tag) + if _, ok := versions[validation.Semver(standard.CurrentTag)]; !ok { + return fmt.Errorf("tag '%s' not found in standard versions", standard.CurrentTag) } - opcmAddr, err := standard.OPCMImplAddressFor(c.L1ChainID, c.L1ContractsLocator.Tag) + opcmAddr, err := standard.OPCMImplAddressFor(c.L1ChainID, standard.CurrentTag) if err != nil { return fmt.Errorf("error getting OPCM address: %w", err) } @@ -264,11 +274,6 @@ func (c *Intent) checkL1Prod() error { return nil } -func (c *Intent) checkL2Prod() error { - _, err := standard.ArtifactsURLForTag(c.L2ContractsLocator.Tag) - return err -} - func NewIntent(configType IntentType, l1ChainId uint64, l2ChainIds []common.Hash) (intent Intent, err error) { switch configType { case IntentTypeCustom: @@ -303,14 +308,15 @@ func NewIntentCustom(l1ChainId uint64, l2ChainIds []common.Hash) (Intent, error) for _, l2ChainID := range l2ChainIds { intent.Chains = append(intent.Chains, &ChainIntent{ - ID: l2ChainID, + ID: l2ChainID, + GasLimit: standard.GasLimit, }) } return intent, nil } func NewIntentStandard(l1ChainId uint64, l2ChainIds []common.Hash) (Intent, error) { - opcmAddr, err := standard.OPCMImplAddressFor(l1ChainId, artifacts.DefaultL1ContractsLocator.Tag) + opcmAddr, err := standard.OPCMImplAddressFor(l1ChainId, standard.CurrentTag) if err != nil { return Intent{}, fmt.Errorf("error getting OPCM impl address: %w", err) } @@ -342,6 
+348,7 @@ func NewIntentStandard(l1ChainId uint64, l2ChainIds []common.Hash) (Intent, erro Eip1559DenominatorCanyon: standard.Eip1559DenominatorCanyon, Eip1559Denominator: standard.Eip1559Denominator, Eip1559Elasticity: standard.Eip1559Elasticity, + GasLimit: standard.GasLimit, Roles: ChainRoles{ Challenger: challenger, L1ProxyAdminOwner: l1ProxyAdminOwner, diff --git a/op-deployer/pkg/deployer/testutil/env.go b/op-deployer/pkg/deployer/testutil/env.go index a8d9147c3e996..175fecb7d7aac 100644 --- a/op-deployer/pkg/deployer/testutil/env.go +++ b/op-deployer/pkg/deployer/testutil/env.go @@ -8,6 +8,8 @@ import ( "runtime" "testing" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" op_service "github.com/ethereum-optimism/optimism/op-service" @@ -29,7 +31,7 @@ func LocalArtifacts(t *testing.T) (*artifacts.Locator, foundry.StatDirFs) { testCacheDir := testutils.IsolatedTestDirWithAutoCleanup(t) - artifactsFS, err := artifacts.Download(context.Background(), loc, artifacts.NoopProgressor(), testCacheDir) + artifactsFS, err := artifacts.Download(context.Background(), loc, ioutil.NoopProgressor(), testCacheDir) require.NoError(t, err) return loc, artifactsFS diff --git a/op-deployer/pkg/deployer/upgrade/flags.go b/op-deployer/pkg/deployer/upgrade/flags.go index 0a7a4f7564859..0382faf35f687 100644 --- a/op-deployer/pkg/deployer/upgrade/flags.go +++ b/op-deployer/pkg/deployer/upgrade/flags.go @@ -43,7 +43,7 @@ var Commands = cli.Commands{ }, &cli.Command{ Name: "v4.0.0", - Usage: "upgrades a chain to version v.0.0", + Usage: "upgrades a chain to version v4.0.0", Flags: append([]cli.Flag{ deployer.L1RPCURLFlag, ConfigFlag, diff --git a/op-deployer/pkg/deployer/upgrade/upgrader.go b/op-deployer/pkg/deployer/upgrade/upgrader.go index 2bd6edd2a889d..b60c368bce948 100644 --- a/op-deployer/pkg/deployer/upgrade/upgrader.go +++ 
b/op-deployer/pkg/deployer/upgrade/upgrader.go @@ -6,6 +6,8 @@ import ( "fmt" "os" + "github.com/ethereum-optimism/optimism/op-service/ioutil" + "github.com/ethereum-optimism/optimism/op-chain-ops/script" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" @@ -55,7 +57,7 @@ func UpgradeCLI(upgrader Upgrader) func(*cli.Context) error { depAddr := common.Address{'D'} cacheDir := cliCtx.String(deployer.CacheDirFlag.Name) - artifactsFS, err := artifacts.Download(ctx, artifactsLocator, artifacts.BarProgressor(), cacheDir) + artifactsFS, err := artifacts.Download(ctx, artifactsLocator, ioutil.BarProgressor(), cacheDir) if err != nil { return fmt.Errorf("failed to download L1 artifacts: %w", err) } diff --git a/op-deployer/pkg/deployer/utils.go b/op-deployer/pkg/deployer/utils.go new file mode 100644 index 0000000000000..09691e71c6a4a --- /dev/null +++ b/op-deployer/pkg/deployer/utils.go @@ -0,0 +1,61 @@ +package deployer + +import ( + "fmt" + "log" + "os" + "path" +) + +type DeploymentTarget string + +const ( + DeploymentTargetLive DeploymentTarget = "live" + DeploymentTargetGenesis DeploymentTarget = "genesis" + DeploymentTargetCalldata DeploymentTarget = "calldata" + DeploymentTargetNoop DeploymentTarget = "noop" +) + +func NewDeploymentTarget(s string) (DeploymentTarget, error) { + switch s { + case string(DeploymentTargetLive): + return DeploymentTargetLive, nil + case string(DeploymentTargetGenesis): + return DeploymentTargetGenesis, nil + case string(DeploymentTargetCalldata): + return DeploymentTargetCalldata, nil + case string(DeploymentTargetNoop): + return DeploymentTargetNoop, nil + default: + return "", fmt.Errorf("invalid deployment target: %s", s) + } +} + +func cwd() string { + dir, err := os.Getwd() + if err != nil { + return "" + } + return dir +} + +func DefaultCacheDir() string { + var cacheDir string + + homeDir, err := os.UserHomeDir() + if err != nil { + cacheDir 
= ".op-deployer/cache" + log.Printf("error getting user home directory: %v, using fallback directory: %s\n", err, cacheDir) + } else { + cacheDir = path.Join(homeDir, ".op-deployer/cache") + } + + return cacheDir +} + +func CreateCacheDir(cacheDir string) error { + if err := os.MkdirAll(cacheDir, 0755); err != nil { + return fmt.Errorf("failed to create cache directory %s: %w", cacheDir, err) + } + return nil +} diff --git a/op-deployer/pkg/deployer/utils_test.go b/op-deployer/pkg/deployer/utils_test.go new file mode 100644 index 0000000000000..9e13ff5d4d7c2 --- /dev/null +++ b/op-deployer/pkg/deployer/utils_test.go @@ -0,0 +1,12 @@ +package deployer + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestEnsureDefaultCacheDir(t *testing.T) { + cacheDir := DefaultCacheDir() + require.NotNil(t, cacheDir) +} diff --git a/op-deployer/pkg/deployer/verify/artifacts.go b/op-deployer/pkg/deployer/verify/artifacts.go index 9cbb0e5f828e2..ce3f56c7c6e03 100644 --- a/op-deployer/pkg/deployer/verify/artifacts.go +++ b/op-deployer/pkg/deployer/verify/artifacts.go @@ -30,7 +30,7 @@ var contractNameExceptions = map[string]string{ "OpcmDeployer": "OPContractsManager.sol/OPContractsManagerDeployer.json", "OpcmUpgrader": "OPContractsManager.sol/OPContractsManagerUpgrader.json", "OpcmInteropMigrator": "OPContractsManager.sol/OPContractsManagerInteropMigrator.json", - "OpcmStandardValidator": "OPContractsManager.sol/OPContractsManagerStandardValidator.json", + "OpcmStandardValidator": "OPContractsManagerStandardValidator.sol/OPContractsManagerStandardValidator.json", "Mips": "MIPS64.sol/MIPS64.json", } diff --git a/op-deployer/pkg/deployer/verify/constructors_test.go b/op-deployer/pkg/deployer/verify/constructors_test.go index 20a3dc0c2e2fd..7a3d2757b8997 100644 --- a/op-deployer/pkg/deployer/verify/constructors_test.go +++ b/op-deployer/pkg/deployer/verify/constructors_test.go @@ -26,11 +26,6 @@ func TestCalculateTypeSlots(t *testing.T) { "type": "address", 
"internalType": "contract IProxyAdmin" }, - { - "name": "_l1ContractsRelease", - "type": "string", - "internalType": "string" - }, { "name": "_blueprints", "type": "tuple", @@ -149,11 +144,6 @@ func TestCalculateTypeSlots(t *testing.T) { "internalType": "address" } ] - }, - { - "name": "_upgradeController", - "type": "address", - "internalType": "address" } ]` @@ -166,6 +156,6 @@ func TestCalculateTypeSlots(t *testing.T) { totalSlots += calculateTypeSlots(arg.Type) } - require.Equal(t, 28, totalSlots) + require.Equal(t, 24, totalSlots) }) } diff --git a/op-deployer/pkg/deployer/verify/verifier.go b/op-deployer/pkg/deployer/verify/verifier.go index ec48b9d6918b6..fe865d5da7a27 100644 --- a/op-deployer/pkg/deployer/verify/verifier.go +++ b/op-deployer/pkg/deployer/verify/verifier.go @@ -37,7 +37,7 @@ func NewVerifier(apiKey string, l1ChainID uint64, artifactsFS foundry.StatDirFs, } l.Info("found etherscan url", "url", etherscanUrl) - etherscan := NewEtherscanClient(apiKey, etherscanUrl, rate.NewLimiter(rate.Limit(3), 2)) + etherscan := NewEtherscanClient(apiKey, etherscanUrl, rate.NewLimiter(rate.Limit(1), 1)) return &Verifier{ l1ChainID: l1ChainID, @@ -88,7 +88,7 @@ func VerifyCLI(cliCtx *cli.Context) error { if err != nil { return fmt.Errorf("failed to parse l1 contracts release locator: %w", err) } - artifactsFS, err := artifacts.Download(ctx, locator, nil, deployer.GetDefaultCacheDir()) + artifactsFS, err := artifacts.Download(ctx, locator, nil, deployer.DefaultCacheDir()) if err != nil { return fmt.Errorf("failed to get artifacts: %w", err) } diff --git a/op-deployer/scripts/forge-checksums.sh b/op-deployer/scripts/forge-checksums.sh new file mode 100755 index 0000000000000..1a79ce45b3e22 --- /dev/null +++ b/op-deployer/scripts/forge-checksums.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Usage: ./forge-checksums.sh v1.3.1 +if [[ $# -ne 1 ]]; then + echo "usage: $0 vX.Y.Z" >&2 + exit 1 +fi +VER="$1" + +# Matrix of supported OS/arch combos 
+pairs=( + "darwin_amd64" + "darwin_arm64" + "linux_amd64" + "linux_arm64" + "alpine_amd64" + "alpine_arm64" +) + +# Resolve paths +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +JSON_PATH="${SCRIPT_DIR}/../pkg/deployer/forge/version.json" + +# Temp workspace +TMP_DIR="$(mktemp -d)" +trap 'rm -rf "${TMP_DIR}"' EXIT + +declare -A sums + +# Download each tarball and compute sha256 over the tar.gz (matches code path) +for pair in "${pairs[@]}"; do + os="${pair%_*}" + arch="${pair#*_}" + url="https://github.com/foundry-rs/foundry/releases/download/${VER}/foundry_${VER}_${os}_${arch}.tar.gz" + out="${TMP_DIR}/foundry_${VER}_${pair}.tar.gz" + + echo "--------------------------------" + echo "Processing ${pair}..." + + echo "Downloading ${url}" + curl -fsSL --retry 3 --retry-delay 1 -o "${out}" "${url}" + + echo "Computing checksum" + sha="$(shasum -a 256 "${out}" | awk '{print $1}')" + echo "Checksum for ${pair}: ${sha}" + sums["${pair}"]="${sha}" +done + +echo "--------------------------------" +echo "Done computing checksums" +echo "Writing results to ${JSON_PATH}" + +# Write version.json +mkdir -p "$(dirname "${JSON_PATH}")" +{ + printf '{\n' + printf ' "forge": "%s",\n' "${VER}" + printf ' "checksums": {\n' + for ((i=0; i<${#pairs[@]}; i++)); do + key="${pairs[$i]}" + val="${sums[$key]}" + if (( i < ${#pairs[@]} - 1 )); then + printf ' "%s": "%s",\n' "${key}" "${val}" + else + printf ' "%s": "%s"\n' "${key}" "${val}" + fi + done + printf ' }\n' + printf '}\n' +} > "${JSON_PATH}" + +echo "Success!" diff --git a/op-devstack/README.md b/op-devstack/README.md index 119e6b385b78c..c5e527a474eff 100644 --- a/op-devstack/README.md +++ b/op-devstack/README.md @@ -151,4 +151,17 @@ The following environment variables can be used to configure devstack: - `DEVSTACK_ORCHESTRATOR`: Configures the preferred orchestrator kind (see Orchestrator interface section above). - `DEVSTACK_KEYS_SALT`: Seeds the keys generated with `NewHDWallet`. 
This is useful for "isolating" test runs, and might be needed to reproduce CI and/or acceptance test runs. It can be any string, including the empty one to use the "usual" devkeys. - `DEVNET_ENV_URL`: Used when `DEVSTACK_ORCHESTRATOR=sysext` to specify the network descriptor URL. -- `DEVNET_EXPECT_PRECONDITIONS_MET`: This can be set of force test failures when their pre-conditions are not met, which would otherwise result in them being skipped. This is helpful in particular for runs that do intend to run specific tests (as opposed to whatever is available). `op-acceptor` does set that variable, for example. \ No newline at end of file +- `DEVNET_EXPECT_PRECONDITIONS_MET`: This can be set of force test failures when their pre-conditions are not met, which would otherwise result in them being skipped. This is helpful in particular for runs that do intend to run specific tests (as opposed to whatever is available). `op-acceptor` does set that variable, for example. + +Rust stack env vars: +- `DEVSTACK_L2CL_KIND=kona` to select kona as default L2 CL node +- `DEVSTACK_L2EL_KIND=op-reth` to select op-reth as default L2 EL node +- `KONA_NODE_EXEC_PATH=/home/USERHERE/projects/kona/target/debug/kona-node` to select the kona-node executable to run +- `OP_RETH_EXEC_PATH=/home/USERHERE/projects/reth/target/release/op-reth` to select the op-reth executable to run + +Go stack env vars: +- `DEVSTACK_L1EL_KIND=geth` to select geth as default L1 EL node +- `SYSGO_GETH_EXEC_PATH=/path/to/geth` to select the geth executable to run + +Other useful env vars: +- `DISABLE_OP_E2E_LEGACY=true` to disable the op-e2e package from loading build-artifacts that are not used by devstack. 
diff --git a/op-devstack/devtest/testing.go b/op-devstack/devtest/testing.go index 3faefc7d05794..f368c0ef16f7d 100644 --- a/op-devstack/devtest/testing.go +++ b/op-devstack/devtest/testing.go @@ -79,7 +79,7 @@ type T interface { // This distinguishes the interface from other testing interfaces, // such as the one used at package-level for shared system construction. - _TestOnly() + TestOnly() } // This testing subset is sufficient for the require.Assertions to work. @@ -283,7 +283,7 @@ func (t *testingT) Deadline() (deadline time.Time, ok bool) { return t.t.Deadline() } -func (t *testingT) _TestOnly() { +func (t *testingT) TestOnly() { panic("do not use - this method only forces the interface to be unique") } diff --git a/op-devstack/dsl/bridge.go b/op-devstack/dsl/bridge.go index dec590b13017f..8f59070d7d56f 100644 --- a/op-devstack/dsl/bridge.go +++ b/op-devstack/dsl/bridge.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "math/big" + "strings" "time" "github.com/ethereum-optimism/optimism/op-chain-ops/crossdomain" @@ -129,7 +130,19 @@ func (b *StandardBridge) RespectedGameType() uint32 { return gameType } +func (b *StandardBridge) PortalVersion() string { + version, err := contractio.Read(b.l1Portal.Version(), b.ctx) + b.require.NoError(err, "Failed to read portal version") + return version +} + func (b *StandardBridge) UsesSuperRoots() bool { + // Only interop contracts have SuperRootsActive functionality + version := b.PortalVersion() + if !strings.HasSuffix(version, "+interop") { + return false + } + superRootsActive, err := contractio.Read(b.l1Portal.SuperRootsActive(), b.ctx) b.require.NoError(err, "Failed to read super roots active") return superRootsActive @@ -555,5 +568,14 @@ func gasCost(rcpt *types.Receipt) eth.ETH { if rcpt.L1Fee != nil { cost = cost.Add(eth.WeiBig(rcpt.L1Fee)) } + if rcpt.OperatorFeeConstant != nil && rcpt.OperatorFeeScalar != nil { + // 
https://github.com/ethereum-optimism/op-geth/blob/6005dd53e1b50fe5a3f59764e3e2056a639eff2f/core/types/rollup_cost.go#L244-L247 + // Also see: https://specs.optimism.io/protocol/isthmus/exec-engine.html#operator-operatorCost + operatorCost := new(big.Int).SetUint64(rcpt.GasUsed) + operatorCost.Mul(operatorCost, new(big.Int).SetUint64(*rcpt.OperatorFeeScalar)) + operatorCost = operatorCost.Div(operatorCost, big.NewInt(1_000_000)) + operatorCost = operatorCost.Add(operatorCost, new(big.Int).SetUint64(*rcpt.OperatorFeeConstant)) + cost = cost.Add(eth.WeiBig(operatorCost)) + } return cost } diff --git a/op-devstack/dsl/contract/call.go b/op-devstack/dsl/contract/call.go index 1daa8478a5c71..d378cd8f00031 100644 --- a/op-devstack/dsl/contract/call.go +++ b/op-devstack/dsl/contract/call.go @@ -2,8 +2,11 @@ package contract import ( "fmt" + "math/big" "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-service/errutil" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" "github.com/ethereum-optimism/optimism/op-service/txintent/contractio" "github.com/ethereum-optimism/optimism/op-service/txplan" @@ -31,12 +34,25 @@ func Read[O any](call bindings.TypedCall[O], opts ...txplan.Option) O { return o } +// ReadArray retrieves all data from an array in batches +func ReadArray[T any](countCall bindings.TypedCall[*big.Int], elemCall func(i *big.Int) bindings.TypedCall[T]) []T { + checkTestable(countCall) + test := countCall.Test() + ctx := countCall.Test().Ctx() + + caller := countCall.Client().NewMultiCaller(batching.DefaultBatchSize) + + o, err := contractio.ReadArray(ctx, caller, countCall, elemCall) + test.Require().NoError(err) + return o +} + // Write makes a user to write a tx by using the planned contract bindings func Write[O any](user *dsl.EOA, call bindings.TypedCall[O], opts ...txplan.Option) *types.Receipt { checkTestable(call) 
finalOpts := txplan.Combine(user.Plan(), txplan.Combine(opts...)) o, err := contractio.Write(call, call.Test().Ctx(), finalOpts) - call.Test().Require().NoError(err) + call.Test().Require().NoError(err, "contract write failed: %v", errutil.TryAddRevertReason(err)) return o } diff --git a/op-devstack/dsl/ecotone_fees.go b/op-devstack/dsl/ecotone_fees.go index 3412f893c4912..d1970a94b926b 100644 --- a/op-devstack/dsl/ecotone_fees.go +++ b/op-devstack/dsl/ecotone_fees.go @@ -54,29 +54,40 @@ func (ef *EcotoneFees) ValidateTransaction(from *EOA, to *EOA, amount *big.Int) ef.require.NoError(err) ef.require.Equal(types.ReceiptStatusSuccessful, receipt.Status) + // Get block info for base fee information + blockInfo, err := client.InfoByHash(ef.ctx, receipt.BlockHash) + ef.require.NoError(err) + endBalance := from.GetBalance() vaultsAfter := ef.getVaultBalances(client) vaultIncreases := ef.calculateVaultIncreases(vaultsBefore, vaultsAfter) - l1Fee := big.NewInt(0) - if receipt.L1Fee != nil { - l1Fee = receipt.L1Fee - } + // In Ecotone, L1 fee includes both base fee and blob base fee components + l1Fee := vaultIncreases.L1FeeVault // Use actual vault increase as the source of truth - block, err := client.InfoByHash(ef.ctx, receipt.BlockHash) - ef.require.NoError(err) + // Calculate receipt-based fees for validation + receiptBaseFee := new(big.Int).Mul(blockInfo.BaseFee(), big.NewInt(int64(receipt.GasUsed))) + receiptL2Fee := new(big.Int).Mul(receipt.EffectiveGasPrice, big.NewInt(int64(receipt.GasUsed))) + + // Calculate L2 fees from vault increases + baseFee := vaultIncreases.BaseFeeVault // Use actual vault increase as the source of truth + priorityFee := vaultIncreases.SequencerVault // Use actual vault increase as the source of truth + l2Fee := new(big.Int).Add(baseFee, priorityFee) - baseFee := new(big.Int).Mul(block.BaseFee(), big.NewInt(int64(receipt.GasUsed))) - l2Fee := new(big.Int).Mul(receipt.EffectiveGasPrice, big.NewInt(int64(receipt.GasUsed))) - priorityFee := 
new(big.Int).Sub(l2Fee, baseFee) - totalFee := new(big.Int).Add(l1Fee, l2Fee) + // Total fee is the sum of all vault increases (excluding OperatorVault which should be zero in Ecotone) + totalFee := new(big.Int).Add(vaultIncreases.BaseFeeVault, vaultIncreases.L1FeeVault) + totalFee.Add(totalFee, vaultIncreases.SequencerVault) walletBalanceDiff := new(big.Int).Sub(startBalance.ToBig(), endBalance.ToBig()) walletBalanceDiff.Sub(walletBalanceDiff, amount) - ef.validateFeeDistribution(l1Fee, baseFee, priorityFee, vaultIncreases) + // Validate total balance first to ensure all fees are accounted for ef.validateTotalBalance(walletBalanceDiff, totalFee, vaultIncreases) + + // Then validate individual fee components + ef.validateFeeDistribution(l1Fee, baseFee, priorityFee, vaultIncreases) ef.validateEcotoneFeatures(receipt, l1Fee) + ef.validateReceiptFees(receipt, l1Fee, baseFee, l2Fee, receiptBaseFee, receiptL2Fee) return EcotoneFeesValidationResult{ TransactionReceipt: receipt, @@ -129,13 +140,15 @@ func (ef *EcotoneFees) validateFeeDistribution(l1Fee, baseFee, priorityFee *big. 
ef.require.Equal(baseFee, vaults.BaseFeeVault, "Base fee must match BaseFeeVault increase") ef.require.Equal(priorityFee, vaults.SequencerVault, "Priority fee must match SequencerFeeVault increase") - ef.require.True(vaults.OperatorVault.Sign() >= 0, "Operator vault increase must be non-negative") + // In Ecotone, operator fees should not exist (introduced in Isthmus) + ef.require.Equal(vaults.OperatorVault.Cmp(big.NewInt(0)), 0, + "Operator vault increase must be zero in Ecotone (operator fees introduced in Isthmus)") } func (ef *EcotoneFees) validateTotalBalance(walletDiff *big.Int, totalFee *big.Int, vaults VaultBalances) { + // In Ecotone, only BaseFeeVault, L1FeeVault, and SequencerVault should have increases totalVaultIncrease := new(big.Int).Add(vaults.BaseFeeVault, vaults.L1FeeVault) totalVaultIncrease.Add(totalVaultIncrease, vaults.SequencerVault) - totalVaultIncrease.Add(totalVaultIncrease, vaults.OperatorVault) ef.require.Equal(walletDiff, totalFee, "Wallet balance difference must equal total fees") ef.require.Equal(totalVaultIncrease, totalFee, "Total vault increases must equal total fees") @@ -149,6 +162,27 @@ func (ef *EcotoneFees) validateEcotoneFeatures(receipt *types.Receipt, l1Fee *bi ef.require.Greater(receipt.EffectiveGasPrice.Uint64(), uint64(0), "Effective gas price should be > 0") } +func (ef *EcotoneFees) validateReceiptFees(receipt *types.Receipt, l1Fee, vaultBaseFee, vaultL2Fee, receiptBaseFee, receiptL2Fee *big.Int) { + // Check that receipt's L1Fee matches the vault increase + if receipt.L1Fee != nil { + ef.require.Equal(receipt.L1Fee, l1Fee, "Receipt L1Fee must match L1FeeVault increase") + } + + // Sanity check: Receipt-calculated fees should match vault-based fees + ef.require.Equal(receiptBaseFee, vaultBaseFee, + "Receipt-calculated base fee (block.BaseFee * gasUsed) must match BaseFeeVault increase") + ef.require.Equal(receiptL2Fee, vaultL2Fee, + "Receipt-calculated L2 fee (effectiveGasPrice * gasUsed) must match L2 vault increases 
(BaseFee + SequencerFee)") + + // Validate receipt-based calculations are positive + ef.require.True(receiptBaseFee.Sign() > 0, "Receipt-based base fee must be positive") + ef.require.True(receiptL2Fee.Sign() > 0, "Receipt-based L2 fee must be positive") + + // The effective gas price should be consistent with the calculated L2 fee + ef.require.Equal(receiptL2Fee.Cmp(receiptBaseFee) >= 0, true, + "Receipt L2 fee (effectiveGasPrice * gasUsed) should be >= base fee") +} + func (ef *EcotoneFees) LogResults(result EcotoneFeesValidationResult) { ef.log.Info("Comprehensive Ecotone fees validation completed", "gasUsed", result.TransactionReceipt.GasUsed, diff --git a/op-devstack/dsl/el.go b/op-devstack/dsl/el.go index 1a25caef43b11..b7e4d97251a72 100644 --- a/op-devstack/dsl/el.go +++ b/op-devstack/dsl/el.go @@ -103,13 +103,28 @@ func (el *elNode) waitForNextBlock(blocksFromNow uint64) eth.BlockRef { return newRef } +// WaitForTime waits until the chain has reached or surpassed the given timestamp. +func (el *elNode) WaitForTime(timestamp uint64) eth.BlockRef { + for range time.Tick(500 * time.Millisecond) { + ref, err := el.inner.EthClient().BlockRefByLabel(el.ctx, eth.Unsafe) + el.require.NoError(err) + if ref.Time >= timestamp { + return ref + } + } + return eth.BlockRef{} // Should never be reached. +} + func (el *elNode) stackEL() stack.ELNode { return el.inner } +// WaitForFinalization waits for the current block height to be finalized. Note that it does not +// ensure that the finalized block is the same as the current unsafe block (i.e., it is not +// reorg-aware). 
func (el *elNode) WaitForFinalization() eth.BlockRef { // Get current block and wait for it to be finalized - currentBlock, err := el.inner.EthClient().InfoByLabel(el.ctx, eth.Finalized) + currentBlock, err := el.inner.EthClient().InfoByLabel(el.ctx, eth.Unsafe) el.require.NoError(err, "Expected to get current block from execution client") var finalizedBlock eth.BlockRef @@ -119,7 +134,7 @@ func (el *elNode) WaitForFinalization() eth.BlockRef { if err != nil { return false } - if block.NumberU64() > currentBlock.NumberU64() { + if block.NumberU64() >= currentBlock.NumberU64() { finalizedBlock = eth.InfoToL1BlockRef(block) return true } diff --git a/op-devstack/dsl/engine.go b/op-devstack/dsl/engine.go new file mode 100644 index 0000000000000..b01659e7b6cb4 --- /dev/null +++ b/op-devstack/dsl/engine.go @@ -0,0 +1,75 @@ +package dsl + +import ( + "errors" + "time" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/retry" +) + +type NewPayloadResult struct { + T devtest.T + Status *eth.PayloadStatusV1 + Err error +} + +func (r *NewPayloadResult) IsPayloadStatus(status eth.ExecutePayloadStatus) *NewPayloadResult { + r.T.Require().NotNil(r.Status, "payload status nil") + r.T.Require().Equal(status, r.Status.Status) + return r +} + +func (r *NewPayloadResult) IsSyncing() *NewPayloadResult { + r.IsPayloadStatus(eth.ExecutionSyncing) + r.T.Require().NoError(r.Err) + return r +} + +func (r *NewPayloadResult) IsValid() *NewPayloadResult { + r.IsPayloadStatus(eth.ExecutionValid) + r.T.Require().NoError(r.Err) + return r +} + +type ForkchoiceUpdateResult struct { + T devtest.T + Refresh func() + Result *eth.ForkchoiceUpdatedResult + Err error +} + +func (r *ForkchoiceUpdateResult) IsForkchoiceUpdatedStatus(status eth.ExecutePayloadStatus) *ForkchoiceUpdateResult { + r.T.Require().NotNil(r.Result, "fcu result nil") + r.T.Require().Equal(status, 
r.Result.PayloadStatus.Status) + return r +} + +func (r *ForkchoiceUpdateResult) IsSyncing() *ForkchoiceUpdateResult { + r.IsForkchoiceUpdatedStatus(eth.ExecutionSyncing) + r.T.Require().NoError(r.Err) + return r +} + +func (r *ForkchoiceUpdateResult) IsValid() *ForkchoiceUpdateResult { + r.IsForkchoiceUpdatedStatus(eth.ExecutionValid) + r.T.Require().NoError(r.Err) + return r +} + +func (r *ForkchoiceUpdateResult) WaitUntilValid(attempts int) *ForkchoiceUpdateResult { + tryCnt := 0 + err := retry.Do0(r.T.Ctx(), attempts, &retry.FixedStrategy{Dur: 1 * time.Second}, + func() error { + r.Refresh() + tryCnt += 1 + if r.Result.PayloadStatus.Status != eth.ExecutionValid { + r.T.Log("Wait for FCU to return valid", "status", r.Result.PayloadStatus.Status, "try_count", tryCnt) + return errors.New("still syncing") + } + return nil + }) + r.T.Require().NoError(err) + return r +} diff --git a/op-devstack/dsl/eoa.go b/op-devstack/dsl/eoa.go index 6d9d54b938e80..49ce2063c48d4 100644 --- a/op-devstack/dsl/eoa.go +++ b/op-devstack/dsl/eoa.go @@ -6,10 +6,6 @@ import ( "math/rand" "time" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" @@ -20,6 +16,8 @@ import ( txIntentBindings "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" "github.com/ethereum-optimism/optimism/op-service/txintent/contractio" "github.com/ethereum-optimism/optimism/op-service/txplan" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" ) // EOA is an Externally-Owned-Account: @@ -81,14 +79,26 @@ func (u *EOA) Plan() txplan.Option { ) } +func (u *EOA) PlanAuth(code common.Address) txplan.Option { + toAddr := u.Address() + return txplan.Combine( + u.Plan(), + 
txplan.WithType(types.SetCodeTxType), + txplan.WithTo(&toAddr), + txplan.WithAuthorizationTo(code), + // Set a fixed gas limit because eth_estimateGas doesn't consider authorizations yet. + txplan.WithGasLimit(75_000), + ) +} + // PlanTransfer creates the tx-plan options to perform a transfer // of the given amount of ETH to the given account. func (u *EOA) PlanTransfer(to common.Address, amount eth.ETH) txplan.Option { return txplan.Combine( u.Plan(), txplan.WithTo(&to), - txplan.WithValue(amount.ToBig()), - txplan.WithGasLimit(params.TxGas), + txplan.WithValue(amount), + // Don't set gas explicitly since the transfer might be to a contract ) } @@ -143,8 +153,8 @@ func (u *EOA) VerifyBalanceAtLeast(v eth.ETH) { func (u *EOA) WaitForBalance(v eth.ETH) { u.t.Require().Eventually(func() bool { - u.VerifyBalanceExact(v) - return true + actual := u.balance() + return actual == v }, u.el.stackEL().TransactionTimeout(), time.Second, "awaiting balance to be updated") } diff --git a/op-devstack/dsl/fb_builder.go b/op-devstack/dsl/fb_builder.go index 57b49baa82158..0820d9c46dbfa 100644 --- a/op-devstack/dsl/fb_builder.go +++ b/op-devstack/dsl/fb_builder.go @@ -51,5 +51,5 @@ func (c *FlashblocksBuilderNode) Conductor() *Conductor { } func (c *FlashblocksBuilderNode) ListenFor(logger log.Logger, duration time.Duration, output chan<- []byte, done chan<- struct{}) error { - return websocketListenFor(logger, c.inner.FlashblocksWsUrl(), duration, output, done) + return websocketListenFor(logger, c.inner.FlashblocksWsUrl(), c.inner.FlashblocksWsHeaders(), duration, output, done) } diff --git a/op-devstack/dsl/fb_ws_proxy.go b/op-devstack/dsl/fb_ws_proxy.go index fa20cd0b2741d..72fa8c5e3a66d 100644 --- a/op-devstack/dsl/fb_ws_proxy.go +++ b/op-devstack/dsl/fb_ws_proxy.go @@ -2,6 +2,7 @@ package dsl import ( "fmt" + "net/http" "strings" "time" @@ -41,29 +42,53 @@ func (c *FlashblocksWebsocketProxy) Escape() stack.FlashblocksWebsocketProxy { } func (c *FlashblocksWebsocketProxy) 
ListenFor(logger log.Logger, duration time.Duration, output chan<- []byte, done chan<- struct{}) error { - return websocketListenFor(logger, c.Escape().WsUrl(), duration, output, done) + wsURL := c.Escape().WsUrl() + headers := c.Escape().WsHeaders() + return websocketListenFor(logger, wsURL, headers, duration, output, done) } -func websocketListenFor(logger log.Logger, wsURL string, duration time.Duration, output chan<- []byte, done chan<- struct{}) error { +func websocketListenFor(logger log.Logger, wsURL string, headers http.Header, duration time.Duration, output chan<- []byte, done chan<- struct{}) error { defer close(done) - logger.Debug("Testing WebSocket connection to", "url", wsURL) + logger.Debug("Testing WebSocket connection to", "url", wsURL, "headers", headers) + + // Log the headers for debug purposes + if headers != nil { + for key, values := range headers { + logger.Debug("Header", "key", key, "values", values) + } + } else { + logger.Debug("No headers provided") + } dialer := &websocket.Dialer{ HandshakeTimeout: 6 * time.Second, } - conn, _, err := dialer.Dial(wsURL, nil) + // Always close the response body to prevent resource leaks + logger.Debug("Attempting WebSocket connection", "url", wsURL) + conn, resp, err := dialer.Dial(wsURL, headers) if err != nil { + logger.Error("WebSocket connection failed", "url", wsURL, "error", err) + if resp != nil { + logger.Error("HTTP response details", "status", resp.Status, "headers", resp.Header) + resp.Body.Close() + } return fmt.Errorf("failed to connect to Flashblocks WebSocket endpoint %s: %w", wsURL, err) } + + if resp != nil { + defer resp.Body.Close() + } defer conn.Close() - logger.Info("WebSocket connection established, reading stream for %s", duration) + logger.Info("WebSocket connection established successfully", "url", wsURL, "reading stream for", duration) timeout := time.After(duration) + messageCount := 0 for { select { case <-timeout: + logger.Info("WebSocket read timeout reached", 
"total_messages", messageCount) return nil default: err = conn.SetReadDeadline(time.Now().Add(duration)) @@ -72,12 +97,17 @@ func websocketListenFor(logger log.Logger, wsURL string, duration time.Duration, } _, message, err := conn.ReadMessage() if err != nil && !strings.Contains(err.Error(), "timeout") { + logger.Error("Error reading WebSocket message", "error", err, "message_count", messageCount) return fmt.Errorf("error reading WebSocket message: %w", err) } if err == nil { + messageCount++ + logger.Debug("Received WebSocket message", "message_count", messageCount, "message_length", len(message)) select { case output <- message: + logger.Debug("Message sent to output channel", "message_count", messageCount) case <-timeout: // to avoid indefinite hang + logger.Info("Timeout while sending message to output channel", "total_messages", messageCount) return nil } } diff --git a/op-devstack/dsl/fjord_fees.go b/op-devstack/dsl/fjord_fees.go new file mode 100644 index 0000000000000..b27566634c71c --- /dev/null +++ b/op-devstack/dsl/fjord_fees.go @@ -0,0 +1,369 @@ +package dsl + +import ( + "context" + "fmt" + "math/big" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/predeploys" + "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" + "github.com/ethereum-optimism/optimism/op-service/txintent/contractio" + "github.com/ethereum-optimism/optimism/op-service/txplan" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +type FjordFees struct { + commonImpl + l2Network *L2Network +} + +type FjordFeesValidationResult struct { + TransactionReceipt *types.Receipt + L1Fee *big.Int + L2Fee *big.Int + BaseFee *big.Int + PriorityFee *big.Int + TotalFee *big.Int + VaultBalances 
VaultBalances + WalletBalanceDiff *big.Int + TransferAmount *big.Int + FastLzSize uint64 + EstimatedBrotliSize *big.Int + OperatorFee *big.Int + CoinbaseDiff *big.Int +} + +func NewFjordFees(t devtest.T, l2Network *L2Network) *FjordFees { + return &FjordFees{ + commonImpl: commonFromT(t), + l2Network: l2Network, + } +} + +// ValidateTransaction validates the transaction and returns the validation result +func (ff *FjordFees) ValidateTransaction(from *EOA, to *EOA, amount *big.Int) FjordFeesValidationResult { + client := ff.l2Network.inner.L2ELNode(match.FirstL2EL).EthClient() + + startBalance := from.GetBalance() + vaultsBefore := ff.getVaultBalances(client) + coinbaseStartBalance := ff.getCoinbaseBalance(client) + + tx := from.Transfer(to.Address(), eth.WeiBig(amount)) + receipt, err := tx.Included.Eval(ff.ctx) + ff.require.NoError(err) + ff.require.Equal(types.ReceiptStatusSuccessful, receipt.Status) + + endBalance := from.GetBalance() + vaultsAfter := ff.getVaultBalances(client) + vaultIncreases := ff.calculateVaultIncreases(vaultsBefore, vaultsAfter) + coinbaseEndBalance := ff.getCoinbaseBalance(client) + coinbaseDiff := new(big.Int).Sub(coinbaseEndBalance, coinbaseStartBalance) + + l1Fee := big.NewInt(0) + if receipt.L1Fee != nil { + l1Fee = receipt.L1Fee + } + + block, err := client.InfoByHash(ff.ctx, receipt.BlockHash) + ff.require.NoError(err) + + baseFee := new(big.Int).Mul(block.BaseFee(), big.NewInt(int64(receipt.GasUsed))) + totalGasFee := new(big.Int).Mul(receipt.EffectiveGasPrice, big.NewInt(int64(receipt.GasUsed))) + priorityFee := new(big.Int).Sub(totalGasFee, baseFee) + + l2Fee := new(big.Int).Set(priorityFee) + + operatorFee := vaultIncreases.OperatorVault + + ff.validateVaultIncreaseFees(l2Fee, baseFee, priorityFee, l1Fee, operatorFee, coinbaseDiff, vaultsAfter, vaultsBefore) + + totalFee := new(big.Int).Add(l1Fee, l2Fee) + totalFee.Add(totalFee, baseFee) + totalFee.Add(totalFee, operatorFee) + + walletBalanceDiff := 
new(big.Int).Sub(startBalance.ToBig(), endBalance.ToBig()) + walletBalanceDiff.Sub(walletBalanceDiff, amount) + + fastLzSize, estimatedBrotliSize := ff.validateFjordFeatures(receipt, l1Fee) + ff.validateFeeDistribution(l1Fee, baseFee, priorityFee, operatorFee, vaultIncreases) + ff.validateTotalBalance(walletBalanceDiff, totalFee, vaultIncreases) + + return FjordFeesValidationResult{ + TransactionReceipt: receipt, + L1Fee: l1Fee, + L2Fee: l2Fee, + BaseFee: baseFee, + PriorityFee: priorityFee, + TotalFee: totalFee, + VaultBalances: vaultIncreases, + WalletBalanceDiff: walletBalanceDiff, + TransferAmount: amount, + FastLzSize: fastLzSize, + EstimatedBrotliSize: estimatedBrotliSize, + OperatorFee: operatorFee, + CoinbaseDiff: coinbaseDiff, + } +} + +// getVaultBalances gets the balances of the vaults +func (ff *FjordFees) getVaultBalances(client apis.EthClient) VaultBalances { + baseFee := ff.getBalance(client, predeploys.BaseFeeVaultAddr) + l1Fee := ff.getBalance(client, predeploys.L1FeeVaultAddr) + sequencer := ff.getBalance(client, predeploys.SequencerFeeVaultAddr) + operator := ff.getBalance(client, predeploys.OperatorFeeVaultAddr) + + return VaultBalances{ + BaseFeeVault: baseFee, + L1FeeVault: l1Fee, + SequencerVault: sequencer, + OperatorVault: operator, + } +} + +// getBalance gets the balance of an address +func (ff *FjordFees) getBalance(client apis.EthClient, addr common.Address) *big.Int { + balance, err := client.BalanceAt(ff.ctx, addr, nil) + ff.require.NoError(err) + return balance +} + +// calculateVaultIncreases calculates the increases in the vaults +func (ff *FjordFees) calculateVaultIncreases(before, after VaultBalances) VaultBalances { + return VaultBalances{ + BaseFeeVault: new(big.Int).Sub(after.BaseFeeVault, before.BaseFeeVault), + L1FeeVault: new(big.Int).Sub(after.L1FeeVault, before.L1FeeVault), + SequencerVault: new(big.Int).Sub(after.SequencerVault, before.SequencerVault), + OperatorVault: new(big.Int).Sub(after.OperatorVault, 
before.OperatorVault), + } +} + +// validateFjordFeatures validates that the features of the Fjord transaction are correct +func (ff *FjordFees) validateFjordFeatures(receipt *types.Receipt, l1Fee *big.Int) (uint64, *big.Int) { + ff.require.NotNil(receipt.L1Fee, "L1 fee should be present in Fjord") + ff.require.True(l1Fee.Cmp(big.NewInt(0)) > 0, "L1 fee should be greater than 0 in Fjord") + + client := ff.l2Network.inner.L2ELNode(match.FirstL2EL).EthClient() + + _, txs, err := client.InfoAndTxsByHash(ff.ctx, receipt.BlockHash) + ff.require.NoError(err) + + var signedTx *types.Transaction + for _, tx := range txs { + if tx.Hash() == receipt.TxHash { + signedTx = tx + break + } + } + ff.require.NotNil(signedTx, "should find the signed transaction") + + unsignedTx := types.NewTx(&types.DynamicFeeTx{ + Nonce: signedTx.Nonce(), + To: signedTx.To(), + Value: signedTx.Value(), + Gas: signedTx.Gas(), + GasFeeCap: signedTx.GasFeeCap(), + GasTipCap: signedTx.GasTipCap(), + Data: signedTx.Data(), + }) + + txUnsigned, err := unsignedTx.MarshalBinary() + ff.require.NoError(err) + txSigned, err := signedTx.MarshalBinary() + ff.require.NoError(err) + + fastLzSizeUnsigned := uint64(types.FlzCompressLen(txUnsigned) + 68) // overhead used by the original test + fastLzSizeSigned := uint64(types.FlzCompressLen(txSigned)) + + // Validate that FastLZ compression produces reasonable results + ff.require.Greater(fastLzSizeUnsigned, uint64(0), "FastLZ size should be positive") + ff.require.Greater(fastLzSizeSigned, uint64(0), "FastLZ size should be positive") + + txLenGPO := len(txUnsigned) + 68 + flzUpperBound := uint64(txLenGPO + txLenGPO/255 + 16) + ff.require.LessOrEqual(fastLzSizeUnsigned, flzUpperBound, "Compressed size should not exceed upper bound") + + signedUpperBound := uint64(len(txSigned) + len(txSigned)/255 + 16) + ff.require.LessOrEqual(fastLzSizeSigned, signedUpperBound, "Compressed size should not exceed upper bound") + + receiptL1Fee := receipt.L1Fee + if receiptL1Fee == 
nil { + ff.t.Logf("L1 fee is nil in receipt, skipping L1 fee validation") + return fastLzSizeSigned, nil + } + + expectedFee, err := CalculateFjordL1Cost(ff.ctx, client, signedTx.RollupCostData(), receipt.BlockHash) + ff.require.NoError(err, "should calculate L1 fee") + + ff.require.Equalf(expectedFee, receiptL1Fee, "Calculated L1 fee should match receipt L1 fee (expected=%s actual=%s)", expectedFee.String(), receiptL1Fee.String()) + + ff.require.Equalf(expectedFee, receipt.L1Fee, "L1 fee in receipt must be correct (expected=%s actual=%s)", expectedFee.String(), receipt.L1Fee.String()) + + return fastLzSizeSigned, expectedFee +} + +// validateFeeDistribution validates that the fees are distributed correctly to the vaults +func (ff *FjordFees) validateFeeDistribution(l1Fee, baseFee, priorityFee, operatorFee *big.Int, vaults VaultBalances) { + ff.require.True(l1Fee.Sign() >= 0, "L1 fee must be non-negative") + ff.require.True(baseFee.Sign() > 0, "Base fee must be positive") + ff.require.True(priorityFee.Sign() >= 0, "Priority fee must be non-negative") + ff.require.True(operatorFee.Sign() >= 0, "Operator fee must be non-negative") + + ff.require.Equal(l1Fee, vaults.L1FeeVault, "L1 fee must match L1FeeVault increase") + ff.require.Equal(baseFee, vaults.BaseFeeVault, "Base fee must match BaseFeeVault increase") + ff.require.Equal(priorityFee, vaults.SequencerVault, "Priority fee must match SequencerFeeVault increase") + ff.require.Equal(operatorFee, vaults.OperatorVault, "Operator fee must match OperatorFeeVault increase") +} + +// validateTotalBalance validates that the total balance of the wallet and the vaults is correct +func (ff *FjordFees) validateTotalBalance(walletDiff *big.Int, totalFee *big.Int, vaults VaultBalances) { + totalVaultIncrease := new(big.Int).Add(vaults.BaseFeeVault, vaults.L1FeeVault) + totalVaultIncrease.Add(totalVaultIncrease, vaults.SequencerVault) + totalVaultIncrease.Add(totalVaultIncrease, vaults.OperatorVault) + + 
ff.require.Equal(walletDiff, totalFee, "Wallet balance difference must equal total fees") + ff.require.Equal(totalVaultIncrease, totalFee, "Total vault increases must equal total fees") +} + +// getCoinbaseBalance gets the balance of the coinbase address (block miner/sequencer) +func (ff *FjordFees) getCoinbaseBalance(client apis.EthClient) *big.Int { + block, err := client.InfoByLabel(ff.ctx, "latest") + ff.require.NoError(err, "should get latest block") + + coinbase := block.Coinbase() + balance, err := client.BalanceAt(ff.ctx, coinbase, nil) + ff.require.NoError(err, "should get coinbase balance") + return balance +} + +// validateVaultIncreaseFees validates that the fees are distributed correctly to the vaults +func (ff *FjordFees) validateVaultIncreaseFees( + l2Fee, baseFee, priorityFee, l1Fee, operatorFee, coinbaseDiff *big.Int, + vaultsAfter, vaultsBefore VaultBalances) { + + ff.require.Equal(l2Fee, coinbaseDiff, "L2 fee must equal coinbase difference (coinbase is always sequencer fee vault)") + + vaultsIncrease := ff.calculateVaultIncreases(vaultsBefore, vaultsAfter) + ff.require.Equal(baseFee, vaultsIncrease.BaseFeeVault, "base fee must match BaseFeeVault increase") + + ff.require.Equal(priorityFee, vaultsIncrease.SequencerVault, "priority fee must match SequencerFeeVault increase") + + ff.require.Equal(l1Fee, vaultsIncrease.L1FeeVault, "L1 fee must match L1FeeVault increase") + + ff.require.Equal(operatorFee, vaultsIncrease.OperatorVault, "operator fee must match OperatorFeeVault increase") + + ff.t.Logf("Comprehensive fee validation passed:") + ff.t.Logf(" L2 Fee: %s (coinbase diff: %s)", l2Fee, coinbaseDiff) + ff.t.Logf(" Base Fee: %s (vault increase: %s)", baseFee, vaultsIncrease.BaseFeeVault) + ff.t.Logf(" Priority Fee: %s (vault increase: %s)", priorityFee, vaultsIncrease.SequencerVault) + ff.t.Logf(" L1 Fee: %s (vault increase: %s)", l1Fee, vaultsIncrease.L1FeeVault) + ff.t.Logf(" Operator Fee: %s (vault increase: %s)", operatorFee, 
vaultsIncrease.OperatorVault) +} + +// FindSignedTransactionFromReceipt finds the signed transaction from a receipt and block +func FindSignedTransactionFromReceipt(ctx context.Context, client apis.EthClient, receipt *types.Receipt) (*types.Transaction, error) { + _, txs, err := client.InfoAndTxsByHash(ctx, receipt.BlockHash) + if err != nil { + return nil, err + } + + for _, tx := range txs { + if tx.Hash() == receipt.TxHash { + return tx, nil + } + } + return nil, fmt.Errorf("signed transaction not found for hash %s", receipt.TxHash) +} + +// CreateUnsignedTransactionFromSigned creates an unsigned transaction from a signed one +func CreateUnsignedTransactionFromSigned(signedTx *types.Transaction) (*types.Transaction, error) { + return types.NewTx(&types.DynamicFeeTx{ + Nonce: signedTx.Nonce(), + To: signedTx.To(), + Value: signedTx.Value(), + Gas: signedTx.Gas(), + GasFeeCap: signedTx.GasFeeCap(), + GasTipCap: signedTx.GasTipCap(), + Data: signedTx.Data(), + }), nil +} + +// ReadGasPriceOracleL1FeeAt reads the L1 fee from GasPriceOracle for an unsigned transaction +// evaluated against a specific L2 block hash. +func ReadGasPriceOracleL1FeeAt(ctx context.Context, client apis.EthClient, gpo *bindings.GasPriceOracle, txUnsigned []byte, blockHash common.Hash) (*big.Int, error) { + overrideBlockOpt := func(ptx *txplan.PlannedTx) { + ptx.AgainstBlock.Fn(func(ctx context.Context) (eth.BlockInfo, error) { + return client.InfoByHash(ctx, blockHash) + }) + } + result, err := contractio.Read(gpo.GetL1Fee(txUnsigned), ctx, overrideBlockOpt) + if err != nil { + return nil, err + } + return result.ToBig(), nil +} + +// ReadGasPriceOracleL1FeeUpperBoundAt reads the L1 fee upper bound for a tx length pinned to a block hash. 
+func ReadGasPriceOracleL1FeeUpperBoundAt(ctx context.Context, client apis.EthClient, gpo *bindings.GasPriceOracle, txLen int, blockHash common.Hash) (*big.Int, error) { + overrideBlockOpt := func(ptx *txplan.PlannedTx) { + ptx.AgainstBlock.Fn(func(ctx context.Context) (eth.BlockInfo, error) { + return client.InfoByHash(ctx, blockHash) + }) + } + result, err := contractio.Read(gpo.GetL1FeeUpperBound(big.NewInt(int64(txLen))), ctx, overrideBlockOpt) + if err != nil { + return nil, err + } + return result.ToBig(), nil +} + +// ValidateL1FeeMatches checks that the calculated L1 fee matches the actual receipt L1 fee +func ValidateL1FeeMatches(t devtest.T, calculatedFee, receiptFee *big.Int) { + require := t.Require() + require.NotNil(receiptFee, "L1 fee should be present in receipt") + require.Equalf(calculatedFee.Uint64(), receiptFee.Uint64(), "L1 fee mismatch (expected=%d actual=%d)", calculatedFee.Uint64(), receiptFee.Uint64()) +} + +// CalculateFjordL1Cost calculates L1 cost using Fjord formula with block-specific L1 state +func CalculateFjordL1Cost(ctx context.Context, client apis.EthClient, rollupCostData types.RollupCostData, blockHash common.Hash) (*big.Int, error) { + l1Block := bindings.NewL1Block( + bindings.WithClient(client), + bindings.WithTo(predeploys.L1BlockAddr), + ) + + overrideBlockOpt := func(ptx *txplan.PlannedTx) { + ptx.AgainstBlock.Fn(func(ctx context.Context) (eth.BlockInfo, error) { + return client.InfoByHash(ctx, blockHash) + }) + } + + baseFeeScalar, err := contractio.Read(l1Block.BasefeeScalar(), ctx, overrideBlockOpt) + if err != nil { + return nil, err + } + l1BaseFee, err := contractio.Read(l1Block.Basefee(), ctx, overrideBlockOpt) + if err != nil { + return nil, err + } + blobBaseFeeScalar, err := contractio.Read(l1Block.BlobBaseFeeScalar(), ctx, overrideBlockOpt) + if err != nil { + return nil, err + } + blobBaseFee, err := contractio.Read(l1Block.BlobBaseFee(), ctx, overrideBlockOpt) + if err != nil { + return nil, err + } + + 
costFunc := types.NewL1CostFuncFjord( + l1BaseFee, + blobBaseFee, + new(big.Int).SetUint64(uint64(baseFeeScalar)), + new(big.Int).SetUint64(uint64(blobBaseFeeScalar))) + + fee, _ := costFunc(rollupCostData) + return fee, nil +} diff --git a/op-devstack/dsl/funder.go b/op-devstack/dsl/funder.go index 6bac8fcda67f7..525d716a97280 100644 --- a/op-devstack/dsl/funder.go +++ b/op-devstack/dsl/funder.go @@ -68,6 +68,9 @@ func (f *Funder) FundAtLeast(wallet *EOA, amount eth.ETH) eth.ETH { if currentBalance.Lt(amount) { missing := amount.Sub(currentBalance) f.faucet.Fund(wallet.Address(), missing) + finalBalance := currentBalance.Add(missing) + wallet.WaitForBalance(finalBalance) + return finalBalance } return currentBalance } diff --git a/op-devstack/dsl/hd_wallet.go b/op-devstack/dsl/hd_wallet.go index 979ab65959690..4e8a39d524665 100644 --- a/op-devstack/dsl/hd_wallet.go +++ b/op-devstack/dsl/hd_wallet.go @@ -5,7 +5,9 @@ import ( "os" "sync/atomic" + hdwallet "github.com/ethereum-optimism/go-ethereum-hdwallet" "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" @@ -25,6 +27,12 @@ type HDWallet struct { hdWalletName string } +func NewRandomHDWallet(t devtest.T, startIndex uint64) *HDWallet { + mnemonic, err := hdwallet.NewMnemonic(256) + require.NoError(t, err, "failed to generate mnemonic") + return NewHDWallet(t, mnemonic, startIndex) +} + func NewHDWallet(t devtest.T, mnemonic string, startIndex uint64) *HDWallet { hd, err := devkeys.NewSaltedDevKeys(mnemonic, os.Getenv(SaltEnvVar)) t.Require().NoError(err, "must have valid mnemonic") diff --git a/op-devstack/dsl/l2_cl.go b/op-devstack/dsl/l2_cl.go index 96dd6477022b6..d064f0019b162 100644 --- a/op-devstack/dsl/l2_cl.go +++ b/op-devstack/dsl/l2_cl.go @@ -316,6 +316,20 @@ func (cl *L2CLNode) ConnectPeer(peer *L2CLNode) { cl.require.NoError(err, "failed to connect peer") } 
+func (cl *L2CLNode) IsP2PConnected(peer *L2CLNode) { + myInfo := cl.PeerInfo() + strategy := &retry.ExponentialStrategy{Min: 10 * time.Second, Max: 30 * time.Second, MaxJitter: 250 * time.Millisecond} + err := retry.Do0(cl.ctx, 5, strategy, func() error { + for _, p := range peer.Peers().Peers { + if p.PeerID == myInfo.PeerID { + return nil + } + } + return errors.New("peer not connected yet") + }) + cl.require.NoError(err, "peer not connected") +} + type safeHeadDbMatchOpts struct { minRequiredL2Block *uint64 } @@ -334,3 +348,30 @@ func (cl *L2CLNode) VerifySafeHeadDatabaseMatches(sourceOfTruth *L2CLNode, args sourceOfTruth.AwaitMinL1Processed(l1Block) checkSafeHeadConsistent(cl.t, l1Block, cl, sourceOfTruth, opts.minRequiredL2Block) } + +func (cl *L2CLNode) WaitForNonZeroUnsafeTime(ctx context.Context) *eth.SyncStatus { + require := cl.require + + var ss *eth.SyncStatus + err := retry.Do0(ctx, 10, retry.Fixed(2*time.Second), func() error { + ss = cl.SyncStatus() + require.NotNil(ss, "L2CL should have sync status") + if ss.UnsafeL2.Time == 0 { + return fmt.Errorf("L2CL unsafe time is still zero") + } + return nil + }) + require.NoError(err, "L2CL unsafe time should be set within retry limit") + require.NotZero(ss.UnsafeL2.Time, "L2CL unsafe time should not be zero") + + return ss +} + +func (cl *L2CLNode) SignalTarget(el *L2ELNode, targetNum uint64) { + cl.log.Info("Signaling L2CL", "target", targetNum) + payload := el.PayloadByNumber(targetNum) + err := retry.Do0(cl.ctx, 3, retry.Fixed(2*time.Second), func() error { + return cl.inner.RollupAPI().PostUnsafePayload(cl.ctx, payload) + }) + cl.require.NoErrorf(err, "failed to post unsafe payload via admin API: target %d", targetNum) +} diff --git a/op-devstack/dsl/l2_el.go b/op-devstack/dsl/l2_el.go index 29fe0b543c528..5d72ad4328b3d 100644 --- a/op-devstack/dsl/l2_el.go +++ b/op-devstack/dsl/l2_el.go @@ -53,6 +53,14 @@ func (el *L2ELNode) BlockRefByLabel(label eth.BlockLabel) eth.L2BlockRef { return block } +func 
(el *L2ELNode) BlockRefByHash(hash common.Hash) eth.L2BlockRef { + ctx, cancel := context.WithTimeout(el.ctx, DefaultTimeout) + defer cancel() + block, err := el.inner.L2EthClient().L2BlockRefByHash(ctx, hash) + el.require.NoError(err, "block not found using block hash") + return block +} + func (el *L2ELNode) AdvancedFn(label eth.BlockLabel, block uint64) CheckFunc { return func() error { initial := el.BlockRefByLabel(label) @@ -90,6 +98,23 @@ func (el *L2ELNode) NotAdvancedFn(label eth.BlockLabel) CheckFunc { } } +func (el *L2ELNode) ReachedFn(label eth.BlockLabel, target uint64, attempts int) CheckFunc { + return func() error { + logger := el.log.With("id", el.inner.ID(), "chain", el.ChainID(), "label", label, "target", target) + logger.Info("Expecting L2EL to reach") + return retry.Do0(el.ctx, attempts, &retry.FixedStrategy{Dur: 2 * time.Second}, + func() error { + head := el.BlockRefByLabel(label) + if head.Number >= target { + logger.Info("L2EL advanced", "target", target) + return nil + } + logger.Info("L2EL sync status", "current", head.Number) + return fmt.Errorf("expected head to advance: %s", label) + }) + } +} + func (el *L2ELNode) BlockRefByNumber(num uint64) eth.L2BlockRef { ctx, cancel := context.WithTimeout(el.ctx, DefaultTimeout) defer cancel() @@ -135,6 +160,10 @@ func (el *L2ELNode) Advanced(label eth.BlockLabel, block uint64) { el.require.NoError(el.AdvancedFn(label, block)()) } +func (el *L2ELNode) Reached(label eth.BlockLabel, block uint64, attempts int) { + el.require.NoError(el.ReachedFn(label, block, attempts)()) +} + func (el *L2ELNode) NotAdvanced(label eth.BlockLabel) { el.require.NoError(el.NotAdvancedFn(label)()) } @@ -188,3 +217,40 @@ func (el *L2ELNode) Start() { func (el *L2ELNode) PeerWith(peer *L2ELNode) { sysgo.ConnectP2P(el.ctx, el.require, el.inner.L2EthClient().RPC(), peer.inner.L2EthClient().RPC()) } + +func (el *L2ELNode) DisconnectPeerWith(peer *L2ELNode) { + sysgo.DisconnectP2P(el.ctx, el.require, 
el.inner.L2EthClient().RPC(), peer.inner.L2EthClient().RPC()) +} + +func (el *L2ELNode) PayloadByNumber(number uint64) *eth.ExecutionPayloadEnvelope { + payload, err := el.inner.L2EthExtendedClient().PayloadByNumber(el.ctx, number) + el.require.NoError(err, "failed to get payload") + return payload +} + +// NewPayload fetches payload for target number from the reference EL Node, and inserts the payload +func (el *L2ELNode) NewPayload(refNode *L2ELNode, number uint64) *NewPayloadResult { + el.log.Info("NewPayload", "number", number, "refNode", refNode) + payload := refNode.PayloadByNumber(number) + status, err := el.inner.L2EngineClient().NewPayload(el.ctx, payload.ExecutionPayload, payload.ParentBeaconBlockRoot) + return &NewPayloadResult{T: el.t, Status: status, Err: err} +} + +// ForkchoiceUpdate fetches FCU target hashes from the reference EL node, and FCU update with attributes +func (el *L2ELNode) ForkchoiceUpdate(refNode *L2ELNode, unsafe, safe, finalized uint64, attr *eth.PayloadAttributes) *ForkchoiceUpdateResult { + result := &ForkchoiceUpdateResult{T: el.t} + refresh := func() { + el.log.Info("ForkchoiceUpdate", "unsafe", unsafe, "safe", safe, "finalized", finalized, "attr", attr, "refNode", refNode) + state := ð.ForkchoiceState{ + HeadBlockHash: refNode.BlockRefByNumber(unsafe).Hash, + SafeBlockHash: refNode.BlockRefByNumber(safe).Hash, + FinalizedBlockHash: refNode.BlockRefByNumber(finalized).Hash, + } + res, err := el.inner.L2EngineClient().ForkchoiceUpdate(el.ctx, state, attr) + result.Result = res + result.Err = err + } + result.Refresh = refresh + result.Refresh() + return result +} diff --git a/op-devstack/dsl/l2_network.go b/op-devstack/dsl/l2_network.go index 6a68ee802b763..fa0572da12670 100644 --- a/op-devstack/dsl/l2_network.go +++ b/op-devstack/dsl/l2_network.go @@ -226,20 +226,23 @@ func (n *L2Network) AwaitActivation(t devtest.T, forkName rollup.ForkName) eth.B el := n.Escape().L2ELNode(match.FirstL2EL) - unsafeHead, err := retry.Do(t.Ctx(), 
120, &retry.FixedStrategy{Dur: 500 * time.Millisecond}, func() (eth.BlockRef, error) { - unsafeHead, err := el.EthClient().BlockRefByLabel(t.Ctx(), eth.Unsafe) - if err != nil { - return eth.BlockRef{}, err - } - if !n.inner.RollupConfig().IsActivationBlockForFork(unsafeHead.Time, forkName) { - return eth.BlockRef{}, fmt.Errorf("not %s activation block", forkName) - } - return unsafeHead, nil // success - }) + rollupCfg := n.Escape().RollupConfig() + maybeActivationTime := rollupCfg.ActivationTimeFor(forkName) + require.NotNil(maybeActivationTime, "Required fork is not scheduled for activation") + activationTime := *maybeActivationTime + if activationTime == 0 { + block, err := el.EthClient().BlockRefByNumber(t.Ctx(), 0) + require.NoError(err, "Fork activated at genesis, but failed to get genesis block") + return block.ID() + } + blockNum, err := rollupCfg.TargetBlockNumber(activationTime) require.NoError(err) - t.Logger().Info("Activation block", "block", unsafeHead.ID()) + NewL2ELNode(el, n.control).WaitForBlockNumber(blockNum).ID() + activationBlock, err := el.EthClient().BlockRefByNumber(t.Ctx(), blockNum) + require.NoError(err, "Failed to get activation block") + t.Logger().Info("Activation block", "block", activationBlock.ID()) + return activationBlock.ID() - return unsafeHead.ID() } func (n *L2Network) DisputeGameFactoryProxyAddr() common.Address { diff --git a/op-devstack/dsl/proofs/claim.go b/op-devstack/dsl/proofs/claim.go index 08418883cbc66..51051224e890e 100644 --- a/op-devstack/dsl/proofs/claim.go +++ b/op-devstack/dsl/proofs/claim.go @@ -2,6 +2,7 @@ package proofs import ( "fmt" + "slices" "time" "github.com/ethereum/go-ethereum/common" @@ -17,12 +18,12 @@ const defaultTimeout = 20 * time.Minute type Claim struct { t devtest.T require *require.Assertions - Index int64 + Index uint64 claim bindings.Claim game *FaultDisputeGame } -func newClaim(t devtest.T, require *require.Assertions, claimIndex int64, claim bindings.Claim, game *FaultDisputeGame) 
*Claim { +func newClaim(t devtest.T, require *require.Assertions, claimIndex uint64, claim bindings.Claim, game *FaultDisputeGame) *Claim { return &Claim{ t: t, require: require, @@ -32,24 +33,46 @@ func newClaim(t devtest.T, require *require.Assertions, claimIndex int64, claim } } +func (c *Claim) String() string { + pos := c.claim.Position + return fmt.Sprintf("%v - Position: %v, Depth: %v, IndexAtDepth: %v ClaimHash: %v, Countered By: %v, ParentIndex: %v Claimant: %v Bond: %v\n", + c.Index, pos.ToGIndex(), pos.Depth(), pos.IndexAtDepth(), c.claim.Value.Hex(), c.claim.CounteredBy, c.claim.ParentContractIndex, c.claim.Claimant, c.claim.Bond) +} + func (c *Claim) Value() common.Hash { return c.claim.Value } +func (c *Claim) Claimant() common.Address { + return c.claim.Claimant +} + func (c *Claim) Depth() uint64 { return uint64(c.claim.Depth()) } // WaitForCounterClaim waits for the claim to be countered by another claim being posted. // Return the new claim that counters this claim. -func (c *Claim) WaitForCounterClaim() *Claim { - counterIdx, counterClaim := c.game.waitForClaim(defaultTimeout, fmt.Sprintf("failed to find claim with parent idx %v", c.Index), func(claimIdx int64, claim bindings.Claim) bool { - return int64(claim.ParentContractIndex) == c.Index +func (c *Claim) WaitForCounterClaim(ignoreClaims ...*Claim) *Claim { + counterIdx, counterClaim := c.game.waitForClaim(defaultTimeout, fmt.Sprintf("failed to find claim with parent idx %v", c.Index), func(claimIdx uint64, claim bindings.Claim) bool { + return uint64(claim.ParentContractIndex) == c.Index && !containsClaim(claimIdx, ignoreClaims) }) return newClaim(c.t, c.require, counterIdx, counterClaim, c.game) } +func (c *Claim) VerifyNoCounterClaim() { + for i, claim := range c.game.allClaims() { + c.require.NotEqualValuesf(c.Index, claim.ParentContractIndex, "Found unexpected counter-claim at index %v: %v", i, claim) + } +} + func (c *Claim) Attack(eoa *dsl.EOA, newClaim common.Hash) *Claim { 
c.game.Attack(eoa, c.Index, newClaim) return c.WaitForCounterClaim() } + +func containsClaim(claimIdx uint64, haystack []*Claim) bool { + return slices.ContainsFunc(haystack, func(candidate *Claim) bool { + return candidate.Index == claimIdx + }) +} diff --git a/op-devstack/dsl/proofs/dispute_game_factory.go b/op-devstack/dsl/proofs/dispute_game_factory.go index e654c878987d2..3dc0166df0440 100644 --- a/op-devstack/dsl/proofs/dispute_game_factory.go +++ b/op-devstack/dsl/proofs/dispute_game_factory.go @@ -5,12 +5,13 @@ import ( "math/big" "time" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/require" - cTypes "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" + challengerTypes "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/dsl/contract" @@ -26,7 +27,9 @@ type DisputeGameFactory struct { l1Network *dsl.L1Network ethClient apis.EthClient dgf *bindings.DisputeGameFactory + addr common.Address supervisor *dsl.Supervisor + gameHelper *GameHelper } func NewDisputeGameFactory(t devtest.T, l1Network *dsl.L1Network, ethClient apis.EthClient, dgfAddr common.Address, supervisor *dsl.Supervisor) *DisputeGameFactory { @@ -38,14 +41,17 @@ func NewDisputeGameFactory(t devtest.T, l1Network *dsl.L1Network, ethClient apis log: t.Logger(), l1Network: l1Network, dgf: dgf, + addr: dgfAddr, supervisor: supervisor, ethClient: ethClient, } } type GameCfg struct { - allowFuture bool - allowUnsafe bool + allowFuture bool + allowUnsafe bool + rootClaimSet bool + rootClaim common.Hash } type GameOpt interface { Apply(cfg *GameCfg) @@ -68,6 +74,13 @@ func WithFutureProposal() GameOpt { }) } +func WithRootClaim(claim 
common.Hash) GameOpt { + return gameOptFn(func(c *GameCfg) { + c.rootClaim = claim + c.rootClaimSet = true + }) +} + func NewGameCfg(opts ...GameOpt) *GameCfg { cfg := &GameCfg{} for _, opt := range opts { @@ -76,6 +89,19 @@ func NewGameCfg(opts ...GameOpt) *GameCfg { return cfg } +func (f *DisputeGameFactory) Address() common.Address { + return f.addr +} + +func (f *DisputeGameFactory) getGameHelper(eoa *dsl.EOA) *GameHelper { + if f.gameHelper != nil { + return f.gameHelper + } + gs := DeployGameHelper(f.t, eoa) + f.gameHelper = gs + return gs +} + func (f *DisputeGameFactory) GameCount() int64 { return contract.Read(f.dgf.GameCount()).Int64() } @@ -83,7 +109,17 @@ func (f *DisputeGameFactory) GameCount() int64 { func (f *DisputeGameFactory) GameAtIndex(idx int64) *FaultDisputeGame { gameInfo := contract.Read(f.dgf.GameAtIndex(big.NewInt(idx))) game := bindings.NewFaultDisputeGame(bindings.WithClient(f.ethClient), bindings.WithTo(gameInfo.Proxy), bindings.WithTest(f.t)) - return NewFaultDisputeGame(f.t, f.require, game) + return NewFaultDisputeGame(f.t, f.require, gameInfo.Proxy, f.getGameHelper, game) +} + +func (f *DisputeGameFactory) GameImpl(gameType challengerTypes.GameType) *FaultDisputeGame { + implAddr := contract.Read(f.dgf.GameImpls(uint32(gameType))) + game := bindings.NewFaultDisputeGame(bindings.WithClient(f.ethClient), bindings.WithTo(implAddr), bindings.WithTest(f.t)) + return NewFaultDisputeGame(f.t, f.require, implAddr, f.getGameHelper, game) +} + +func (f *DisputeGameFactory) GameArgs(gameType challengerTypes.GameType) []byte { + return contract.Read(f.dgf.GameArgs(uint32(gameType))) } func (f *DisputeGameFactory) WaitForGame() *FaultDisputeGame { @@ -98,22 +134,29 @@ func (f *DisputeGameFactory) WaitForGame() *FaultDisputeGame { return f.GameAtIndex(initialCount) } -func (f *DisputeGameFactory) StartSuperCannonGame(eoa *dsl.EOA, rootClaim common.Hash, opts ...GameOpt) *SuperFaultDisputeGame { +func (f *DisputeGameFactory) 
StartSuperCannonGame(eoa *dsl.EOA, opts ...GameOpt) *SuperFaultDisputeGame { + f.require.NotNil(f.supervisor, "supervisor is required to start super games") proposalTimestamp := f.supervisor.FetchSyncStatus().SafeTimestamp - gameType := uint32(cTypes.SuperCannonGameType) - return f.startSuperCannonGameOfType(eoa, proposalTimestamp, rootClaim, gameType, opts...) + return f.startSuperCannonGameOfType(eoa, proposalTimestamp, challengerTypes.SuperCannonGameType, opts...) } -func (f *DisputeGameFactory) startSuperCannonGameOfType(eoa *dsl.EOA, timestamp uint64, rootClaim common.Hash, gameType uint32, opts ...GameOpt) *SuperFaultDisputeGame { +func (f *DisputeGameFactory) startSuperCannonGameOfType(eoa *dsl.EOA, timestamp uint64, gameType challengerTypes.GameType, opts ...GameOpt) *SuperFaultDisputeGame { cfg := NewGameCfg(opts...) extraData := f.createSuperGameExtraData(timestamp, cfg) - game := f.createNewGame(eoa, gameType, rootClaim, extraData) + rootClaim := cfg.rootClaim + if !cfg.rootClaimSet { + // Default to the correct root claim + response := f.supervisor.FetchSuperRootAtTimestamp(timestamp) + rootClaim = common.Hash(response.SuperRoot) + } + game, addr := f.createNewGame(eoa, gameType, rootClaim, extraData) - return NewSuperFaultDisputeGame(f.t, f.require, game) + return NewSuperFaultDisputeGame(f.t, f.require, addr, f.getGameHelper, game) } func (f *DisputeGameFactory) createSuperGameExtraData(timestamp uint64, cfg *GameCfg) []byte { + f.require.NotNil(f.supervisor, "supervisor is required to create super games") if !cfg.allowFuture { f.supervisor.AwaitMinCrossSafeTimestamp(timestamp) } @@ -122,13 +165,13 @@ func (f *DisputeGameFactory) createSuperGameExtraData(timestamp uint64, cfg *Gam return extraData } -func (f *DisputeGameFactory) createNewGame(eoa *dsl.EOA, gameType uint32, claim common.Hash, extraData []byte) *bindings.FaultDisputeGame { +func (f *DisputeGameFactory) createNewGame(eoa *dsl.EOA, gameType challengerTypes.GameType, claim common.Hash, 
extraData []byte) (*bindings.FaultDisputeGame, common.Address) { f.log.Info("Creating dispute game", "gameType", gameType, "claim", claim.Hex(), "extradata", common.Bytes2Hex(extraData)) // Pull some metadata we need to construct a new game - requiredBonds := contract.Read(f.dgf.InitBonds(gameType)) + requiredBonds := f.initBond(gameType) - receipt := contract.Write(eoa, f.dgf.Create(gameType, claim, extraData), txplan.WithValue(requiredBonds), txplan.WithGasRatio(2)) + receipt := contract.Write(eoa, f.dgf.Create(uint32(gameType), claim, extraData), txplan.WithValue(requiredBonds), txplan.WithGasRatio(2)) f.require.Equal(types.ReceiptStatusSuccessful, receipt.Status) // Extract logs from receipt @@ -138,5 +181,31 @@ func (f *DisputeGameFactory) createNewGame(eoa *dsl.EOA, gameType uint32, claim gameAddr := createdLog.DisputeProxy log.Info("Dispute game created", "address", gameAddr.Hex()) - return bindings.NewFaultDisputeGame(bindings.WithClient(f.ethClient), bindings.WithTo(gameAddr), bindings.WithTest(f.t)) + return bindings.NewFaultDisputeGame(bindings.WithClient(f.ethClient), bindings.WithTo(gameAddr), bindings.WithTest(f.t)), gameAddr +} + +func (f *DisputeGameFactory) initBond(gameType challengerTypes.GameType) eth.ETH { + return eth.WeiBig(contract.Read(f.dgf.InitBonds(uint32(gameType)))) +} + +func (f *DisputeGameFactory) CreateHelperEOA(eoa *dsl.EOA) *GameHelperEOA { + helper := f.getGameHelper(eoa) + eoaHelper := helper.AuthEOA(eoa) + return &GameHelperEOA{ + helper: eoaHelper, + EOA: eoa, + } +} + +type GameHelperEOA struct { + helper *GameHelper + EOA *dsl.EOA +} + +func (a *GameHelperEOA) PerformMoves(game *FaultDisputeGame, moves ...GameHelperMove) []*Claim { + return a.helper.PerformMoves(a.EOA, game, moves) +} + +func (a *GameHelperEOA) Address() common.Address { + return a.EOA.Address() } diff --git a/op-devstack/dsl/proofs/fault_dispute_game.go b/op-devstack/dsl/proofs/fault_dispute_game.go index 5dcde362127b4..aa6bc8a15aeea 100644 --- 
a/op-devstack/dsl/proofs/fault_dispute_game.go +++ b/op-devstack/dsl/proofs/fault_dispute_game.go @@ -2,9 +2,13 @@ package proofs import ( "context" + "fmt" "math/big" "time" + challengerTypes "github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" + gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/require" @@ -17,22 +21,28 @@ import ( "github.com/ethereum-optimism/optimism/op-service/txplan" ) +type gameHelperProvider func(deployer *dsl.EOA) *GameHelper + type FaultDisputeGame struct { - t devtest.T - require *require.Assertions - game *bindings.FaultDisputeGame + t devtest.T + require *require.Assertions + game *bindings.FaultDisputeGame + Address common.Address + helperProvider gameHelperProvider } -func NewFaultDisputeGame(t devtest.T, require *require.Assertions, game *bindings.FaultDisputeGame) *FaultDisputeGame { +func NewFaultDisputeGame(t devtest.T, require *require.Assertions, addr common.Address, helperProvider gameHelperProvider, game *bindings.FaultDisputeGame) *FaultDisputeGame { return &FaultDisputeGame{ - t: t, - require: require, - game: game, + t: t, + require: require, + game: game, + Address: addr, + helperProvider: helperProvider, } } -func (g *FaultDisputeGame) MaxDepth() uint64 { - return contract.Read(g.game.MaxGameDepth()).Uint64() +func (g *FaultDisputeGame) MaxDepth() challengerTypes.Depth { + return challengerTypes.Depth(contract.Read(g.game.MaxGameDepth()).Uint64()) } func (g *FaultDisputeGame) SplitDepth() uint64 { @@ -40,61 +50,81 @@ func (g *FaultDisputeGame) SplitDepth() uint64 { } func (g *FaultDisputeGame) RootClaim() *Claim { - return g.ClaimAtIndex(int64(0)) + return g.ClaimAtIndex(0) } func (g *FaultDisputeGame) L2SequenceNumber() *big.Int { return contract.Read(g.game.L2SequenceNumber()) } -func (g *FaultDisputeGame) 
ClaimAtIndex(claimIndex int64) *Claim { +func (g *FaultDisputeGame) ClaimAtIndex(claimIndex uint64) *Claim { claim := g.claimAtIndex(claimIndex) return g.newClaim(claimIndex, claim) } -func (g *FaultDisputeGame) Attack(eoa *dsl.EOA, claimIdx int64, newClaim common.Hash) { +func (g *FaultDisputeGame) Attack(eoa *dsl.EOA, claimIdx uint64, newClaim common.Hash) { claim := g.claimAtIndex(claimIdx) g.t.Logf("Attacking claim %v (depth: %d) with counter-claim %v", claimIdx, claim.Position.Depth(), newClaim) - newPosition := claim.Position.Attack().ToGIndex() - requiredBond := contract.Read(g.game.GetRequiredBond((*bindings.Uint128)(newPosition))) + requiredBond := g.requiredBond(claim.Position.Attack()) + + attackCall := g.game.Attack(claim.Value, new(big.Int).SetUint64(claimIdx), newClaim) - receipt := contract.Write(eoa, g.game.Attack(claim.Value, big.NewInt(claimIdx), newClaim), txplan.WithValue(requiredBond)) - g.require.Equal(types.ReceiptStatusSuccessful, receipt.Status) + receipt := contract.Write(eoa, attackCall, txplan.WithValue(requiredBond), txplan.WithGasRatio(2)) + g.t.Require().Equal(receipt.Status, types.ReceiptStatusSuccessful) } -func (g *FaultDisputeGame) newClaim(claimIndex int64, claim bindings.Claim) *Claim { +func (g *FaultDisputeGame) PerformMoves(eoa *dsl.EOA, moves ...GameHelperMove) []*Claim { + return g.helperProvider(eoa).PerformMoves(eoa, g, moves) +} + +func (g *FaultDisputeGame) requiredBond(pos challengerTypes.Position) eth.ETH { + return eth.WeiBig(contract.Read(g.game.GetRequiredBond((*bindings.Uint128)(pos.ToGIndex())))) +} + +func (g *FaultDisputeGame) status() gameTypes.GameStatus { + status := contract.Read(g.game.Status()) + return gameTypes.GameStatus(status) +} + +func (g *FaultDisputeGame) newClaim(claimIndex uint64, claim bindings.Claim) *Claim { return newClaim(g.t, g.require, claimIndex, claim, g) } -func (g *FaultDisputeGame) claimAtIndex(claimIndex int64) bindings.Claim { - return 
contract.Read(g.game.ClaimData(big.NewInt(claimIndex))).Decode() +func (g *FaultDisputeGame) claimAtIndex(claimIndex uint64) bindings.Claim { + return contract.Read(g.game.ClaimData(new(big.Int).SetUint64(claimIndex))).Decode() } func (g *FaultDisputeGame) allClaims() []bindings.Claim { - // TODO(#15948) - do we need to batch these? See: op-service/sources/batching.ReadArray - claimCount := contract.Read(g.game.ClaimDataLen()) + allClaimData := contract.ReadArray(g.game.ClaimDataLen(), func(i *big.Int) bindings.TypedCall[bindings.ClaimData] { + return g.game.ClaimData(i) + }) + + // Decode claims var claims []bindings.Claim - for i := int64(0); i < claimCount.Int64(); i++ { - claim := g.claimAtIndex(i) - claims = append(claims, claim) + for _, claimData := range allClaimData { + claims = append(claims, claimData.Decode()) } return claims } -func (g *FaultDisputeGame) waitForClaim(timeout time.Duration, errorMsg string, predicate func(claimIdx int64, claim bindings.Claim) bool) (int64, bindings.Claim) { +func (g *FaultDisputeGame) claimCount() uint64 { + return contract.Read(g.game.ClaimDataLen()).Uint64() +} + +func (g *FaultDisputeGame) waitForClaim(timeout time.Duration, errorMsg string, predicate func(claimIdx uint64, claim bindings.Claim) bool) (uint64, bindings.Claim) { timedCtx, cancel := context.WithTimeout(g.t.Ctx(), timeout) defer cancel() var matchedClaim bindings.Claim - var matchClaimIdx int64 + var matchClaimIdx uint64 err := wait.For(timedCtx, time.Second, func() (bool, error) { claims := g.allClaims() // Search backwards because the new claims are at the end and more likely the ones we want. 
for i := len(claims) - 1; i >= 0; i-- { claim := claims[i] - if predicate(int64(i), claim) { - matchClaimIdx = int64(i) + if predicate(uint64(i), claim) { + matchClaimIdx = uint64(i) matchedClaim = claim return true, nil } @@ -102,9 +132,28 @@ func (g *FaultDisputeGame) waitForClaim(timeout time.Duration, errorMsg string, return false, nil }) g.require.NoError(err, errorMsg) - // TODO(#15948) - Log GameData() - //if err != nil { // Avoid waiting time capturing game data when there's no error - // g.require.NoErrorf(err, "%v\n%v", errorMsg, g.GameData(ctx)) - //} + if err != nil { // Avoid waiting time capturing game data when there's no error + g.require.NoErrorf(err, "%v\n%v", errorMsg, g.GameData()) + } return matchClaimIdx, matchedClaim } + +func (g *FaultDisputeGame) GameData() string { + maxDepth := g.MaxDepth() + splitDepth := g.SplitDepth() + claims := g.allClaims() + info := fmt.Sprintf("Claim count: %v\n", len(claims)) + for i, claim := range claims { + pos := claim.Position + info = info + fmt.Sprintf("%v - Position: %v, Depth: %v, IndexAtDepth: %v Trace Index: %v, ClaimHash: %v, Countered By: %v, ParentIndex: %v Claimant: %v Bond: %v\n", + i, claim.Position.ToGIndex(), pos.Depth(), pos.IndexAtDepth(), pos.TraceIndex(maxDepth), claim.Value.Hex(), claim.CounteredBy, claim.ParentContractIndex, claim.Claimant, claim.Bond) + } + seqNum := g.L2SequenceNumber() + status := g.status() + return fmt.Sprintf("Game %v - %v - L2 Block: %v - Split Depth: %v - Max Depth: %v:\n%v\n", + g.Address, status, seqNum, splitDepth, maxDepth, info) +} + +func (g *FaultDisputeGame) LogGameData() { + g.t.Log(g.GameData()) +} diff --git a/op-devstack/dsl/proofs/game_helper.go b/op-devstack/dsl/proofs/game_helper.go new file mode 100644 index 0000000000000..a507530395870 --- /dev/null +++ b/op-devstack/dsl/proofs/game_helper.go @@ -0,0 +1,224 @@ +package proofs + +import ( + "bytes" + "encoding/json" + "math/big" + "os" + "path/filepath" + + challengerTypes 
"github.com/ethereum-optimism/optimism/op-challenger/game/fault/types" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + opservice "github.com/ethereum-optimism/optimism/op-service" + "github.com/ethereum-optimism/optimism/op-service/txplan" +) + +type GameHelperMove struct { + ParentIdx *big.Int + Claim common.Hash + Attack bool +} + +type contractArtifactData struct { + Bytecode []byte + ABI abi.ABI +} + +type GameHelper struct { + t devtest.T + require *require.Assertions + contractAddr common.Address + abi abi.ABI +} + +func DeployGameHelper(t devtest.T, deployer *dsl.EOA) *GameHelper { + req := require.New(t) + + artifactData := getGameHelperArtifactData(t) + + constructorABI := artifactData.ABI + + encodedArgs, err := constructorABI.Pack("") + req.NoError(err, "Failed to encode constructor arguments") + + deploymentData := append(artifactData.Bytecode, encodedArgs...) 
+ + deployTxOpts := txplan.Combine( + deployer.Plan(), + txplan.WithData(deploymentData), + ) + + deployTx := txplan.NewPlannedTx(deployTxOpts) + receipt, err := deployTx.Included.Eval(t.Ctx()) + req.NoError(err, "Failed to deploy GameHelper contract") + + req.Equal(types.ReceiptStatusSuccessful, receipt.Status, "GameHelper deployment failed") + req.NotEqual(common.Address{}, receipt.ContractAddress, "GameHelper contract address not set in receipt") + + contractAddr := receipt.ContractAddress + t.Logf("GameHelper contract deployed at: %s", contractAddr.Hex()) + + return &GameHelper{ + t: t, + require: require.New(t), + contractAddr: contractAddr, + abi: artifactData.ABI, + } +} + +type ArtifactBytecode struct { + Object string `json:"object"` +} + +type ArtifactJSON struct { + Bytecode ArtifactBytecode `json:"bytecode"` + ABI json.RawMessage `json:"abi"` +} + +func getGameHelperArtifactData(t devtest.T) *contractArtifactData { + req := require.New(t) + artifactPath := getGameHelperArtifactPath(t) + + fileData, err := os.ReadFile(artifactPath) + req.NoError(err, "Failed to read GameHelper artifact file") + + var artifactJSON ArtifactJSON + err = json.Unmarshal(fileData, &artifactJSON) + req.NoError(err, "Failed to parse GameHelper artifact JSON") + + req.NotEmpty(artifactJSON.Bytecode.Object, "Bytecode object not found in GameHelper artifact") + + bytecode := common.FromHex(artifactJSON.Bytecode.Object) + + parsedABI, err := abi.JSON(bytes.NewReader(artifactJSON.ABI)) + req.NoError(err, "Failed to parse ABI") + + return &contractArtifactData{ + Bytecode: bytecode, + ABI: parsedABI, + } +} + +func getGameHelperArtifactPath(t devtest.T) string { + req := require.New(t) + wd, err := os.Getwd() + req.NoError(err, "Failed to get current working directory") + + monorepoRoot, err := opservice.FindMonorepoRoot(wd) + req.NoError(err, "Failed to find monorepo root") + + contractsBedrock := filepath.Join(monorepoRoot, "packages", "contracts-bedrock") + return 
filepath.Join(contractsBedrock, "forge-artifacts", "GameHelper.sol", "GameHelper.json") +} + +func (gs *GameHelper) AuthEOA(eoa *dsl.EOA) *GameHelper { + tx := txplan.NewPlannedTx(eoa.PlanAuth(gs.contractAddr)) + receipt, err := tx.Included.Eval(gs.t.Ctx()) + gs.require.NoError(err) + gs.require.Equal(types.ReceiptStatusSuccessful, receipt.Status) + return &GameHelper{ + t: gs.t, + require: require.New(gs.t), + contractAddr: eoa.Address(), + abi: gs.abi, + } +} + +func (gs *GameHelper) CreateGameWithClaims( + eoa *dsl.EOA, + factory *DisputeGameFactory, + gameType challengerTypes.GameType, + rootClaim common.Hash, + extraData []byte, + moves []GameHelperMove, +) common.Address { + data, err := gs.abi.Pack("createGameWithClaims", factory.Address(), gameType, rootClaim, extraData, moves) + gs.require.NoError(err) + + gameImpl := factory.GameImpl(gameType) + bonds := factory.initBond(gameType) + bonds = bonds.Add(gs.totalMoveBonds(gameImpl, moves)) + + tx := txplan.NewPlannedTx( + txplan.Combine( + eoa.Plan(), + txplan.WithValue(bonds), + txplan.WithTo(&gs.contractAddr), + txplan.WithData(data), + ), + ) + receipt, err := tx.Included.Eval(gs.t.Ctx()) + gs.require.NoError(err) + gs.require.Equal(types.ReceiptStatusSuccessful, receipt.Status) + + return receipt.ContractAddress +} + +func (gs *GameHelper) PerformMoves(eoa *dsl.EOA, game *FaultDisputeGame, moves []GameHelperMove) []*Claim { + data, err := gs.abi.Pack("performMoves", game.Address, moves) + gs.require.NoError(err) + + tx := txplan.NewPlannedTx( + txplan.Combine( + eoa.Plan(), + txplan.WithValue(gs.totalMoveBonds(game, moves)), + txplan.WithTo(&gs.contractAddr), + txplan.WithData(data), + ), + ) + preClaimCount := game.claimCount() + receipt, err := tx.Included.Eval(gs.t.Ctx()) + gs.require.NoError(err) + gs.require.Equal(types.ReceiptStatusSuccessful, receipt.Status) + postClaimCount := game.claimCount() + + // While all claims are performed within one transaction, it's possible another transaction also 
added claims + // between the calls to get claim count above (e.g. by a challenger running in parallel). + // So iterate to find the claims we added rather than just assuming the claim indices. + // Assumes that claims added by this helper contract are only added by this thread, + // which is safe because we deployed this particular instance of GameHelper. + claims := make([]*Claim, 0, len(moves)) + for claimIdx := preClaimCount; claimIdx < postClaimCount; claimIdx++ { + claim := game.ClaimAtIndex(claimIdx) + if claim.claim.Claimant != gs.contractAddr { + continue + } + claims = append(claims, claim) + } + gs.require.Equal(len(claims), len(moves), "Did not find claims for all moves") + return claims +} + +func (gs *GameHelper) totalMoveBonds(game *FaultDisputeGame, moves []GameHelperMove) eth.ETH { + claimPositions := map[uint64]challengerTypes.Position{ + 0: challengerTypes.RootPosition, + } + totalBond := eth.Ether(0) + for i, move := range moves { + parentPos := claimPositions[move.ParentIdx.Uint64()] + gs.require.NotEmpty(parentPos, "Move references non-existent parent - may be out of order") + childPos := parentPos.Defend() + if move.Attack { + childPos = parentPos.Attack() + } + claimPositions[uint64(i)+1] = childPos + bond := game.requiredBond(childPos) + totalBond = totalBond.Add(bond) + } + return totalBond +} + +func Move(parentIdx int64, claim common.Hash, attack bool) GameHelperMove { + return GameHelperMove{ + ParentIdx: big.NewInt(parentIdx), + Claim: claim, + Attack: attack, + } +} diff --git a/op-devstack/dsl/proofs/super_fault_dispute_game.go b/op-devstack/dsl/proofs/super_fault_dispute_game.go index 6b29ceecc8982..7e096bd5c066b 100644 --- a/op-devstack/dsl/proofs/super_fault_dispute_game.go +++ b/op-devstack/dsl/proofs/super_fault_dispute_game.go @@ -1,6 +1,7 @@ package proofs import ( + "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/op-devstack/devtest" @@ -11,8 +12,8 @@ type 
SuperFaultDisputeGame struct { *FaultDisputeGame } -func NewSuperFaultDisputeGame(t devtest.T, require *require.Assertions, game *bindings.FaultDisputeGame) *SuperFaultDisputeGame { - fdg := NewFaultDisputeGame(t, require, game) +func NewSuperFaultDisputeGame(t devtest.T, require *require.Assertions, addr common.Address, helperProvider gameHelperProvider, game *bindings.FaultDisputeGame) *SuperFaultDisputeGame { + fdg := NewFaultDisputeGame(t, require, addr, helperProvider, game) return &SuperFaultDisputeGame{ FaultDisputeGame: fdg, } diff --git a/op-devstack/dsl/supervisor.go b/op-devstack/dsl/supervisor.go index c3dc800c5105d..786136ae6f83a 100644 --- a/op-devstack/dsl/supervisor.go +++ b/op-devstack/dsl/supervisor.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "strings" "time" "github.com/ethereum-optimism/optimism/op-devstack/stack" @@ -94,12 +95,18 @@ func (s *Supervisor) FetchSyncStatus() eth.SupervisorSyncStatus { s.log.Debug("Fetching supervisor sync status") ctx, cancel := context.WithTimeout(s.ctx, DefaultTimeout) defer cancel() - syncStatus, err := retry.Do(ctx, 2, retry.Fixed(500*time.Millisecond), func() (eth.SupervisorSyncStatus, error) { + syncStatus, err := retry.Do(ctx, 10, retry.Fixed(500*time.Millisecond), func() (eth.SupervisorSyncStatus, error) { ctx, cancel := context.WithTimeout(s.ctx, 300*time.Millisecond) defer cancel() syncStatus, err := s.inner.QueryAPI().SyncStatus(ctx) if errors.Is(err, status.ErrStatusTrackerNotReady) { s.log.Debug("Sync status not ready from supervisor") + return syncStatus, err + } + // Check for L1 sync mismatch error and retry + if err != nil && strings.Contains(err.Error(), "min synced L1 mismatch") { + s.log.Debug("L1 sync mismatch, retrying", "error", err) + return syncStatus, err } return syncStatus, err }) diff --git a/op-devstack/dsl/sync_tester.go b/op-devstack/dsl/sync_tester.go new file mode 100644 index 0000000000000..7f5fcf108cec6 --- /dev/null +++ b/op-devstack/dsl/sync_tester.go @@ -0,0 +1,48 
@@ +package dsl + +import ( + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// SyncTester wraps a stack.SyncTester interface for DSL operations +type SyncTester struct { + commonImpl + inner stack.SyncTester +} + +// NewSyncTester creates a new Sync Tester DSL wrapper +func NewSyncTester(inner stack.SyncTester) *SyncTester { + return &SyncTester{ + commonImpl: commonFromT(inner.T()), + inner: inner, + } +} + +// Escape returns the underlying stack.SyncTester +func (s *SyncTester) Escape() stack.SyncTester { + return s.inner +} + +func (s *SyncTester) ListSessions() []string { + sessionIDs, err := s.inner.API().ListSessions(s.ctx) + s.t.Require().NoError(err) + return sessionIDs +} + +func (s *SyncTester) GetSession(sessionID string) *eth.SyncTesterSession { + session, err := s.inner.APIWithSession(sessionID).GetSession(s.ctx) + s.t.Require().NoError(err) + return session +} + +func (s *SyncTester) DeleteSession(sessionID string) { + err := s.inner.APIWithSession(sessionID).DeleteSession(s.ctx) + s.t.Require().NoError(err) +} + +func (s *SyncTester) ChainID(sessionID string) eth.ChainID { + chainID, err := s.inner.APIWithSession(sessionID).ChainID(s.ctx) + s.t.Require().NoError(err, "should be able to get chain ID from SyncTester") + return chainID +} diff --git a/op-devstack/dsl/validators.go b/op-devstack/dsl/validators.go index d517953d1b3c3..94328e4333670 100644 --- a/op-devstack/dsl/validators.go +++ b/op-devstack/dsl/validators.go @@ -39,6 +39,8 @@ func IsForkActivated(c *params.ChainConfig, forkName rollup.ForkName, timestamp return c.IsOptimismHolocene(timestamp), nil case rollup.Isthmus: return c.IsOptimismIsthmus(timestamp), nil + case rollup.Jovian: + return c.IsOptimismJovian(timestamp), nil case rollup.Interop: return c.IsInterop(timestamp), nil default: diff --git a/op-devstack/presets/cl_config.go b/op-devstack/presets/cl_config.go index 62dc7a05b36d2..95a3d900ae6e3 100644 --- 
a/op-devstack/presets/cl_config.go +++ b/op-devstack/presets/cl_config.go @@ -4,31 +4,38 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" - "github.com/ethereum-optimism/optimism/op-node/config" "github.com/ethereum-optimism/optimism/op-node/rollup/sync" ) func WithExecutionLayerSyncOnVerifiers() stack.CommonOption { return stack.MakeCommon( - sysgo.WithL2CLOption(func(_ devtest.P, id stack.L2CLNodeID, cfg *config.Config) { - // Can't enable ELSync on the sequencer or it will never start sequencing because - // ELSync needs to receive gossip from the sequencer to drive the sync - if !cfg.Driver.SequencerEnabled { - cfg.Sync.SyncMode = sync.ELSync - } - })) + sysgo.WithGlobalL2CLOption(sysgo.L2CLOptionFn( + func(_ devtest.P, id stack.L2CLNodeID, cfg *sysgo.L2CLConfig) { + cfg.VerifierSyncMode = sync.ELSync + }))) } func WithConsensusLayerSync() stack.CommonOption { return stack.MakeCommon( - sysgo.WithL2CLOption(func(_ devtest.P, id stack.L2CLNodeID, cfg *config.Config) { - cfg.Sync.SyncMode = sync.CLSync - })) + sysgo.WithGlobalL2CLOption(sysgo.L2CLOptionFn( + func(_ devtest.P, id stack.L2CLNodeID, cfg *sysgo.L2CLConfig) { + cfg.SequencerSyncMode = sync.CLSync + cfg.VerifierSyncMode = sync.CLSync + }))) } func WithSafeDBEnabled() stack.CommonOption { return stack.MakeCommon( - sysgo.WithL2CLOption(func(p devtest.P, _ stack.L2CLNodeID, cfg *config.Config) { - cfg.SafeDBPath = p.TempDir() - })) + sysgo.WithGlobalL2CLOption(sysgo.L2CLOptionFn( + func(p devtest.P, id stack.L2CLNodeID, cfg *sysgo.L2CLConfig) { + cfg.SafeDBPath = p.TempDir() + }))) +} + +func WithReqRespSyncDisabled() stack.CommonOption { + return stack.MakeCommon( + sysgo.WithGlobalL2CLOption(sysgo.L2CLOptionFn( + func(_ devtest.P, id stack.L2CLNodeID, cfg *sysgo.L2CLConfig) { + cfg.EnableReqRespSync = false + }))) } diff --git a/op-devstack/presets/disputegame_v2.go 
b/op-devstack/presets/disputegame_v2.go new file mode 100644 index 0000000000000..a47a1cf088b9d --- /dev/null +++ b/op-devstack/presets/disputegame_v2.go @@ -0,0 +1,11 @@ +package presets + +import ( + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +func WithDisputeGameV2() stack.CommonOption { + return stack.MakeCommon(sysgo.WithDeployerOptions(sysgo.WithDevFeatureBitmap(deployer.DeployV2DisputeGamesDevFlag))) +} diff --git a/op-devstack/presets/flashblocks.go b/op-devstack/presets/flashblocks.go index 48e50fa096627..43b9deb504d5b 100644 --- a/op-devstack/presets/flashblocks.go +++ b/op-devstack/presets/flashblocks.go @@ -26,8 +26,7 @@ func WithSimpleFlashblocks() stack.CommonOption { return stack.Combine( stack.MakeCommon(sysgo.DefaultMinimalSystem(&sysgo.DefaultMinimalSystemIDs{})), // TODO(#16450): add sysgo support for flashblocks - // TODO(#16514): add kurtosis support for flashblocks - WithCompatibleTypes(compat.Persistent), + WithCompatibleTypes(compat.Persistent, compat.Kurtosis), ) } diff --git a/op-devstack/presets/interop.go b/op-devstack/presets/interop.go index 6edde616c3dc1..0cfdfd32c45e4 100644 --- a/op-devstack/presets/interop.go +++ b/op-devstack/presets/interop.go @@ -68,7 +68,7 @@ func NewSingleChainInterop(t devtest.T) *SingleChainInterop { L2ChainA: dsl.NewL2Network(l2A, orch.ControlPlane()), L2ELA: dsl.NewL2ELNode(l2A.L2ELNode(match.Assume(t, match.FirstL2EL)), orch.ControlPlane()), L2CLA: dsl.NewL2CLNode(l2A.L2CLNode(match.Assume(t, match.FirstL2CL)), orch.ControlPlane()), - Wallet: dsl.NewHDWallet(t, devkeys.TestMnemonic, 30), + Wallet: dsl.NewRandomHDWallet(t, 30), // Random for test isolation FaucetA: dsl.NewFaucet(l2A.Faucet(match.Assume(t, match.FirstFaucet))), L2BatcherA: dsl.NewL2Batcher(l2A.L2Batcher(match.Assume(t, match.FirstL2Batcher))), } diff --git a/op-devstack/presets/jovian.go 
b/op-devstack/presets/jovian.go new file mode 100644 index 0000000000000..b0a96d763201b --- /dev/null +++ b/op-devstack/presets/jovian.go @@ -0,0 +1,21 @@ +package presets + +import ( + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/intentbuilder" + "github.com/ethereum-optimism/optimism/op-node/rollup" +) + +// WithJovianAtGenesis configures all L2s to activate the Jovian fork at genesis in sysgo mode. +func WithJovianAtGenesis() stack.CommonOption { + return stack.MakeCommon(sysgo.WithDeployerOptions( + func(p devtest.P, _ devkeys.Keys, builder intentbuilder.Builder) { + for _, l2Cfg := range builder.L2s() { + l2Cfg.WithForkAtGenesis(rollup.Jovian) + } + }, + )) +} diff --git a/op-devstack/presets/minimal.go b/op-devstack/presets/minimal.go index 02c04b2534cf3..d97d3b1c7ab39 100644 --- a/op-devstack/presets/minimal.go +++ b/op-devstack/presets/minimal.go @@ -3,9 +3,9 @@ package presets import ( "github.com/ethereum/go-ethereum/log" - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/dsl/proofs" "github.com/ethereum-optimism/optimism/op-devstack/shim" "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-devstack/stack/match" @@ -25,8 +25,6 @@ type Minimal struct { L2EL *dsl.L2ELNode L2CL *dsl.L2CLNode - TestSequencer *dsl.TestSequencer - Wallet *dsl.HDWallet FaucetL1 *dsl.Faucet @@ -45,6 +43,10 @@ func (m *Minimal) StandardBridge() *dsl.StandardBridge { return dsl.NewStandardBridge(m.T, m.L2Chain, nil, m.L1EL) } +func (m *Minimal) DisputeGameFactory() *proofs.DisputeGameFactory { + return 
proofs.NewDisputeGameFactory(m.T, m.L1Network, m.L1EL.EthClient(), m.L2Chain.DisputeGameFactoryProxyAddr(), nil) +} + func WithMinimal() stack.CommonOption { return stack.MakeCommon(sysgo.DefaultMinimalSystem(&sysgo.DefaultMinimalSystemIDs{})) } @@ -58,25 +60,22 @@ func NewMinimal(t devtest.T) *Minimal { } func minimalFromSystem(t devtest.T, system stack.ExtensibleSystem, orch stack.Orchestrator) *Minimal { - t.Gate().Equal(len(system.TestSequencers()), 1, "expected exactly one test sequencer") - l1Net := system.L1Network(match.FirstL1Network) l2 := system.L2Network(match.Assume(t, match.L2ChainA)) sequencerCL := l2.L2CLNode(match.Assume(t, match.WithSequencerActive(t.Ctx()))) sequencerEL := l2.L2ELNode(match.Assume(t, match.EngineFor(sequencerCL))) out := &Minimal{ - Log: t.Logger(), - T: t, - ControlPlane: orch.ControlPlane(), - L1Network: dsl.NewL1Network(system.L1Network(match.FirstL1Network)), - L1EL: dsl.NewL1ELNode(l1Net.L1ELNode(match.Assume(t, match.FirstL1EL))), - L2Chain: dsl.NewL2Network(l2, orch.ControlPlane()), - L2Batcher: dsl.NewL2Batcher(l2.L2Batcher(match.Assume(t, match.FirstL2Batcher))), - L2EL: dsl.NewL2ELNode(sequencerEL, orch.ControlPlane()), - L2CL: dsl.NewL2CLNode(sequencerCL, orch.ControlPlane()), - TestSequencer: dsl.NewTestSequencer(system.TestSequencer(match.Assume(t, match.FirstTestSequencer))), - Wallet: dsl.NewHDWallet(t, devkeys.TestMnemonic, 30), - FaucetL2: dsl.NewFaucet(l2.Faucet(match.Assume(t, match.FirstFaucet))), + Log: t.Logger(), + T: t, + ControlPlane: orch.ControlPlane(), + L1Network: dsl.NewL1Network(system.L1Network(match.FirstL1Network)), + L1EL: dsl.NewL1ELNode(l1Net.L1ELNode(match.Assume(t, match.FirstL1EL))), + L2Chain: dsl.NewL2Network(l2, orch.ControlPlane()), + L2Batcher: dsl.NewL2Batcher(l2.L2Batcher(match.Assume(t, match.FirstL2Batcher))), + L2EL: dsl.NewL2ELNode(sequencerEL, orch.ControlPlane()), + L2CL: dsl.NewL2CLNode(sequencerCL, orch.ControlPlane()), + Wallet: dsl.NewRandomHDWallet(t, 30), // Random for 
test isolation + FaucetL2: dsl.NewFaucet(l2.Faucet(match.Assume(t, match.FirstFaucet))), } out.FaucetL1 = dsl.NewFaucet(out.L1Network.Escape().Faucet(match.Assume(t, match.FirstFaucet))) out.FunderL1 = dsl.NewFunder(out.Wallet, out.FaucetL1, out.L1EL) diff --git a/op-devstack/presets/minimal_external_el.go b/op-devstack/presets/minimal_external_el.go new file mode 100644 index 0000000000000..7b0bfd709c263 --- /dev/null +++ b/op-devstack/presets/minimal_external_el.go @@ -0,0 +1,36 @@ +package presets + +import ( + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +type MinimalExternalEL struct { + Log log.Logger + T devtest.T + ControlPlane stack.ControlPlane + + L1Network *dsl.L1Network + L1EL *dsl.L1ELNode + + L2Chain *dsl.L2Network + L2CL *dsl.L2CLNode + L2EL *dsl.L2ELNode + L2ELReadOnly *dsl.L2ELNode + + SyncTester *dsl.SyncTester +} + +func (m *MinimalExternalEL) L2Networks() []*dsl.L2Network { + return []*dsl.L2Network{ + m.L2Chain, + } +} + +func WithExternalELWithSuperchainRegistry(networkPreset stack.ExtNetworkConfig) stack.CommonOption { + return stack.MakeCommon(sysgo.ExternalELSystemWithEndpointAndSuperchainRegistry(&sysgo.DefaultMinimalExternalELSystemIDs{}, networkPreset)) +} diff --git a/op-devstack/presets/minimal_with_synctester.go b/op-devstack/presets/minimal_with_synctester.go new file mode 100644 index 0000000000000..eb8b3e3f99ed6 --- /dev/null +++ b/op-devstack/presets/minimal_with_synctester.go @@ -0,0 +1,34 @@ +package presets + +import ( + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + 
"github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +type MinimalWithSyncTester struct { + Minimal + + SyncTester *dsl.SyncTester +} + +func WithMinimalWithSyncTester(fcu eth.FCUState) stack.CommonOption { + return stack.MakeCommon(sysgo.DefaultMinimalSystemWithSyncTester(&sysgo.DefaultMinimalSystemWithSyncTesterIDs{}, fcu)) +} + +func NewMinimalWithSyncTester(t devtest.T) *MinimalWithSyncTester { + system := shim.NewSystem(t) + orch := Orchestrator() + orch.Hydrate(system) + minimal := minimalFromSystem(t, system, orch) + l2 := system.L2Network(match.Assume(t, match.L2ChainA)) + syncTester := l2.SyncTester(match.Assume(t, match.FirstSyncTester)) + return &MinimalWithSyncTester{ + Minimal: *minimal, + SyncTester: dsl.NewSyncTester(syncTester), + } +} diff --git a/op-devstack/presets/orchestrator.go b/op-devstack/presets/orchestrator.go index 0f2d4aad7b59d..00b264bbfb0ac 100644 --- a/op-devstack/presets/orchestrator.go +++ b/op-devstack/presets/orchestrator.go @@ -7,7 +7,6 @@ import ( "runtime/debug" "slices" "sync/atomic" - "testing" "github.com/ethereum/go-ethereum/log" "go.opentelemetry.io/otel" @@ -37,10 +36,14 @@ const ( backendKindSysExt backendKind = "sysext" ) +type TestingM interface { + Run() int +} + // DoMain runs M with the pre- and post-processing of tests, // to setup the default global orchestrator and global logger. // This will os.Exit(code) and not return. 
-func DoMain(m *testing.M, opts ...stack.CommonOption) { +func DoMain(m TestingM, opts ...stack.CommonOption) { // nest the function, so we can defer-recover and defer-cleanup, before os.Exit code := func() (errCode int) { failed := new(atomic.Bool) @@ -50,7 +53,7 @@ func DoMain(m *testing.M, opts ...stack.CommonOption) { } }() defer func() { - if x := recover(); x != nil { + if x := recover(); x != nil && !failed.Load() { debug.PrintStack() _, _ = fmt.Fprintf(os.Stderr, "Panic during test Main: %v\n", x) @@ -84,9 +87,11 @@ func DoMain(m *testing.M, opts ...stack.CommonOption) { logger.SetContext(ctx) onFail := func(now bool) { - logger.Error("Main failed") - debug.PrintStack() - failed.Store(true) + if !failed.Load() { + logger.Error("Main failed") + debug.PrintStack() + failed.Store(true) + } if now { panic("critical Main fail") } @@ -101,12 +106,6 @@ func DoMain(m *testing.M, opts ...stack.CommonOption) { p.Require().NotEmpty(opts, "Expecting orchestrator options") - // For the global geth logs, - // capture them in the global test logger. - // No other tool / test should change the global logger. 
- // TODO(#15139): set log-level filter, reduce noise - //log.SetDefault(t.Log.New("logger", "global")) - initOrchestrator(ctx, p, stack.Combine(opts...)) errCode = m.Run() diff --git a/op-devstack/presets/simple_with_synctester.go b/op-devstack/presets/simple_with_synctester.go new file mode 100644 index 0000000000000..0afb33bcd305b --- /dev/null +++ b/op-devstack/presets/simple_with_synctester.go @@ -0,0 +1,48 @@ +package presets + +import ( + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-node/rollup" +) + +type SimpleWithSyncTester struct { + Minimal + + SyncTester *dsl.SyncTester + SyncTesterL2EL *dsl.L2ELNode + L2CL2 *dsl.L2CLNode +} + +func WithSimpleWithSyncTester() stack.CommonOption { + return stack.MakeCommon(sysgo.DefaultSimpleSystemWithSyncTester(&sysgo.DefaultSimpleSystemWithSyncTesterIDs{})) +} + +func NewSimpleWithSyncTester(t devtest.T) *SimpleWithSyncTester { + system := shim.NewSystem(t) + orch := Orchestrator() + orch.Hydrate(system) + minimal := minimalFromSystem(t, system, orch) + l2 := system.L2Network(match.L2ChainA) + syncTester := l2.SyncTester(match.FirstSyncTester) + + // L2CL connected to L2EL initialized by sync tester + l2CL2 := l2.L2CLNode(match.SecondL2CL) + // L2EL initialized by sync tester + syncTesterL2EL := l2.L2ELNode(match.SecondL2EL) + + return &SimpleWithSyncTester{ + Minimal: *minimal, + SyncTester: dsl.NewSyncTester(syncTester), + SyncTesterL2EL: dsl.NewL2ELNode(syncTesterL2EL, orch.ControlPlane()), + L2CL2: dsl.NewL2CLNode(l2CL2, orch.ControlPlane()), + } +} + +func WithHardforkSequentialActivation(startFork, endFork rollup.ForkName, delta uint64) stack.CommonOption { 
+ return stack.MakeCommon(sysgo.WithDeployerOptions(sysgo.WithHardforkSequentialActivation(startFork, endFork, &delta))) +} diff --git a/op-devstack/presets/singlechain_multinode.go b/op-devstack/presets/singlechain_multinode.go index f950ce8b24242..c417b435f67e4 100644 --- a/op-devstack/presets/singlechain_multinode.go +++ b/op-devstack/presets/singlechain_multinode.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) type SingleChainMultiNode struct { @@ -21,6 +22,16 @@ func WithSingleChainMultiNode() stack.CommonOption { } func NewSingleChainMultiNode(t devtest.T) *SingleChainMultiNode { + preset := NewSingleChainMultiNodeWithoutCheck(t) + // Ensure the follower node is in sync with the sequencer before starting tests + dsl.CheckAll(t, + preset.L2CLB.MatchedFn(preset.L2CL, types.CrossSafe, 30), + preset.L2CLB.MatchedFn(preset.L2CL, types.LocalUnsafe, 30), + ) + return preset +} + +func NewSingleChainMultiNodeWithoutCheck(t devtest.T) *SingleChainMultiNode { system := shim.NewSystem(t) orch := Orchestrator() orch.Hydrate(system) @@ -35,9 +46,14 @@ func NewSingleChainMultiNode(t devtest.T) *SingleChainMultiNode { match.And( match.EngineFor(verifierCL), match.Not[stack.L2ELNodeID, stack.L2ELNode](minimal.L2EL.ID())))) - return &SingleChainMultiNode{ + preset := &SingleChainMultiNode{ Minimal: *minimal, L2ELB: dsl.NewL2ELNode(verifierEL, orch.ControlPlane()), L2CLB: dsl.NewL2CLNode(verifierCL, orch.ControlPlane()), } + return preset +} + +func WithSingleChainMultiNodeWithoutP2P() stack.CommonOption { + return stack.MakeCommon(sysgo.DefaultSingleChainMultiNodeSystemWithoutP2P(&sysgo.DefaultSingleChainMultiNodeSystemIDs{})) } diff --git a/op-devstack/presets/sync_tester_config.go b/op-devstack/presets/sync_tester_config.go new file mode 
100644 index 0000000000000..8687952a00599 --- /dev/null +++ b/op-devstack/presets/sync_tester_config.go @@ -0,0 +1,25 @@ +package presets + +import ( + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +func WithSyncTesterELInitialState(fcu eth.FCUState) stack.CommonOption { + return stack.MakeCommon( + sysgo.WithGlobalSyncTesterELOption(sysgo.SyncTesterELOptionFn( + func(_ devtest.P, id stack.L2ELNodeID, cfg *sysgo.SyncTesterELConfig) { + cfg.FCUState = fcu + }))) +} + +func WithELSyncTarget(elSyncTarget uint64) stack.CommonOption { + return stack.MakeCommon( + sysgo.WithGlobalSyncTesterELOption(sysgo.SyncTesterELOptionFn( + func(_ devtest.P, id stack.L2ELNodeID, cfg *sysgo.SyncTesterELConfig) { + cfg.ELSyncActive = true + cfg.ELSyncTarget = elSyncTarget + }))) +} diff --git a/op-devstack/shared/challenger/challenger.go b/op-devstack/shared/challenger/challenger.go index 2f5af4e08c11a..8ca63f7012821 100644 --- a/op-devstack/shared/challenger/challenger.go +++ b/op-devstack/shared/challenger/challenger.go @@ -52,7 +52,7 @@ func WithPrivKey(key *ecdsa.PrivateKey) Option { } } -func applyCannonConfig(c *config.Config, rollupCfgs []*rollup.Config, l2Geneses []*core.Genesis, prestateVariant PrestateVariant) error { +func applyCannonConfig(c *config.Config, rollupCfgs []*rollup.Config, l1Genesis *core.Genesis, l2Geneses []*core.Genesis, prestateVariant PrestateVariant) error { root, err := findMonorepoRoot() if err != nil { return err @@ -79,6 +79,17 @@ func applyCannonConfig(c *config.Config, rollupCfgs []*rollup.Config, l2Geneses c.Cannon.L2GenesisPaths = append(c.Cannon.L2GenesisPaths, genesisFile) } + l1GenesisBytes, err := json.Marshal(l1Genesis) + if err != nil { + return fmt.Errorf("marshall l1 genesis config: %w", err) + } + l1GenesisFile := filepath.Join(c.Datadir, 
fmt.Sprintf("l1-genesis-%v.json", l1Genesis.Config.ChainID)) + err = os.WriteFile(l1GenesisFile, l1GenesisBytes, 0o644) + if err != nil { + return fmt.Errorf("write l1 genesis config: %w", err) + } + c.Cannon.L1GenesisPath = l1GenesisFile + for _, rollupCfg := range rollupCfgs { rollupBytes, err := json.Marshal(rollupCfg) if err != nil { @@ -101,9 +112,9 @@ func WithFactoryAddress(addr common.Address) Option { } } -func WithCannonConfig(rollupCfgs []*rollup.Config, l2Geneses []*core.Genesis, prestateVariant PrestateVariant) Option { +func WithCannonConfig(rollupCfgs []*rollup.Config, l1Genesis *core.Genesis, l2Geneses []*core.Genesis, prestateVariant PrestateVariant) Option { return func(c *config.Config) error { - return applyCannonConfig(c, rollupCfgs, l2Geneses, prestateVariant) + return applyCannonConfig(c, rollupCfgs, l1Genesis, l2Geneses, prestateVariant) } } diff --git a/op-devstack/shim/fb_builder.go b/op-devstack/shim/fb_builder.go index 7d1df9fb48f58..960747612e38f 100644 --- a/op-devstack/shim/fb_builder.go +++ b/op-devstack/shim/fb_builder.go @@ -1,6 +1,8 @@ package shim import ( + "net/http" + "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/op-devstack/stack" @@ -10,9 +12,10 @@ import ( type FlashblocksBuilderNodeConfig struct { ELNodeConfig - ID stack.FlashblocksBuilderID - Conductor stack.Conductor - FlashblocksWsUrl string + ID stack.FlashblocksBuilderID + Conductor stack.Conductor + FlashblocksWsUrl string + FlashblocksWsHeaders http.Header } type flashblocksBuilderNode struct { @@ -22,7 +25,8 @@ type flashblocksBuilderNode struct { id stack.FlashblocksBuilderID conductor stack.Conductor - flashblocksWsUrl string + flashblocksWsUrl string + flashblocksWsHeaders http.Header } var _ stack.FlashblocksBuilderNode = (*flashblocksBuilderNode)(nil) @@ -34,11 +38,12 @@ func NewFlashblocksBuilderNode(cfg FlashblocksBuilderNodeConfig) stack.Flashbloc require.NoError(cfg.T, err) return &flashblocksBuilderNode{ - rpcELNode: 
newRpcELNode(cfg.ELNodeConfig), - l2Client: l2Client, - id: cfg.ID, - conductor: cfg.Conductor, - flashblocksWsUrl: cfg.FlashblocksWsUrl, + rpcELNode: newRpcELNode(cfg.ELNodeConfig), + l2Client: l2Client, + id: cfg.ID, + conductor: cfg.Conductor, + flashblocksWsUrl: cfg.FlashblocksWsUrl, + flashblocksWsHeaders: cfg.FlashblocksWsHeaders, } } @@ -57,3 +62,7 @@ func (r *flashblocksBuilderNode) L2EthClient() apis.L2EthClient { func (r *flashblocksBuilderNode) FlashblocksWsUrl() string { return r.flashblocksWsUrl } + +func (r *flashblocksBuilderNode) FlashblocksWsHeaders() http.Header { + return r.flashblocksWsHeaders +} diff --git a/op-devstack/shim/fb_ws_proxy.go b/op-devstack/shim/fb_ws_proxy.go index b2e83174374f8..b01d10441efc4 100644 --- a/op-devstack/shim/fb_ws_proxy.go +++ b/op-devstack/shim/fb_ws_proxy.go @@ -1,20 +1,24 @@ package shim import ( + "net/http" + "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-service/eth" ) type FlashblocksWebsocketProxyConfig struct { CommonConfig - ID stack.FlashblocksWebsocketProxyID - WsUrl string + ID stack.FlashblocksWebsocketProxyID + WsUrl string + WsHeaders http.Header } type flashblocksWebsocketProxy struct { commonImpl - id stack.FlashblocksWebsocketProxyID - wsUrl string + id stack.FlashblocksWebsocketProxyID + wsUrl string + wsHeaders http.Header } var _ stack.FlashblocksWebsocketProxy = (*flashblocksWebsocketProxy)(nil) @@ -25,6 +29,7 @@ func NewFlashblocksWebsocketProxy(cfg FlashblocksWebsocketProxyConfig) stack.Fla commonImpl: newCommon(cfg.CommonConfig), id: cfg.ID, wsUrl: cfg.WsUrl, + wsHeaders: cfg.WsHeaders, } } @@ -39,3 +44,7 @@ func (r *flashblocksWebsocketProxy) ChainID() eth.ChainID { func (r *flashblocksWebsocketProxy) WsUrl() string { return r.wsUrl } + +func (r *flashblocksWebsocketProxy) WsHeaders() http.Header { + return r.wsHeaders +} diff --git a/op-devstack/shim/l2_cl.go b/op-devstack/shim/l2_cl.go index b1493b052136b..bcf02f783553f 100644 --- 
a/op-devstack/shim/l2_cl.go +++ b/op-devstack/shim/l2_cl.go @@ -14,6 +14,8 @@ type L2CLNodeConfig struct { ID stack.L2CLNodeID Client client.RPC + UserRPC string + InteropEndpoint string InteropJwtSecret eth.Bytes32 } @@ -26,6 +28,8 @@ type rpcL2CLNode struct { p2pClient apis.P2PClient els locks.RWMap[stack.L2ELNodeID, stack.L2ELNode] + userRPC string + // Store interop ws endpoints and secrets to provide to the supervisor, // when reconnection happens using the supervisor's admin_addL2RPC method. // These fields are not intended for manual dial-in or initializing client.RPC @@ -44,11 +48,16 @@ func NewL2CLNode(cfg L2CLNodeConfig) stack.L2CLNode { client: cfg.Client, rollupClient: sources.NewRollupClient(cfg.Client), p2pClient: sources.NewP2PClient(cfg.Client), + userRPC: cfg.UserRPC, interopEndpoint: cfg.InteropEndpoint, interopJwtSecret: cfg.InteropJwtSecret, } } +func (r *rpcL2CLNode) ClientRPC() client.RPC { + return r.client +} + func (r *rpcL2CLNode) ID() stack.L2CLNodeID { return r.id } @@ -69,6 +78,10 @@ func (r *rpcL2CLNode) ELs() []stack.L2ELNode { return stack.SortL2ELNodes(r.els.Values()) } +func (r *rpcL2CLNode) UserRPC() string { + return r.userRPC +} + func (r *rpcL2CLNode) InteropRPC() (endpoint string, jwtSecret eth.Bytes32) { return r.interopEndpoint, r.interopJwtSecret } diff --git a/op-devstack/shim/l2_el.go b/op-devstack/shim/l2_el.go index 8d62cc54b1405..b43786af451a1 100644 --- a/op-devstack/shim/l2_el.go +++ b/op-devstack/shim/l2_el.go @@ -6,18 +6,21 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/sources" ) type L2ELNodeConfig struct { ELNodeConfig - RollupCfg *rollup.Config - ID stack.L2ELNodeID + EngineClient client.RPC + RollupCfg *rollup.Config + ID stack.L2ELNodeID } type rpcL2ELNode struct { 
rpcELNode - l2Client *sources.L2Client + l2Client *sources.L2Client + l2EngineClient *sources.EngineClient id stack.L2ELNodeID } @@ -30,11 +33,17 @@ func NewL2ELNode(cfg L2ELNodeConfig) stack.L2ELNode { require.NotNil(cfg.T, cfg.RollupCfg, "rollup config must be configured") l2Client, err := sources.NewL2Client(cfg.ELNodeConfig.Client, cfg.T.Logger(), nil, sources.L2ClientSimpleConfig(cfg.RollupCfg, false, 10, 10)) require.NoError(cfg.T, err) - + engineClientConfig := &sources.EngineClientConfig{ + L2ClientConfig: *sources.L2ClientSimpleConfig(cfg.RollupCfg, false, 10, 10), + } + // initialize engine API client using different client + engineClient, err := sources.NewEngineClient(cfg.EngineClient, cfg.T.Logger(), nil, engineClientConfig) + require.NoError(cfg.T, err) return &rpcL2ELNode{ - rpcELNode: newRpcELNode(cfg.ELNodeConfig), - l2Client: l2Client, - id: cfg.ID, + rpcELNode: newRpcELNode(cfg.ELNodeConfig), + l2Client: l2Client, + l2EngineClient: engineClient, + id: cfg.ID, } } @@ -45,3 +54,11 @@ func (r *rpcL2ELNode) ID() stack.L2ELNodeID { func (r *rpcL2ELNode) L2EthClient() apis.L2EthClient { return r.l2Client } + +func (r *rpcL2ELNode) L2EthExtendedClient() apis.L2EthExtendedClient { + return r.l2Client +} + +func (r *rpcL2ELNode) L2EngineClient() apis.EngineClient { + return r.l2EngineClient.EngineAPIClient +} diff --git a/op-devstack/shim/network.go b/op-devstack/shim/network.go index b33e9f2ecdcdd..80f8a7cfbe3a4 100644 --- a/op-devstack/shim/network.go +++ b/op-devstack/shim/network.go @@ -18,7 +18,8 @@ type presetNetwork struct { chainCfg *params.ChainConfig chainID eth.ChainID - faucets locks.RWMap[stack.FaucetID, stack.Faucet] + faucets locks.RWMap[stack.FaucetID, stack.Faucet] + syncTesters locks.RWMap[stack.SyncTesterID, stack.SyncTester] } var _ stack.Network = (*presetNetwork)(nil) @@ -59,3 +60,23 @@ func (p *presetNetwork) AddFaucet(v stack.Faucet) { p.require().Equal(p.chainID, id.ChainID(), "faucet %s must be on chain %s", id, p.chainID) 
p.require().True(p.faucets.SetIfMissing(id, v), "faucet %s must not already exist", id) } + +func (p *presetNetwork) SyncTesterIDs() []stack.SyncTesterID { + return stack.SortSyncTesterIDs(p.syncTesters.Keys()) +} + +func (p *presetNetwork) SyncTesters() []stack.SyncTester { + return stack.SortSyncTesters(p.syncTesters.Values()) +} + +func (p *presetNetwork) SyncTester(m stack.SyncTesterMatcher) stack.SyncTester { + v, ok := findMatch(m, p.syncTesters.Get, p.SyncTesters) + p.require().True(ok, "must find sync tester %s", m) + return v +} + +func (p *presetNetwork) AddSyncTester(v stack.SyncTester) { + id := v.ID() + p.require().Equal(p.chainID, id.ChainID(), "sync tester %s must be on chain %s", id, p.chainID) + p.require().True(p.syncTesters.SetIfMissing(id, v), "sync tester %s must not already exist", id) +} diff --git a/op-devstack/shim/sync_tester.go b/op-devstack/shim/sync_tester.go new file mode 100644 index 0000000000000..460169e148fdd --- /dev/null +++ b/op-devstack/shim/sync_tester.go @@ -0,0 +1,56 @@ +package shim + +import ( + "fmt" + + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/sources" + "github.com/ethereum-optimism/optimism/op-sync-tester/synctester" +) + +type SyncTesterConfig struct { + CommonConfig + ID stack.SyncTesterID + Addr string + Client client.RPC +} + +// presetSyncTester wraps around a syncTester-service, +type presetSyncTester struct { + commonImpl + id stack.SyncTesterID + // Endpoint for initializing RPC Client per session + addr string + // RPC Client initialized without session + syncTesterClient *sources.SyncTesterClient +} + +var _ stack.SyncTester = (*presetSyncTester)(nil) + +func NewSyncTester(cfg SyncTesterConfig) stack.SyncTester { + cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) + return &presetSyncTester{ + id: cfg.ID, + 
commonImpl: newCommon(cfg.CommonConfig), + addr: cfg.Addr, + syncTesterClient: sources.NewSyncTesterClient(cfg.Client), + } +} + +func (p *presetSyncTester) ID() stack.SyncTesterID { + return p.id +} + +func (p *presetSyncTester) API() apis.SyncTester { + return p.syncTesterClient +} + +func (p *presetSyncTester) APIWithSession(sessionID string) apis.SyncTester { + require := p.T().Require() + require.NoError(synctester.IsValidSessionID(sessionID)) + rpcCl, err := client.NewRPC(p.T().Ctx(), p.Logger(), p.addr+fmt.Sprintf("/%s", sessionID), client.WithLazyDial()) + require.NoError(err, "sync tester failed to initialize rpc per session") + return sources.NewSyncTesterClient(rpcCl) +} diff --git a/op-devstack/shim/system.go b/op-devstack/shim/system.go index 64c0b9c91ba73..72c0468dbb3f4 100644 --- a/op-devstack/shim/system.go +++ b/op-devstack/shim/system.go @@ -34,6 +34,7 @@ type presetSystem struct { supervisors locks.RWMap[stack.SupervisorID, stack.Supervisor] sequencers locks.RWMap[stack.TestSequencerID, stack.TestSequencer] + syncTesters locks.RWMap[stack.SyncTesterID, stack.SyncTester] } var _ stack.ExtensibleSystem = (*presetSystem)(nil) @@ -120,6 +121,10 @@ func (p *presetSystem) AddTestSequencer(v stack.TestSequencer) { p.require().True(p.sequencers.SetIfMissing(v.ID(), v), "sequencer %s must not already exist", v.ID()) } +func (p *presetSystem) AddSyncTester(v stack.SyncTester) { + p.require().True(p.syncTesters.SetIfMissing(v.ID(), v), "sync tester %s must not already exist", v.ID()) +} + func (p *presetSystem) SuperchainIDs() []stack.SuperchainID { return stack.SortSuperchainIDs(p.superchains.Keys()) } diff --git a/op-devstack/stack/ext_network_config.go b/op-devstack/stack/ext_network_config.go new file mode 100644 index 0000000000000..b5bac43d1bfab --- /dev/null +++ b/op-devstack/stack/ext_network_config.go @@ -0,0 +1,11 @@ +package stack + +import "github.com/ethereum-optimism/optimism/op-service/eth" + +type ExtNetworkConfig struct { + L2NetworkName 
string + L1ChainID eth.ChainID + L2ELEndpoint string + L1CLBeaconEndpoint string + L1ELEndpoint string +} diff --git a/op-devstack/stack/fb_builder.go b/op-devstack/stack/fb_builder.go index 7453f6b7caeae..1a67f5b1517ef 100644 --- a/op-devstack/stack/fb_builder.go +++ b/op-devstack/stack/fb_builder.go @@ -2,6 +2,7 @@ package stack import ( "log/slog" + "net/http" "github.com/ethereum-optimism/optimism/op-service/apis" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -13,6 +14,7 @@ type FlashblocksBuilderNode interface { Conductor() Conductor L2EthClient() apis.L2EthClient FlashblocksWsUrl() string + FlashblocksWsHeaders() http.Header } type FlashblocksBuilderID idWithChain diff --git a/op-devstack/stack/fb_ws_proxy.go b/op-devstack/stack/fb_ws_proxy.go index 52e6f2ed63a33..4b0f1167bfae8 100644 --- a/op-devstack/stack/fb_ws_proxy.go +++ b/op-devstack/stack/fb_ws_proxy.go @@ -2,6 +2,7 @@ package stack import ( "log/slog" + "net/http" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -11,6 +12,7 @@ type FlashblocksWebsocketProxy interface { ChainID() eth.ChainID ID() FlashblocksWebsocketProxyID WsUrl() string + WsHeaders() http.Header } type FlashblocksWebsocketProxyID idWithChain diff --git a/op-devstack/stack/l2_cl.go b/op-devstack/stack/l2_cl.go index 1cb60e25c54de..e5e5b042b3433 100644 --- a/op-devstack/stack/l2_cl.go +++ b/op-devstack/stack/l2_cl.go @@ -4,6 +4,7 @@ import ( "log/slog" "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -72,9 +73,11 @@ type L2CLNode interface { Common ID() L2CLNodeID + ClientRPC() client.RPC RollupAPI() apis.RollupClient P2PAPI() apis.P2PClient InteropRPC() (endpoint string, jwtSecret eth.Bytes32) + UserRPC() string // ELs returns the engine(s) that this L2CLNode is connected to. // This may be empty, if the L2CL is not connected to any. 
diff --git a/op-devstack/stack/l2_el.go b/op-devstack/stack/l2_el.go index b600538888459..3616e39972084 100644 --- a/op-devstack/stack/l2_el.go +++ b/op-devstack/stack/l2_el.go @@ -71,6 +71,8 @@ func (id L2ELNodeID) Match(elems []L2ELNode) []L2ELNode { type L2ELNode interface { ID() L2ELNodeID L2EthClient() apis.L2EthClient + L2EthExtendedClient() apis.L2EthExtendedClient + L2EngineClient() apis.EngineClient ELNode } diff --git a/op-devstack/stack/match/first.go b/op-devstack/stack/match/first.go index 1a4e419bc73fc..572b49e335c03 100644 --- a/op-devstack/stack/match/first.go +++ b/op-devstack/stack/match/first.go @@ -20,3 +20,4 @@ var FirstSuperchain = First[stack.SuperchainID, stack.Superchain]() var FirstCluster = First[stack.ClusterID, stack.Cluster]() var FirstFaucet = First[stack.FaucetID, stack.Faucet]() +var FirstSyncTester = First[stack.SyncTesterID, stack.SyncTester]() diff --git a/op-devstack/stack/match/labels.go b/op-devstack/stack/match/labels.go index ad2b217cfc04d..9224f6c855049 100644 --- a/op-devstack/stack/match/labels.go +++ b/op-devstack/stack/match/labels.go @@ -17,19 +17,22 @@ const ( LabelVendor = "vendor" ) -type L2ELVendor string +type Vendor string const ( - OpReth L2ELVendor = "op-reth" - OpGeth L2ELVendor = "op-geth" - Proxyd L2ELVendor = "proxyd" - FlashblocksWebsocketProxy L2ELVendor = "flashblocks-websocket-proxy" + Geth Vendor = "geth" + OpReth Vendor = "op-reth" + OpGeth Vendor = "op-geth" + Proxyd Vendor = "proxyd" + FlashblocksWebsocketProxy Vendor = "flashblocks-websocket-proxy" + OpNode Vendor = "op-node" + KonaNode Vendor = "kona-node" ) -func (v L2ELVendor) Match(elems []stack.L2ELNode) []stack.L2ELNode { +func (v Vendor) Match(elems []stack.L2ELNode) []stack.L2ELNode { return WithLabel[stack.L2ELNodeID, stack.L2ELNode](LabelVendor, string(v)).Match(elems) } -func (v L2ELVendor) String() string { +func (v Vendor) String() string { return string(v) } diff --git a/op-devstack/stack/matcher.go b/op-devstack/stack/matcher.go 
index 7785dbdbde3fe..afe97cfeff94d 100644 --- a/op-devstack/stack/matcher.go +++ b/op-devstack/stack/matcher.go @@ -60,3 +60,5 @@ type FlashblocksBuilderMatcher = Matcher[FlashblocksBuilderID, FlashblocksBuilde type L2ELMatcher = Matcher[L2ELNodeID, L2ELNode] type FaucetMatcher = Matcher[FaucetID, Faucet] + +type SyncTesterMatcher = Matcher[SyncTesterID, SyncTester] diff --git a/op-devstack/stack/network.go b/op-devstack/stack/network.go index 3cde98f8c3619..8342f144eb19b 100644 --- a/op-devstack/stack/network.go +++ b/op-devstack/stack/network.go @@ -19,10 +19,15 @@ type Network interface { Faucet(m FaucetMatcher) Faucet Faucets() []Faucet FaucetIDs() []FaucetID + + SyncTester(m SyncTesterMatcher) SyncTester + SyncTesters() []SyncTester + SyncTesterIDs() []SyncTesterID } type ExtensibleNetwork interface { Network AddFaucet(f Faucet) + AddSyncTester(st SyncTester) } diff --git a/op-devstack/stack/sync_tester.go b/op-devstack/stack/sync_tester.go new file mode 100644 index 0000000000000..0601e4e40fbaa --- /dev/null +++ b/op-devstack/stack/sync_tester.go @@ -0,0 +1,76 @@ +package stack + +import ( + "log/slog" + + "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// SyncTesterID identifies a syncTester by name and chainID, is type-safe, and can be value-copied and used as map key. 
+type SyncTesterID idWithChain + +var _ IDWithChain = (*SyncTesterID)(nil) + +const SyncTesterKind Kind = "SyncTester" + +func NewSyncTesterID(key string, chainID eth.ChainID) SyncTesterID { + return SyncTesterID{ + key: key, + chainID: chainID, + } +} + +func (id SyncTesterID) String() string { + return idWithChain(id).string(SyncTesterKind) +} + +func (id SyncTesterID) ChainID() eth.ChainID { + return idWithChain(id).chainID +} + +func (id SyncTesterID) Kind() Kind { + return SyncTesterKind +} + +func (id SyncTesterID) Key() string { + return id.key +} + +func (id SyncTesterID) LogValue() slog.Value { + return slog.StringValue(id.String()) +} + +func (id SyncTesterID) MarshalText() ([]byte, error) { + return idWithChain(id).marshalText(SyncTesterKind) +} + +func (id *SyncTesterID) UnmarshalText(data []byte) error { + return (*idWithChain)(id).unmarshalText(SyncTesterKind, data) +} + +func SortSyncTesterIDs(ids []SyncTesterID) []SyncTesterID { + return copyAndSort(ids, func(a, b SyncTesterID) bool { + return lessIDWithChain(idWithChain(a), idWithChain(b)) + }) +} + +func SortSyncTesters(elems []SyncTester) []SyncTester { + return copyAndSort(elems, func(a, b SyncTester) bool { + return lessIDWithChain(idWithChain(a.ID()), idWithChain(b.ID())) + }) +} + +var _ SyncTesterMatcher = SyncTesterID{} + +func (id SyncTesterID) Match(elems []SyncTester) []SyncTester { + return findByID(id, elems) +} + +type SyncTester interface { + Common + ID() SyncTesterID + API() apis.SyncTester + + APIWithSession(sessionID string) apis.SyncTester +} diff --git a/op-devstack/stack/system.go b/op-devstack/stack/system.go index 0eae00b6a83a1..d96b91b67f62a 100644 --- a/op-devstack/stack/system.go +++ b/op-devstack/stack/system.go @@ -45,6 +45,7 @@ type ExtensibleSystem interface { AddL2Network(v L2Network) AddSupervisor(v Supervisor) AddTestSequencer(v TestSequencer) + AddSyncTester(v SyncTester) } type TimeTravelClock interface { diff --git a/op-devstack/sysext/helpers.go 
b/op-devstack/sysext/helpers.go index f51b0fcb449a3..cd36fb192d2a8 100644 --- a/op-devstack/sysext/helpers.go +++ b/op-devstack/sysext/helpers.go @@ -85,8 +85,18 @@ func (orch *Orchestrator) httpClient(t devtest.T, service *descriptors.Service, func (orch *Orchestrator) findProtocolService(service *descriptors.Service, protocol string) (string, http.Header, error) { for proto, endpoint := range service.Endpoints { if proto == protocol { - if orch.env.Env.ReverseProxyURL != "" && len(endpoint.ReverseProxyHeader) > 0 && !orch.useDirectCnx { - return orch.env.Env.ReverseProxyURL, endpoint.ReverseProxyHeader, nil + // Force direct connect for websocket protocols + if protocol != WebsocketFlashblocksProtocol { + if orch.env.Env.ReverseProxyURL != "" && len(endpoint.ReverseProxyHeader) > 0 && !orch.useDirectCnx { + // For WebSocket protocols, convert HTTP URL to WebSocket URL + if protocol == WebsocketFlashblocksProtocol { + wsURL := strings.NewReplacer("http://", "ws://", "https://", "wss://").Replace(orch.env.Env.ReverseProxyURL) + wsURL += "/ws" + + return wsURL, endpoint.ReverseProxyHeader, nil + } + return orch.env.Env.ReverseProxyURL, endpoint.ReverseProxyHeader, nil + } } port := endpoint.Port diff --git a/op-devstack/sysext/l2.go b/op-devstack/sysext/l2.go index bf38d1d6af2c1..571c60fda2bb9 100644 --- a/op-devstack/sysext/l2.go +++ b/op-devstack/sysext/l2.go @@ -119,11 +119,40 @@ func (o *Orchestrator) hydrateL2ELCL(node *descriptors.Node, l2Net stack.Extensi clService, ok := node.Services[CLServiceName] require.True(ok, "need L2 CL service for chain", l2ID) + var endpointString string + // Parse the endpoint from the service descriptor. 
+ for proto, endpoint := range clService.Endpoints { + if proto == RPCProtocol { + port := endpoint.Port + if o.usePrivatePorts { + port = endpoint.PrivatePort + } + scheme := endpoint.Scheme + if scheme == "" { + scheme = HTTPProtocol + } + host := endpoint.Host + path := "" + if strings.Contains(host, "/") { + parts := strings.SplitN(host, "/", 2) + host = parts[0] + path = "/" + parts[1] + } + endpointString = fmt.Sprintf("%s://%s:%d%s", scheme, host, port, path) + break + } + } + + require.NotEmpty(endpointString, "no endpoint found for CL service", clService.Name) + + l2Net.Logger().Info("Found endpoint for CL service", "endpoint", endpointString) + clClient := o.rpcClient(l2Net.T(), clService, RPCProtocol, "/", opts...) l2CL := shim.NewL2CLNode(shim.L2CLNodeConfig{ ID: stack.NewL2CLNodeID(clService.Name, l2ID.ChainID()), CommonConfig: shim.NewCommonConfig(l2Net.T()), Client: clClient, + UserRPC: endpointString, }) l2Net.AddL2CLNode(l2CL) l2CL.(stack.LinkableL2CLNode).LinkEL(l2EL) @@ -177,7 +206,7 @@ func (o *Orchestrator) hydrateFlashblocksBuilderIfPresent(node *descriptors.Node associatedConductorService, ok := node.Services[ConductorServiceName] require.True(ok, "L2 rbuilder service must have an associated conductor service", l2ID) - flashblocksWsUrl, _, err := o.findProtocolService(rbuilderService, WebsocketFlashblocksProtocol) + flashblocksWsUrl, flashblocksWsHeaders, err := o.findProtocolService(rbuilderService, WebsocketFlashblocksProtocol) require.NoError(err, "failed to find websocket service for rbuilder") flashblocksBuilder := shim.NewFlashblocksBuilderNode(shim.FlashblocksBuilderNodeConfig{ @@ -187,8 +216,9 @@ func (o *Orchestrator) hydrateFlashblocksBuilderIfPresent(node *descriptors.Node Client: o.rpcClient(l2Net.T(), rbuilderService, RPCProtocol, "/", opts...), ChainID: l2ID.ChainID(), }, - Conductor: l2Net.Conductor(stack.ConductorID(associatedConductorService.Name)), - FlashblocksWsUrl: flashblocksWsUrl, + Conductor: 
l2Net.Conductor(stack.ConductorID(associatedConductorService.Name)), + FlashblocksWsUrl: flashblocksWsUrl, + FlashblocksWsHeaders: flashblocksWsHeaders, }) l2Net.AddFlashblocksBuilder(flashblocksBuilder) @@ -231,13 +261,14 @@ func (o *Orchestrator) hydrateFlashblocksWebsocketProxyMaybe(net *descriptors.L2 } for _, instance := range fbWsProxyService { - wsUrl, _, err := o.findProtocolService(instance, WebsocketFlashblocksProtocol) + wsUrl, wsHeaders, err := o.findProtocolService(instance, WebsocketFlashblocksProtocol) require.NoError(err, "failed to get the websocket url for the flashblocks websocket proxy", "service", instance.Name) fbWsProxyShim := shim.NewFlashblocksWebsocketProxy(shim.FlashblocksWebsocketProxyConfig{ CommonConfig: shim.NewCommonConfig(l2Net.T()), ID: stack.NewFlashblocksWebsocketProxyID(instance.Name, l2ID.ChainID()), WsUrl: wsUrl, + WsHeaders: wsHeaders, }) fbWsProxyShim.SetLabel(match.LabelVendor, string(match.FlashblocksWebsocketProxy)) l2Net.AddFlashblocksWebsocketProxy(fbWsProxyShim) diff --git a/op-devstack/sysext/system.go b/op-devstack/sysext/system.go index 7d76f6face2e8..6927517153fcf 100644 --- a/op-devstack/sysext/system.go +++ b/op-devstack/sysext/system.go @@ -2,10 +2,16 @@ package sysext import ( "encoding/json" + "fmt" "github.com/ethereum-optimism/optimism/op-devstack/shim" "github.com/ethereum-optimism/optimism/op-devstack/stack" + client "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" + "github.com/ethereum/go-ethereum/common" + gn "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/rpc" ) func (o *Orchestrator) hydrateSuperchain(sys stack.ExtensibleSystem) { @@ -68,10 +74,53 @@ func (o *Orchestrator) hydrateSupervisorsMaybe(sys stack.ExtensibleSystem) { } func (o *Orchestrator) hydrateTestSequencersMaybe(sys stack.ExtensibleSystem) { - 
sys.AddTestSequencer(shim.NewTestSequencer(shim.TestSequencerConfig{ - CommonConfig: shim.NewCommonConfig(sys.T()), - ID: stack.TestSequencerID("dummy"), - Client: nil, - ControlClients: nil, - })) + sequencers := make(map[stack.TestSequencerID]bool) + + // Collect all L2 chain IDs and the shared JWT secret + var ( + chainIDs []eth.ChainID + jwt string + ) + + for _, l2 := range o.env.Env.L2 { + chainID, _ := eth.ChainIDFromString(l2.Chain.ID) + chainIDs = append(chainIDs, chainID) + jwt = l2.JWT + } + + opts := []client.RPCOption{ + client.WithGethRPCOptions(rpc.WithHTTPAuth(gn.NewJWTAuth(common.HexToHash(jwt)))), + } + + for _, l2 := range o.env.Env.L2 { + if sequencerService, ok := l2.Services["test-sequencer"]; ok { + for _, instance := range sequencerService { + id := stack.TestSequencerID(instance.Name) + if sequencers[id] { + // Each test_sequencer appears in multiple L2s + // So we need to deduplicate + continue + } + sequencers[id] = true + + cc := make(map[eth.ChainID]client.RPC, len(chainIDs)) + for _, chainID := range chainIDs { + cc[chainID] = o.rpcClient( + sys.T(), + instance, + RPCProtocol, + fmt.Sprintf("/sequencers/sequencer-%s", chainID.String()), + opts..., + ) + } + + sys.AddTestSequencer(shim.NewTestSequencer(shim.TestSequencerConfig{ + CommonConfig: shim.NewCommonConfig(sys.T()), + ID: id, + Client: o.rpcClient(sys.T(), instance, RPCProtocol, "/", opts...), + ControlClients: cc, + })) + } + } + } } diff --git a/op-devstack/sysgo/control_plane_test.go b/op-devstack/sysgo/control_plane_test.go index b3b02e24dbbfd..6c601d4ffe382 100644 --- a/op-devstack/sysgo/control_plane_test.go +++ b/op-devstack/sysgo/control_plane_test.go @@ -3,7 +3,6 @@ package sysgo import ( "context" "errors" - "syscall" "testing" "time" @@ -81,7 +80,7 @@ func testSupervisorRestart(ids DefaultInteropSystemIDs, system stack.System, con return supervisor.QueryAPI().SyncStatus(ctx) }) cancel() - require.True(t, errors.Is(err, syscall.ECONNREFUSED)) + require.Error(t, err) } 
// restart supervisor @@ -119,14 +118,16 @@ func testL2CLRestart(ids DefaultInteropSystemIDs, system stack.System, control s // stop L2CL control.L2CLNodeState(ids.L2ACL, stack.Stop) - // L2CL API will not work since L2CL stopped + // L2CL API will still kind of work, it is not functioning, + // but since L2CL is behind a proxy, the proxy is still online, and may create a different error. + // The dial will be accepted, and the connection then closed, once the connection behind the proxy fails. { ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) _, err := retry.Do[*eth.SyncStatus](ctx, 10, retry.Fixed(time.Millisecond*500), func() (*eth.SyncStatus, error) { return seqA.RollupAPI().SyncStatus(ctx) }) cancel() - require.True(t, errors.Is(err, syscall.ECONNREFUSED)) + require.Error(t, err, "should not be able to get sync-status when node behind proxy is offline") } // restart L2CL diff --git a/op-devstack/sysgo/deployer.go b/op-devstack/sysgo/deployer.go index b64c9be872ca2..4c204d5b9bea6 100644 --- a/op-devstack/sysgo/deployer.go +++ b/op-devstack/sysgo/deployer.go @@ -41,6 +41,12 @@ func WithDeployerOptions(opts ...DeployerOption) stack.Option[*Orchestrator] { type DeployerPipelineOption func(wb *worldBuilder, intent *state.Intent, cfg *deployer.ApplyPipelineOpts) +func WithDeployerCacheDir(dirPath string) DeployerPipelineOption { + return func(_ *worldBuilder, _ *state.Intent, cfg *deployer.ApplyPipelineOpts) { + cfg.CacheDir = dirPath + } +} + func WithDeployerPipelineOption(opt DeployerPipelineOption) stack.Option[*Orchestrator] { return stack.BeforeDeploy(func(o *Orchestrator) { o.deployerPipelineOptions = append(o.deployerPipelineOptions, opt) @@ -167,6 +173,13 @@ var ( millionEth = new(uint256.Int).Mul(uint256.NewInt(1e6), oneEth) ) +func WithEmbeddedContractSources() DeployerOption { + return func(_ devtest.P, _ devkeys.Keys, builder intentbuilder.Builder) { + builder.WithL1ContractsLocator(artifacts.EmbeddedLocator) + 
builder.WithL2ContractsLocator(artifacts.EmbeddedLocator) + } +} + func WithLocalContractSources() DeployerOption { return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { paths, err := contractPaths() @@ -236,6 +249,13 @@ func WithPrefundedL2(l1ChainID, l2ChainID eth.ChainID) DeployerOption { } } +// WithDevFeatureBitmap sets the dev feature bitmap. +func WithDevFeatureBitmap(devFlags common.Hash) DeployerOption { + return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { + builder.WithGlobalOverride("devFeatureBitmap", devFlags) + } +} + // WithInteropAtGenesis activates interop at genesis for all known L2s func WithInteropAtGenesis() DeployerOption { return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { @@ -245,6 +265,34 @@ func WithInteropAtGenesis() DeployerOption { } } +// WithHardforkSequentialActivation configures a deployment such that L2 chains +// activate hardforks sequentially, starting from startFork and continuing +// until (but not including) endFork. Each successive fork is scheduled at +// an increasing offset. +func WithHardforkSequentialActivation(startFork, endFork rollup.ForkName, delta *uint64) DeployerOption { + return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { + for _, l2Cfg := range builder.L2s() { + l2Cfg.WithForkAtGenesis(startFork) + activateWithOffset := false + deactivate := false + for idx, refFork := range rollup.AllForks { + if deactivate || refFork == endFork { + l2Cfg.WithForkAtOffset(refFork, nil) + deactivate = true + continue + } + if activateWithOffset { + offset := *delta * uint64(idx) + l2Cfg.WithForkAtOffset(refFork, &offset) + } + if startFork == refFork { + activateWithOffset = true + } + } + } + } +} + // WithSequencingWindow overrides the number of L1 blocks in a sequencing window, applied to all L2s. 
func WithSequencingWindow(n uint64) DeployerOption { return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { diff --git a/op-devstack/sysgo/engine_client.go b/op-devstack/sysgo/engine_client.go new file mode 100644 index 0000000000000..d825cd6c5df52 --- /dev/null +++ b/op-devstack/sysgo/engine_client.go @@ -0,0 +1,93 @@ +package sysgo + +import ( + "context" + + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" + "github.com/ethereum/go-ethereum/beacon/engine" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + gn "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/rpc" + gethrpc "github.com/ethereum/go-ethereum/rpc" +) + +type engineClient struct { + inner *rpc.Client +} + +func dialEngine(ctx context.Context, endpoint string, jwtSecret [32]byte) (*engineClient, error) { + engineCl, err := gethrpc.DialOptions(ctx, endpoint, rpc.WithHTTPAuth(gn.NewJWTAuth(jwtSecret))) + if err != nil { + return nil, err + } + return &engineClient{ + inner: engineCl, + }, nil +} + +var _ geth.EngineAPI = (*engineClient)(nil) + +func (e *engineClient) forkchoiceUpdated(fs engine.ForkchoiceStateV1, pa *engine.PayloadAttributes, method string) (engine.ForkChoiceResponse, error) { + var x engine.ForkChoiceResponse + if err := e.inner.CallContext(context.Background(), &x, method, fs, pa); err != nil { + return engine.ForkChoiceResponse{}, err + } + return x, nil +} + +func (e *engineClient) ForkchoiceUpdatedV2(fs engine.ForkchoiceStateV1, pa *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) { + return e.forkchoiceUpdated(fs, pa, "engine_forkchoiceUpdatedV2") +} + +func (e *engineClient) ForkchoiceUpdatedV3(fs engine.ForkchoiceStateV1, pa *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) { + return e.forkchoiceUpdated(fs, pa, "engine_forkchoiceUpdatedV3") +} + +func (e *engineClient) getPayload(id engine.PayloadID, method string) (*engine.ExecutionPayloadEnvelope, 
error) { + var result engine.ExecutionPayloadEnvelope + if err := e.inner.CallContext(context.Background(), &result, method, id); err != nil { + return nil, err + } + return &result, nil +} + +func (e *engineClient) GetPayloadV2(id engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { + return e.getPayload(id, "engine_getPayloadV2") +} + +func (e *engineClient) GetPayloadV3(id engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { + return e.getPayload(id, "engine_getPayloadV3") +} + +func (e *engineClient) GetPayloadV4(id engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { + return e.getPayload(id, "engine_getPayloadV4") +} + +func (e *engineClient) GetPayloadV5(id engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) { + return e.getPayload(id, "engine_getPayloadV5") +} + +func (e *engineClient) NewPayloadV2(data engine.ExecutableData) (engine.PayloadStatusV1, error) { + var result engine.PayloadStatusV1 + if err := e.inner.CallContext(context.Background(), &result, "engine_newPayloadV2", data); err != nil { + return engine.PayloadStatusV1{}, err + } + return result, nil +} + +func (e *engineClient) NewPayloadV3(data engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash) (engine.PayloadStatusV1, error) { + var result engine.PayloadStatusV1 + if err := e.inner.CallContext(context.Background(), &result, "engine_newPayloadV3", data, versionedHashes, beaconRoot); err != nil { + return engine.PayloadStatusV1{}, err + } + return result, nil +} + +func (e *engineClient) NewPayloadV4(data engine.ExecutableData, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes) (engine.PayloadStatusV1, error) { + var result engine.PayloadStatusV1 + if err := e.inner.CallContext(context.Background(), &result, "engine_newPayloadV4", data, versionedHashes, beaconRoot, executionRequests); err != nil { + return engine.PayloadStatusV1{}, err + } + return result, nil +} diff --git 
a/op-devstack/sysgo/faucet.go b/op-devstack/sysgo/faucet.go index c9b630c4d0180..3e377a08e4d1f 100644 --- a/op-devstack/sysgo/faucet.go +++ b/op-devstack/sysgo/faucet.go @@ -23,6 +23,10 @@ type FaucetService struct { } func (n *FaucetService) hydrate(system stack.ExtensibleSystem) { + if n == nil || n.service == nil { + return + } + require := system.T().Require() for faucetID, chainID := range n.service.Faucets() { @@ -71,7 +75,7 @@ func WithFaucets(l1ELs []stack.L1ELNodeID, l2ELs []stack.L2ELNodeID) stack.Optio require.True(ok, "need L1 EL for faucet", elID) faucets[id] = &fconf.FaucetEntry{ - ELRPC: endpoint.MustRPC{Value: endpoint.URL(el.userRPC)}, + ELRPC: endpoint.MustRPC{Value: endpoint.URL(el.UserRPC())}, ChainID: elID.ChainID(), TxCfg: fconf.TxManagerConfig{ PrivateKey: funderKeyStr, @@ -86,7 +90,7 @@ func WithFaucets(l1ELs []stack.L1ELNodeID, l2ELs []stack.L2ELNodeID) stack.Optio require.True(ok, "need L2 EL for faucet", elID) faucets[id] = &fconf.FaucetEntry{ - ELRPC: endpoint.MustRPC{Value: endpoint.URL(el.userRPC)}, + ELRPC: endpoint.MustRPC{Value: endpoint.URL(el.UserRPC())}, ChainID: elID.ChainID(), TxCfg: fconf.TxManagerConfig{ PrivateKey: funderKeyStr, @@ -94,7 +98,9 @@ func WithFaucets(l1ELs []stack.L1ELNodeID, l2ELs []stack.L2ELNodeID) stack.Optio } } cfg := &config.Config{ - RPC: oprpc.CLIConfig{}, + RPC: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + }, Faucets: &fconf.Config{ Faucets: faucets, }, diff --git a/op-devstack/sysgo/l1_nodes.go b/op-devstack/sysgo/l1_nodes.go index 58cad02fc8233..1f8d879434f21 100644 --- a/op-devstack/sysgo/l1_nodes.go +++ b/op-devstack/sysgo/l1_nodes.go @@ -1,25 +1,44 @@ package sysgo import ( + "os" "path/filepath" "github.com/ethereum-optimism/optimism/op-devstack/shim" "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/blobstore" 
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/fakebeacon" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/clock" ) -type L1ELNode struct { +type L1ELNode interface { + hydrator + l1ELNode() + UserRPC() string + AuthRPC() string +} + +type L1Geth struct { id stack.L1ELNodeID userRPC string + authRPC string l1Geth *geth.GethInstance blobPath string } -func (n *L1ELNode) hydrate(system stack.ExtensibleSystem) { +func (*L1Geth) l1ELNode() {} + +func (g *L1Geth) UserRPC() string { + return g.userRPC +} + +func (g *L1Geth) AuthRPC() string { + return g.authRPC +} + +func (n *L1Geth) hydrate(system stack.ExtensibleSystem) { require := system.T().Require() rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), n.userRPC, client.WithLazyDial()) require.NoError(err) @@ -54,7 +73,18 @@ func (n *L1CLNode) hydrate(system stack.ExtensibleSystem) { l1Net.(stack.ExtensibleL1Network).AddL1CLNode(frontend) } +const DevstackL1ELKindEnvVar = "DEVSTACK_L1EL_KIND" + func WithL1Nodes(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID) stack.Option[*Orchestrator] { + switch os.Getenv(DevstackL1ELKindEnvVar) { + case "geth": + return WithL1NodesSubprocess(l1ELID, l1CLID) + default: + return WithL1NodesInProcess(l1ELID, l1CLID) + } +} + +func WithL1NodesInProcess(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { clP := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l1CLID)) elP := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l1ELID)) @@ -73,7 +103,7 @@ func WithL1Nodes(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID) stack.Option[ blobPath := clP.TempDir() clLogger := clP.Logger() - bcn := fakebeacon.NewBeacon(clLogger, e2eutils.NewBlobStore(), l1Net.genesis.Timestamp, blockTimeL1) + bcn := fakebeacon.NewBeacon(clLogger, blobstore.New(), l1Net.genesis.Timestamp, 
blockTimeL1) clP.Cleanup(func() { _ = bcn.Close() }) @@ -81,6 +111,8 @@ func WithL1Nodes(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID) stack.Option[ beaconApiAddr := bcn.BeaconAddr() require.NotEmpty(beaconApiAddr, "beacon API listener must be up") + orch.writeDefaultJWT() + elLogger := elP.Logger() l1Geth, fp, err := geth.InitL1( blockTimeL1, @@ -88,7 +120,9 @@ func WithL1Nodes(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID) stack.Option[ l1Net.genesis, l1Clock, filepath.Join(blobPath, "l1_el"), - bcn) + bcn, + geth.WithAuth(orch.jwtPath), + ) require.NoError(err) require.NoError(l1Geth.Node.Start()) elP.Cleanup(func() { @@ -96,9 +130,10 @@ func WithL1Nodes(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID) stack.Option[ _ = l1Geth.Close() }) - l1ELNode := &L1ELNode{ + l1ELNode := &L1Geth{ id: l1ELID, userRPC: l1Geth.Node.HTTPEndpoint(), + authRPC: l1Geth.Node.HTTPAuthEndpoint(), l1Geth: l1Geth, blobPath: blobPath, } @@ -113,3 +148,24 @@ func WithL1Nodes(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID) stack.Option[ require.True(orch.l1CLs.SetIfMissing(l1CLID, l1CLNode), "must not already exist") }) } + +// WithExtL1Nodes initializes L1 EL and CL nodes that connect to external RPC endpoints +func WithExtL1Nodes(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID, elRPCEndpoint string, clRPCEndpoint string) stack.Option[*Orchestrator] { + return stack.AfterDeploy(func(orch *Orchestrator) { + require := orch.P().Require() + + // Create L1 EL node with external RPC + l1ELNode := &L1Geth{ + id: l1ELID, + userRPC: elRPCEndpoint, + } + require.True(orch.l1ELs.SetIfMissing(l1ELID, l1ELNode), "must not already exist") + + // Create L1 CL node with external RPC + l1CLNode := &L1CLNode{ + id: l1CLID, + beaconHTTPAddr: clRPCEndpoint, + } + require.True(orch.l1CLs.SetIfMissing(l1CLID, l1CLNode), "must not already exist") + }) +} diff --git a/op-devstack/sysgo/l1_nodes_subprocess.go b/op-devstack/sysgo/l1_nodes_subprocess.go new file mode 100644 index 
0000000000000..90ce7a4cfcfe7 --- /dev/null +++ b/op-devstack/sysgo/l1_nodes_subprocess.go @@ -0,0 +1,242 @@ +package sysgo + +import ( + "encoding/json" + "os" + "os/exec" + "path/filepath" + "sync" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/blobstore" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/fakebeacon" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/logpipe" + "github.com/ethereum-optimism/optimism/op-service/tasks" + "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" + "github.com/ethereum/go-ethereum/ethclient" +) + +type ExternalL1Geth struct { + mu sync.Mutex + + id stack.L1ELNodeID + l1Net *L1Network + // authRPC points to a proxy that forwards to geth's endpoint + authRPC string + // userRPC points to a proxy that forwards to geth's endpoint + userRPC string + + authProxy *tcpproxy.Proxy + userProxy *tcpproxy.Proxy + + execPath string + args []string + // Each entry is of the form "key=value". 
+ env []string + + p devtest.P + + sub *SubProcess +} + +func (*ExternalL1Geth) l1ELNode() {} + +func (n *ExternalL1Geth) hydrate(system stack.ExtensibleSystem) { + require := system.T().Require() + rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), n.userRPC, client.WithLazyDial()) + require.NoError(err) + system.T().Cleanup(rpcCl.Close) + + l1Net := system.L1Network(stack.L1NetworkID(n.id.ChainID())) + sysL1EL := shim.NewL1ELNode(shim.L1ELNodeConfig{ + ID: n.id, + ELNodeConfig: shim.ELNodeConfig{ + CommonConfig: shim.NewCommonConfig(system.T()), + Client: rpcCl, + ChainID: n.id.ChainID(), + }, + }) + sysL1EL.SetLabel(match.LabelVendor, string(match.Geth)) + l1Net.(stack.ExtensibleL1Network).AddL1ELNode(sysL1EL) +} + +func (n *ExternalL1Geth) Start() { + n.mu.Lock() + defer n.mu.Unlock() + if n.sub != nil { + n.p.Logger().Warn("geth already started") + return + } + if n.authProxy == nil { + n.authProxy = tcpproxy.New(n.p.Logger()) + n.p.Require().NoError(n.authProxy.Start()) + n.p.Cleanup(func() { + n.authProxy.Close() + }) + n.authRPC = "ws://" + n.authProxy.Addr() + } + if n.userProxy == nil { + n.userProxy = tcpproxy.New(n.p.Logger()) + n.p.Require().NoError(n.userProxy.Start()) + n.p.Cleanup(func() { + n.userProxy.Close() + }) + n.userRPC = "ws://" + n.userProxy.Addr() + } + logOut := logpipe.ToLogger(n.p.Logger().New("src", "stdout")) + logErr := logpipe.ToLogger(n.p.Logger().New("src", "stderr")) + userRPC := make(chan string, 1) + authRPC := make(chan string, 1) + onLogEntry := func(e logpipe.LogEntry) { + switch e.LogMessage() { + case "WebSocket enabled": + select { + case userRPC <- e.FieldValue("url").(string): + default: + } + case "HTTP server started": + if e.FieldValue("auth").(bool) { + select { + case authRPC <- "http://" + e.FieldValue("endpoint").(string): + default: + } + } + } + } + stdOutLogs := logpipe.LogProcessor(func(line []byte) { + e := logpipe.ParseGoStructuredLogs(line) + logOut(e) + onLogEntry(e) + }) + stdErrLogs := 
logpipe.LogProcessor(func(line []byte) { + e := logpipe.ParseGoStructuredLogs(line) + logErr(e) + onLogEntry(e) + }) + n.sub = NewSubProcess(n.p, stdOutLogs, stdErrLogs) + + err := n.sub.Start(n.execPath, n.args, n.env) + n.p.Require().NoError(err, "Must start") + + var userRPCAddr, authRPCAddr string + n.p.Require().NoError(tasks.Await(n.p.Ctx(), userRPC, &userRPCAddr), "need user RPC") + n.p.Require().NoError(tasks.Await(n.p.Ctx(), authRPC, &authRPCAddr), "need auth RPC") + + n.userProxy.SetUpstream(ProxyAddr(n.p.Require(), userRPCAddr)) + n.authProxy.SetUpstream(ProxyAddr(n.p.Require(), authRPCAddr)) +} + +func (n *ExternalL1Geth) Stop() { + n.mu.Lock() + defer n.mu.Unlock() + err := n.sub.Stop() + n.p.Require().NoError(err, "Must stop") + n.sub = nil +} + +func (n *ExternalL1Geth) UserRPC() string { + return n.userRPC +} + +func (n *ExternalL1Geth) AuthRPC() string { + return n.authRPC +} + +const GethExecPathEnvVar = "SYSGO_GETH_EXEC_PATH" + +func WithL1NodesSubprocess(id stack.L1ELNodeID, clID stack.L1CLNodeID) stack.Option[*Orchestrator] { + return stack.AfterDeploy(func(orch *Orchestrator) { + p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) + require := p.Require() + + execPath, ok := os.LookupEnv(GethExecPathEnvVar) + require.True(ok) + _, err := os.Stat(execPath) + p.Require().NotErrorIs(err, os.ErrNotExist, "geth executable must exist") + + l1Net, ok := orch.l1Nets.Get(id.ChainID()) + require.True(ok, "L1 network required") + + jwtPath, jwtSecret := orch.writeDefaultJWT() + + tempDir := p.TempDir() + data, err := json.Marshal(l1Net.genesis) + p.Require().NoError(err, "must json-encode genesis") + chainConfigPath := filepath.Join(tempDir, "genesis.json") + p.Require().NoError(os.WriteFile(chainConfigPath, data, 0o644), "must write genesis file") + + dataDirPath := filepath.Join(tempDir, "data") + p.Require().NoError(os.MkdirAll(dataDirPath, 0o755), "must create datadir") + + cmd := exec.Command(execPath, "--datadir", dataDirPath, "init", 
chainConfigPath) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + require.NoError(cmd.Run(), "initialize geth datadir") + + args := []string{ + "--log.format", "json", + "--datadir", dataDirPath, + "--ws", "--ws.addr", "127.0.0.1", "--ws.port", "0", "--ws.origins", "*", "--ws.api", "admin,debug,eth,net,txpool", + "--authrpc.addr", "127.0.0.1", "--authrpc.port", "0", "--authrpc.jwtsecret", jwtPath, + "--ipcdisable", + "--nodiscover", + "--verbosity", "5", + "--miner.recommit", "2s", + "--gcmode", "archive", + } + + l1EL := &ExternalL1Geth{ + id: id, + l1Net: l1Net, + authRPC: "", + userRPC: "", + execPath: execPath, + args: args, + env: []string{}, + p: p, + } + + p.Logger().Info("Starting geth") + l1EL.Start() + p.Cleanup(l1EL.Stop) + p.Logger().Info("geth is ready", "userRPC", l1EL.userRPC, "authRPC", l1EL.authRPC) + require.True(orch.l1ELs.SetIfMissing(id, l1EL), "must be unique L2 EL node") + + backend, err := ethclient.DialContext(p.Ctx(), l1EL.userRPC) + require.NoError(err) + + l1Clock := clock.SystemClock + if orch.timeTravelClock != nil { + l1Clock = orch.timeTravelClock + } + + bcn := fakebeacon.NewBeacon(p.Logger(), blobstore.New(), l1Net.genesis.Timestamp, l1Net.blockTime) + p.Cleanup(func() { + _ = bcn.Close() + }) + require.NoError(bcn.Start("127.0.0.1:0")) + beaconApiAddr := bcn.BeaconAddr() + require.NotEmpty(beaconApiAddr, "beacon API listener must be up") + + engineCl, err := dialEngine(p.Ctx(), l1EL.AuthRPC(), jwtSecret) + require.NoError(err) + fp := &FakePoS{ + p: p, + fakepos: geth.NewFakePoS(backend, engineCl, l1Clock, p.Logger(), l1Net.blockTime, 20, bcn, l1Net.genesis.Config), + } + fp.Start() + p.Cleanup(fp.Stop) + orch.l1CLs.Set(clID, &L1CLNode{ + id: clID, + beaconHTTPAddr: bcn.BeaconAddr(), + beacon: bcn, + fakepos: fp, + }) + }) +} diff --git a/op-devstack/sysgo/l2_batcher.go b/op-devstack/sysgo/l2_batcher.go index e5a11684d86ca..643858c18d1a6 100644 --- a/op-devstack/sysgo/l2_batcher.go +++ b/op-devstack/sysgo/l2_batcher.go @@ -86,9 
+86,9 @@ func WithBatcher(batcherID stack.L2BatcherID, l1ELID stack.L1ELNodeID, l2CLID st logger.Info("Batcher key acquired", "addr", crypto.PubkeyToAddress(batcherSecret.PublicKey)) batcherCLIConfig := &bss.CLIConfig{ - L1EthRpc: l1EL.userRPC, - L2EthRpc: []string{l2EL.userRPC}, - RollupRpc: []string{l2CL.userRPC}, + L1EthRpc: l1EL.UserRPC(), + L2EthRpc: []string{l2EL.UserRPC()}, + RollupRpc: []string{l2CL.UserRPC()}, MaxPendingTransactions: 1, MaxChannelDuration: 1, MaxL1TxSize: 120_000, @@ -97,7 +97,7 @@ func WithBatcher(batcherID stack.L2BatcherID, l1ELID stack.L1ELNodeID, l2CLID st ApproxComprRatio: 0.4, SubSafetyMargin: 4, PollInterval: 500 * time.Millisecond, - TxMgrConfig: setuputils.NewTxMgrConfig(endpoint.URL(l1EL.userRPC), batcherSecret), + TxMgrConfig: setuputils.NewTxMgrConfig(endpoint.URL(l1EL.UserRPC()), batcherSecret), LogConfig: oplog.CLIConfig{ Level: log.LevelInfo, Format: oplog.FormatText, @@ -132,9 +132,9 @@ func WithBatcher(batcherID stack.L2BatcherID, l1ELID stack.L1ELNodeID, l2CLID st id: batcherID, service: batcher, rpc: batcher.HTTPEndpoint(), - l1RPC: l1EL.userRPC, - l2CLRPC: l2CL.userRPC, - l2ELRPC: l2EL.userRPC, + l1RPC: l1EL.UserRPC(), + l2CLRPC: l2CL.UserRPC(), + l2ELRPC: l2EL.UserRPC(), } orch.batchers.Set(batcherID, b) }) diff --git a/op-devstack/sysgo/l2_challenger.go b/op-devstack/sysgo/l2_challenger.go index db112b8b92f7e..4a56e1fc19458 100644 --- a/op-devstack/sysgo/l2_challenger.go +++ b/op-devstack/sysgo/l2_challenger.go @@ -101,6 +101,12 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen l2NetIDs = append(l2NetIDs, l2Net.id) } + l1Net, ok := orch.l1Nets.Get(l1ELID.ChainID()) + if !ok { + require.Fail("l1 network not found") + } + l1Genesis := l1Net.genesis + dir := p.TempDir() var cfg *config.Config // If interop is scheduled, or if we cannot do the pre-interop connection, then set up with supervisor @@ -113,16 +119,16 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID 
stack.L2Challen for i, l2ELID := range l2ELIDs { l2EL, ok := orch.l2ELs.Get(l2ELID) require.True(ok) - l2ELRPCs[i] = l2EL.userRPC + l2ELRPCs[i] = l2EL.UserRPC() } cluster, ok := orch.clusters.Get(*clusterID) require.True(ok) prestateVariant := shared.InteropVariant - cfg, err = shared.NewInteropChallengerConfig(dir, l1EL.userRPC, l1CL.beaconHTTPAddr, supervisorNode.userRPC, l2ELRPCs, + cfg, err = shared.NewInteropChallengerConfig(dir, l1EL.UserRPC(), l1CL.beaconHTTPAddr, supervisorNode.UserRPC(), l2ELRPCs, shared.WithFactoryAddress(disputeGameFactoryAddr), shared.WithPrivKey(challengerSecret), shared.WithDepset(cluster.DepSet()), - shared.WithCannonConfig(rollupCfgs, l2Geneses, prestateVariant), + shared.WithCannonConfig(rollupCfgs, l1Genesis, l2Geneses, prestateVariant), shared.WithSuperCannonTraceType(), shared.WithSuperPermissionedTraceType(), ) @@ -143,10 +149,10 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen l2EL, ok := orch.l2ELs.Get(l2ELID) require.True(ok) prestateVariant := shared.MTCannonVariant - cfg, err = shared.NewPreInteropChallengerConfig(dir, l1EL.userRPC, l1CL.beaconHTTPAddr, l2CL.userRPC, l2EL.userRPC, + cfg, err = shared.NewPreInteropChallengerConfig(dir, l1EL.UserRPC(), l1CL.beaconHTTPAddr, l2CL.UserRPC(), l2EL.UserRPC(), shared.WithFactoryAddress(disputeGameFactoryAddr), shared.WithPrivKey(challengerSecret), - shared.WithCannonConfig(rollupCfgs, l2Geneses, prestateVariant), + shared.WithCannonConfig(rollupCfgs, l1Genesis, l2Geneses, prestateVariant), shared.WithCannonTraceType(), shared.WithPermissionedTraceType(), shared.WithFastGames(), diff --git a/op-devstack/sysgo/l2_cl.go b/op-devstack/sysgo/l2_cl.go index 4341746d6d271..32f16a679c9a5 100644 --- a/op-devstack/sysgo/l2_cl.go +++ b/op-devstack/sysgo/l2_cl.go @@ -1,385 +1,99 @@ package sysgo import ( - "context" - "encoding/hex" - "flag" - "fmt" - "sync" - "time" + "os" - altda "github.com/ethereum-optimism/optimism/op-alt-da" - 
"github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/shim" "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/opnode" - "github.com/ethereum-optimism/optimism/op-node/config" - opNodeFlags "github.com/ethereum-optimism/optimism/op-node/flags" - "github.com/ethereum-optimism/optimism/op-node/p2p" - p2pcli "github.com/ethereum-optimism/optimism/op-node/p2p/cli" - "github.com/ethereum-optimism/optimism/op-node/rollup/driver" - "github.com/ethereum-optimism/optimism/op-node/rollup/interop" nodeSync "github.com/ethereum-optimism/optimism/op-node/rollup/sync" - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/eth" - opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" - "github.com/ethereum-optimism/optimism/op-service/oppprof" - "github.com/ethereum-optimism/optimism/op-service/retry" - oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" - "github.com/ethereum-optimism/optimism/op-service/sources" - "github.com/ethereum-optimism/optimism/op-service/testreq" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" - "github.com/urfave/cli/v2" ) -type L2CLNode struct { - mu sync.Mutex - - id stack.L2CLNodeID - opNode *opnode.Opnode - userRPC string - interopEndpoint string - interopJwtSecret eth.Bytes32 - cfg *config.Config - p devtest.P - logger log.Logger - el stack.L2ELNodeID +type L2CLNode interface { + hydrate(system stack.ExtensibleSystem) + stack.Lifecycle + UserRPC() string + InteropRPC() (endpoint string, jwtSecret eth.Bytes32) } -var _ stack.Lifecycle = (*L2CLNode)(nil) - -func (n *L2CLNode) hydrate(system stack.ExtensibleSystem) { - 
require := system.T().Require() - rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), n.userRPC, client.WithLazyDial()) - require.NoError(err) - system.T().Cleanup(rpcCl.Close) - - sysL2CL := shim.NewL2CLNode(shim.L2CLNodeConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - ID: n.id, - Client: rpcCl, - InteropEndpoint: n.interopEndpoint, - InteropJwtSecret: n.interopJwtSecret, - }) - l2Net := system.L2Network(stack.L2NetworkID(n.id.ChainID())) - l2Net.(stack.ExtensibleL2Network).AddL2CLNode(sysL2CL) - sysL2CL.(stack.LinkableL2CLNode).LinkEL(l2Net.L2ELNode(n.el)) -} +type L2CLConfig struct { + // SyncMode to run, if this is a sequencer + SequencerSyncMode nodeSync.Mode + // SyncMode to run, if this is a verifier + VerifierSyncMode nodeSync.Mode -func (n *L2CLNode) rememberPort() { - userRPCPort, err := n.opNode.UserRPCPort() - n.p.Require().NoError(err) - n.cfg.RPC.ListenPort = userRPCPort + // SafeDBPath is the path to the safe DB to use. Disabled if empty. + SafeDBPath string - cfg, ok := n.cfg.InteropConfig.(*interop.Config) - n.p.Require().True(ok) + IsSequencer bool + IndexingMode bool - if interopRPCPort, err := n.opNode.InteropRPCPort(); err == nil { - cfg.RPCPort = interopRPCPort - } - n.cfg.InteropConfig = cfg + // EnableReqRespSync is the flag to enable/disable req-resp sync. 
+ EnableReqRespSync bool } -func (n *L2CLNode) Start() { - n.mu.Lock() - defer n.mu.Unlock() - if n.opNode != nil { - n.logger.Warn("Op-node already started") - return - } - n.logger.Info("Starting op-node") - opNode, err := opnode.NewOpnode(n.logger, n.cfg, func(err error) { - n.p.Require().NoError(err, "op-node critical error") +func L2CLSequencer() L2CLOption { + return L2CLOptionFn(func(p devtest.P, id stack.L2CLNodeID, cfg *L2CLConfig) { + cfg.IsSequencer = true }) - n.p.Require().NoError(err, "op-node failed to start") - n.logger.Info("Started op-node") - n.opNode = opNode - - // store endpoints to reuse when restart - n.userRPC = opNode.UserRPC().RPC() - interopEndpoint, interopJwtSecret := opNode.InteropRPC() - n.interopEndpoint = interopEndpoint - n.interopJwtSecret = interopJwtSecret - // for p2p endpoints / node keys, they are already persistent, stored at p2p configs +} - n.rememberPort() +func L2CLIndexing() L2CLOption { + return L2CLOptionFn(func(p devtest.P, id stack.L2CLNodeID, cfg *L2CLConfig) { + cfg.IndexingMode = true + }) } -func (n *L2CLNode) Stop() { - n.mu.Lock() - defer n.mu.Unlock() - if n.opNode == nil { - n.logger.Warn("Op-node already stopped") - return +func DefaultL2CLConfig() *L2CLConfig { + return &L2CLConfig{ + SequencerSyncMode: nodeSync.CLSync, + VerifierSyncMode: nodeSync.CLSync, + SafeDBPath: "", + IsSequencer: false, + IndexingMode: false, + EnableReqRespSync: true, } - ctx, cancel := context.WithCancel(context.Background()) - cancel() // force-quit - n.logger.Info("Closing op-node") - closeErr := n.opNode.Stop(ctx) - n.logger.Info("Closed op-node", "err", closeErr) - - n.opNode = nil } -type L2CLOption func(p devtest.P, id stack.L2CLNodeID, cfg *config.Config) +type L2CLOption interface { + Apply(p devtest.P, id stack.L2CLNodeID, cfg *L2CLConfig) +} -func WithL2CLOption(opt L2CLOption) stack.Option[*Orchestrator] { +// WithGlobalL2CLOption applies the L2CLOption to all L2CLNode instances in this orchestrator +func 
WithGlobalL2CLOption(opt L2CLOption) stack.Option[*Orchestrator] { return stack.BeforeDeploy(func(o *Orchestrator) { o.l2CLOptions = append(o.l2CLOptions, opt) }) } -func WithL2CLNode(l2CLID stack.L2CLNodeID, isSequencer bool, indexingMode bool, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2CLID)) - - require := p.Require() - - l2Net, ok := orch.l2Nets.Get(l2CLID.ChainID()) - require.True(ok, "l2 network required") - - l1EL, ok := orch.l1ELs.Get(l1ELID) - require.True(ok, "l1 EL node required") - - l1CL, ok := orch.l1CLs.Get(l1CLID) - require.True(ok, "l1 CL node required") - - l2EL, ok := orch.l2ELs.Get(l2ELID) - require.True(ok, "l2 EL node required") - - var depSet depset.DependencySet - if cluster, ok := orch.ClusterForL2(l2ELID.ChainID()); ok { - depSet = cluster.DepSet() - } - - jwtPath, jwtSecret := orch.writeDefaultJWT() - - logger := p.Logger() - - var p2pSignerSetup p2p.SignerSetup - var p2pConfig *p2p.Config - // code block for P2P setup - { - // make a dummy flagset since p2p config initialization helpers only input cli context - fs := flag.NewFlagSet("", flag.ContinueOnError) - // use default flags - for _, f := range opNodeFlags.P2PFlags(opNodeFlags.EnvVarPrefix) { - require.NoError(f.Apply(fs)) - } - // mandatory P2P flags - require.NoError(fs.Set(opNodeFlags.AdvertiseIPName, "127.0.0.1")) - require.NoError(fs.Set(opNodeFlags.AdvertiseTCPPortName, "0")) - require.NoError(fs.Set(opNodeFlags.AdvertiseUDPPortName, "0")) - require.NoError(fs.Set(opNodeFlags.ListenIPName, "127.0.0.1")) - require.NoError(fs.Set(opNodeFlags.ListenTCPPortName, "0")) - require.NoError(fs.Set(opNodeFlags.ListenUDPPortName, "0")) - // avoid resource unavailable error by using memorydb - require.NoError(fs.Set(opNodeFlags.DiscoveryPathName, "memory")) - require.NoError(fs.Set(opNodeFlags.PeerstorePathName, 
"memory")) - // For peer ID - networkPrivKey, err := crypto.GenerateKey() - require.NoError(err) - networkPrivKeyHex := hex.EncodeToString(crypto.FromECDSA(networkPrivKey)) - require.NoError(fs.Set(opNodeFlags.P2PPrivRawName, networkPrivKeyHex)) - // Explicitly set to empty; do not default to resolving DNS of external bootnodes - require.NoError(fs.Set(opNodeFlags.BootnodesName, "")) - - cliCtx := cli.NewContext(&cli.App{}, fs, nil) - if isSequencer { - p2pKey, err := orch.keys.Secret(devkeys.SequencerP2PRole.Key(l2CLID.ChainID().ToBig())) - require.NoError(err, "need p2p key for sequencer") - p2pKeyHex := hex.EncodeToString(crypto.FromECDSA(p2pKey)) - require.NoError(fs.Set(opNodeFlags.SequencerP2PKeyName, p2pKeyHex)) - p2pSignerSetup, err = p2pcli.LoadSignerSetup(cliCtx, logger) - require.NoError(err, "failed to load p2p signer") - logger.Info("Sequencer key acquired") - } - p2pConfig, err = p2pcli.NewConfig(cliCtx, l2Net.rollupCfg.BlockTime) - require.NoError(err, "failed to load p2p config") - } - - // specify interop config, but do not configure anything, to disable indexing mode - interopCfg := &interop.Config{} +type L2CLOptionFn func(p devtest.P, id stack.L2CLNodeID, cfg *L2CLConfig) - if indexingMode { - interopCfg = &interop.Config{ - RPCAddr: "127.0.0.1", - // When L2CL starts, store its RPC port here - // given by the os, to reclaim when restart. 
- RPCPort: 0, - RPCJwtSecretPath: jwtPath, - } - } +var _ L2CLOption = L2CLOptionFn(nil) - nodeCfg := &config.Config{ - L1: &config.L1EndpointConfig{ - L1NodeAddr: l1EL.userRPC, - L1TrustRPC: false, - L1RPCKind: sources.RPCKindDebugGeth, - RateLimit: 0, - BatchSize: 20, - HttpPollInterval: time.Millisecond * 100, - MaxConcurrency: 10, - CacheSize: 0, // auto-adjust to sequence window - }, - L2: &config.L2EndpointConfig{ - L2EngineAddr: l2EL.authRPC, - L2EngineJWTSecret: jwtSecret, - }, - Beacon: &config.L1BeaconEndpointConfig{ - BeaconAddr: l1CL.beacon.BeaconAddr(), - }, - Driver: driver.Config{ - SequencerEnabled: isSequencer, - SequencerConfDepth: 2, - }, - Rollup: *l2Net.rollupCfg, - DependencySet: depSet, - P2PSigner: p2pSignerSetup, // nil when not sequencer - RPC: oprpc.CLIConfig{ - ListenAddr: "127.0.0.1", - // When L2CL starts, store its RPC port here - // given by the os, to reclaim when restart. - ListenPort: 0, - EnableAdmin: true, - }, - InteropConfig: interopCfg, - P2P: p2pConfig, - L1EpochPollInterval: time.Second * 2, - RuntimeConfigReloadInterval: 0, - Tracer: nil, - Sync: nodeSync.Config{ - SyncMode: nodeSync.CLSync, - SkipSyncStartCheck: false, - SupportsPostFinalizationELSync: false, - }, - ConfigPersistence: config.DisabledConfigPersistence{}, - Metrics: opmetrics.CLIConfig{}, - Pprof: oppprof.CLIConfig{}, - SafeDBPath: "", - RollupHalt: "", - Cancel: nil, - ConductorEnabled: false, - ConductorRpc: nil, - ConductorRpcTimeout: 0, - AltDA: altda.CLIConfig{}, - IgnoreMissingPectraBlobSchedule: false, - ExperimentalOPStackAPI: true, - } - for _, opt := range orch.l2CLOptions { - opt(orch.P(), l2CLID, nodeCfg) - } - l2CLNode := &L2CLNode{ - id: l2CLID, - cfg: nodeCfg, - logger: logger, - p: p, - el: l2ELID, - } - require.True(orch.l2CLs.SetIfMissing(l2CLID, l2CLNode), "must not already exist") - l2CLNode.Start() - p.Cleanup(l2CLNode.Stop) - }) +func (fn L2CLOptionFn) Apply(p devtest.P, id stack.L2CLNodeID, cfg *L2CLConfig) { + fn(p, id, cfg) } -func 
GetP2PClient(ctx context.Context, logger log.Logger, l2CLNode *L2CLNode) (*sources.P2PClient, error) { - rpcClient, err := client.NewRPC(ctx, logger, l2CLNode.userRPC, client.WithLazyDial()) - if err != nil { - return nil, fmt.Errorf("failed to initialize rpc client for p2p client: %w", err) - } - return sources.NewP2PClient(rpcClient), nil -} +// L2CLOptionBundle a list of multiple L2CLOption, to all be applied in order. +type L2CLOptionBundle []L2CLOption -func GetPeerInfo(ctx context.Context, p2pClient *sources.P2PClient) (*apis.PeerInfo, error) { - peerInfo, err := retry.Do(ctx, 3, retry.Exponential(), func() (*apis.PeerInfo, error) { - return p2pClient.Self(ctx) - }) - if err != nil { - return nil, fmt.Errorf("failed to get peer info: %w", err) - } - return peerInfo, nil -} +var _ L2CLOption = L2CLOptionBundle(nil) -func GetPeers(ctx context.Context, p2pClient *sources.P2PClient) (*apis.PeerDump, error) { - peerDump, err := retry.Do(ctx, 3, retry.Exponential(), func() (*apis.PeerDump, error) { - return p2pClient.Peers(ctx, true) - }) - if err != nil { - return nil, fmt.Errorf("failed to get peers: %w", err) +func (l L2CLOptionBundle) Apply(p devtest.P, id stack.L2CLNodeID, cfg *L2CLConfig) { + for _, opt := range l { + p.Require().NotNil(opt, "cannot Apply nil L2CLOption") + opt.Apply(p, id, cfg) } - return peerDump, nil -} - -type p2pClientsAndPeers struct { - client1 *sources.P2PClient - client2 *sources.P2PClient - peerInfo1 *apis.PeerInfo - peerInfo2 *apis.PeerInfo } -func getP2PClientsAndPeers(ctx context.Context, logger log.Logger, require *testreq.Assertions, l2CL1, l2CL2 *L2CLNode) *p2pClientsAndPeers { - p2pClient1, err := GetP2PClient(ctx, logger, l2CL1) - require.NoError(err) - p2pClient2, err := GetP2PClient(ctx, logger, l2CL2) - require.NoError(err) - - peerInfo1, err := GetPeerInfo(ctx, p2pClient1) - require.NoError(err) - peerInfo2, err := GetPeerInfo(ctx, p2pClient2) - require.NoError(err) - - require.True(len(peerInfo1.Addresses) > 0 && 
len(peerInfo2.Addresses) > 0, "malformed peer info") - - return &p2pClientsAndPeers{ - client1: p2pClient1, - client2: p2pClient2, - peerInfo1: peerInfo1, - peerInfo2: peerInfo2, +// WithL2CLNode adds the default type of L2 CL node. +// The default can be configured with DEVSTACK_L2CL_KIND. +// Tests that depend on specific types can use options like WithKonaNode and WithOpNode directly. +func WithL2CLNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, opts ...L2CLOption) stack.Option[*Orchestrator] { + switch os.Getenv("DEVSTACK_L2CL_KIND") { + case "kona": + return WithKonaNode(l2CLID, l1CLID, l1ELID, l2ELID, opts...) + default: + return WithOpNode(l2CLID, l1CLID, l1ELID, l2ELID, opts...) } } - -// WithL2CLP2PConnection connects P2P between two L2CLs -func WithL2CLP2PConnection(l2CL1ID, l2CL2ID stack.L2CLNodeID) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - require := orch.P().Require() - - l2CL1, ok := orch.l2CLs.Get(l2CL1ID) - require.True(ok, "looking for L2 CL node 1 to connect p2p") - l2CL2, ok := orch.l2CLs.Get(l2CL2ID) - require.True(ok, "looking for L2 CL node 2 to connect p2p") - require.Equal(l2CL1.cfg.Rollup.L2ChainID, l2CL2.cfg.Rollup.L2ChainID, "must be same l2 chain") - - ctx := orch.P().Ctx() - logger := orch.P().Logger() - - p := getP2PClientsAndPeers(ctx, logger, require, l2CL1, l2CL2) - - connectPeer := func(p2pClient *sources.P2PClient, multiAddress string) { - err := retry.Do0(ctx, 6, retry.Exponential(), func() error { - return p2pClient.ConnectPeer(ctx, multiAddress) - }) - require.NoError(err, "failed to connect peer") - } - - connectPeer(p.client1, p.peerInfo2.Addresses[0]) - connectPeer(p.client2, p.peerInfo1.Addresses[0]) - - check := func(peerDump *apis.PeerDump, peerInfo *apis.PeerInfo) { - multiAddress := peerInfo.PeerID.String() - _, ok := peerDump.Peers[multiAddress] - require.True(ok, "peer register invalid") - } - - peerDump1, err := 
GetPeers(ctx, p.client1) - require.NoError(err) - peerDump2, err := GetPeers(ctx, p.client2) - require.NoError(err) - - check(peerDump1, p.peerInfo2) - check(peerDump2, p.peerInfo1) - }) -} diff --git a/op-devstack/sysgo/l2_cl_kona.go b/op-devstack/sysgo/l2_cl_kona.go new file mode 100644 index 0000000000000..52e5655dc928f --- /dev/null +++ b/op-devstack/sysgo/l2_cl_kona.go @@ -0,0 +1,243 @@ +package sysgo + +import ( + "encoding/hex" + "encoding/json" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/ethereum/go-ethereum/crypto" + + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/logpipe" + "github.com/ethereum-optimism/optimism/op-service/tasks" + "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" +) + +type KonaNode struct { + mu sync.Mutex + + id stack.L2CLNodeID + + userRPC string + interopEndpoint string // warning: currently not fully supported + interopJwtSecret eth.Bytes32 + el stack.L2ELNodeID + + userProxy *tcpproxy.Proxy + + execPath string + args []string + // Each entry is of the form "key=value". 
+ env []string + + p devtest.P + + sub *SubProcess +} + +func (k *KonaNode) hydrate(system stack.ExtensibleSystem) { + require := system.T().Require() + rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), k.userRPC, client.WithLazyDial()) + require.NoError(err) + system.T().Cleanup(rpcCl.Close) + + sysL2CL := shim.NewL2CLNode(shim.L2CLNodeConfig{ + CommonConfig: shim.NewCommonConfig(system.T()), + ID: k.id, + Client: rpcCl, + UserRPC: k.userRPC, + InteropEndpoint: k.interopEndpoint, + InteropJwtSecret: k.interopJwtSecret, + }) + sysL2CL.SetLabel(match.LabelVendor, string(match.KonaNode)) + l2Net := system.L2Network(stack.L2NetworkID(k.id.ChainID())) + l2Net.(stack.ExtensibleL2Network).AddL2CLNode(sysL2CL) + sysL2CL.(stack.LinkableL2CLNode).LinkEL(l2Net.L2ELNode(k.el)) +} + +func (k *KonaNode) Start() { + k.mu.Lock() + defer k.mu.Unlock() + if k.sub != nil { + k.p.Logger().Warn("Kona-node already started") + return + } + // Create a proxy for the user RPC, + // so other services can connect, and stay connected, across restarts. + if k.userProxy == nil { + k.userProxy = tcpproxy.New(k.p.Logger()) + k.p.Require().NoError(k.userProxy.Start()) + k.p.Cleanup(func() { + k.userProxy.Close() + }) + k.userRPC = "http://" + k.userProxy.Addr() + } + // Create the sub-process. + // We pipe sub-process logs to the test-logger. + // And inspect them along the way, to get the RPC server address. 
+ logOut := logpipe.ToLogger(k.p.Logger().New("src", "stdout")) + logErr := logpipe.ToLogger(k.p.Logger().New("src", "stderr")) + userRPC := make(chan string, 1) + onLogEntry := func(e logpipe.LogEntry) { + switch e.LogMessage() { + case "RPC server bound to address": + userRPC <- "http://" + e.FieldValue("addr").(string) + } + } + stdOutLogs := logpipe.LogProcessor(func(line []byte) { + e := logpipe.ParseRustStructuredLogs(line) + logOut(e) + onLogEntry(e) + }) + stdErrLogs := logpipe.LogProcessor(func(line []byte) { + e := logpipe.ParseRustStructuredLogs(line) + logErr(e) + }) + k.sub = NewSubProcess(k.p, stdOutLogs, stdErrLogs) + + err := k.sub.Start(k.execPath, k.args, k.env) + k.p.Require().NoError(err, "Must start") + + var userRPCAddr string + k.p.Require().NoError(tasks.Await(k.p.Ctx(), userRPC, &userRPCAddr), "need user RPC") + + k.userProxy.SetUpstream(ProxyAddr(k.p.Require(), userRPCAddr)) +} + +// Stop stops the kona node. +// warning: no restarts supported yet, since the RPC port is not remembered. 
+func (k *KonaNode) Stop() { + k.mu.Lock() + defer k.mu.Unlock() + if k.sub == nil { + k.p.Logger().Warn("kona-node already stopped") + return + } + err := k.sub.Stop() + k.p.Require().NoError(err, "Must stop") + k.sub = nil +} + +func (k *KonaNode) UserRPC() string { + return k.userRPC +} + +func (k *KonaNode) InteropRPC() (endpoint string, jwtSecret eth.Bytes32) { + return k.interopEndpoint, k.interopJwtSecret +} + +var _ L2CLNode = (*KonaNode)(nil) + +func WithKonaNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, opts ...L2CLOption) stack.Option[*Orchestrator] { + return stack.AfterDeploy(func(orch *Orchestrator) { + p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2CLID)) + + require := p.Require() + + l1Net, ok := orch.l1Nets.Get(l1CLID.ChainID()) + require.True(ok, "l1 network required") + + l2Net, ok := orch.l2Nets.Get(l2CLID.ChainID()) + require.True(ok, "l2 network required") + + l1ChainConfig := l1Net.genesis.Config + + l1EL, ok := orch.l1ELs.Get(l1ELID) + require.True(ok, "l1 EL node required") + + l1CL, ok := orch.l1CLs.Get(l1CLID) + require.True(ok, "l1 CL node required") + + l2EL, ok := orch.l2ELs.Get(l2ELID) + require.True(ok, "l2 EL node required") + + cfg := DefaultL2CLConfig() + orch.l2CLOptions.Apply(orch.P(), l2CLID, cfg) // apply global options + L2CLOptionBundle(opts).Apply(orch.P(), l2CLID, cfg) // apply specific options + + tempKonaDir := p.TempDir() + + tempP2PPath := filepath.Join(tempKonaDir, "p2pkey.txt") + + tempRollupCfgPath := filepath.Join(tempKonaDir, "rollup.json") + rollupCfgData, err := json.Marshal(l2Net.rollupCfg) + p.Require().NoError(err, "must write rollup config") + p.Require().NoError(err, os.WriteFile(tempRollupCfgPath, rollupCfgData, 0o644)) + + tempL1CfgPath := filepath.Join(tempKonaDir, "l1-chain-config.json") + l1CfgData, err := json.Marshal(l1ChainConfig) + p.Require().NoError(err, "must write l1 chain config") + p.Require().NoError(err, 
os.WriteFile(tempL1CfgPath, l1CfgData, 0o644)) + + envVars := []string{ + "KONA_NODE_L1_ETH_RPC=" + l1EL.UserRPC(), + "KONA_NODE_L1_BEACON=" + l1CL.beaconHTTPAddr, + // TODO: WS RPC addresses do not work and will make the startup panic with a connection error in the + // JWT validation / engine-capabilities setup code-path. + "KONA_NODE_L2_ENGINE_RPC=" + strings.ReplaceAll(l2EL.EngineRPC(), "ws://", "http://"), + "KONA_NODE_L2_ENGINE_AUTH=" + l2EL.JWTPath(), + "KONA_NODE_ROLLUP_CONFIG=" + tempRollupCfgPath, + "KONA_NODE_L1_CHAIN_CONFIG=" + tempL1CfgPath, + "KONA_NODE_P2P_NO_DISCOVERY=true", + "KONA_NODE_P2P_PRIV_PATH=" + tempP2PPath, + "KONA_NODE_RPC_ADDR=127.0.0.1", + "KONA_NODE_RPC_PORT=0", + "KONA_NODE_RPC_WS_ENABLED=true", + "KONA_METRICS_ENABLED=false", + "KONA_LOG_LEVEL=3", // info level + "KONA_LOG_STDOUT_FORMAT=json", + // p2p ports + "KONA_NODE_P2P_LISTEN_IP=127.0.0.1", + "KONA_NODE_P2P_LISTEN_TCP_PORT=0", + "KONA_NODE_P2P_LISTEN_UDP_PORT=0", + } + if cfg.IsSequencer { + p2pKey, err := orch.keys.Secret(devkeys.SequencerP2PRole.Key(l2CLID.ChainID().ToBig())) + require.NoError(err, "need p2p key for sequencer") + p2pKeyHex := "0x" + hex.EncodeToString(crypto.FromECDSA(p2pKey)) + // TODO: Kona should support loading keys from a file + //tempSeqKeyPath := filepath.Join(tempKonaDir, "p2p-sequencer.txt") + //p.Require().NoError(err, os.WriteFile(tempSeqKeyPath, []byte(p2pKeyHex), 0o644)) + envVars = append(envVars, + "KONA_NODE_P2P_SEQUENCER_KEY="+p2pKeyHex, + "KONA_NODE_SEQUENCER_L1_CONFS=2", + "KONA_NODE_MODE=Sequencer", + ) + } else { + envVars = append(envVars, + "KONA_NODE_MODE=Validator", + ) + } + + execPath := os.Getenv("KONA_NODE_EXEC_PATH") + p.Require().NotEmpty(execPath, "KONA_NODE_EXEC_PATH environment variable must be set") + _, err = os.Stat(execPath) + p.Require().NotErrorIs(err, os.ErrNotExist, "executable must exist") + + k := &KonaNode{ + id: l2CLID, + userRPC: "", // retrieved from logs + interopEndpoint: "", // retrieved from logs + 
interopJwtSecret: eth.Bytes32{}, + el: l2ELID, + execPath: execPath, + args: []string{"node"}, + env: envVars, + p: p, + } + p.Logger().Info("Starting kona-node") + k.Start() + p.Cleanup(k.Stop) + p.Logger().Info("Kona-node is up", "rpc", k.UserRPC()) + require.True(orch.l2CLs.SetIfMissing(l2CLID, k), "must not already exist") + }) +} diff --git a/op-devstack/sysgo/l2_cl_opnode.go b/op-devstack/sysgo/l2_cl_opnode.go new file mode 100644 index 0000000000000..99fcc60b06694 --- /dev/null +++ b/op-devstack/sysgo/l2_cl_opnode.go @@ -0,0 +1,323 @@ +package sysgo + +import ( + "context" + "encoding/hex" + "flag" + "fmt" + "sync" + "time" + + "github.com/urfave/cli/v2" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + + altda "github.com/ethereum-optimism/optimism/op-alt-da" + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/opnode" + "github.com/ethereum-optimism/optimism/op-node/config" + opNodeFlags "github.com/ethereum-optimism/optimism/op-node/flags" + "github.com/ethereum-optimism/optimism/op-node/p2p" + p2pcli "github.com/ethereum-optimism/optimism/op-node/p2p/cli" + "github.com/ethereum-optimism/optimism/op-node/rollup/driver" + "github.com/ethereum-optimism/optimism/op-node/rollup/interop" + nodeSync "github.com/ethereum-optimism/optimism/op-node/rollup/sync" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" + opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" + "github.com/ethereum-optimism/optimism/op-service/oppprof" + oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" + 
"github.com/ethereum-optimism/optimism/op-service/sources" + "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" +) + +type OpNode struct { + mu sync.Mutex + + id stack.L2CLNodeID + opNode *opnode.Opnode + userRPC string + interopEndpoint string + interopJwtSecret eth.Bytes32 + cfg *config.Config + p devtest.P + logger log.Logger + el *stack.L2ELNodeID // Optional: nil when using SyncTester + userProxy *tcpproxy.Proxy + interopProxy *tcpproxy.Proxy +} + +var _ L2CLNode = (*OpNode)(nil) + +func (n *OpNode) hydrate(system stack.ExtensibleSystem) { + require := system.T().Require() + rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), n.userRPC, client.WithLazyDial()) + require.NoError(err) + system.T().Cleanup(rpcCl.Close) + + sysL2CL := shim.NewL2CLNode(shim.L2CLNodeConfig{ + CommonConfig: shim.NewCommonConfig(system.T()), + ID: n.id, + Client: rpcCl, + UserRPC: n.userRPC, + InteropEndpoint: n.interopEndpoint, + InteropJwtSecret: n.interopJwtSecret, + }) + sysL2CL.SetLabel(match.LabelVendor, string(match.OpNode)) + l2Net := system.L2Network(stack.L2NetworkID(n.id.ChainID())) + l2Net.(stack.ExtensibleL2Network).AddL2CLNode(sysL2CL) + if n.el != nil { + sysL2CL.(stack.LinkableL2CLNode).LinkEL(l2Net.L2ELNode(n.el)) + } +} + +func (n *OpNode) UserRPC() string { + return n.userRPC +} + +func (n *OpNode) InteropRPC() (endpoint string, jwtSecret eth.Bytes32) { + // Make sure to use the proxied interop endpoint + return n.interopEndpoint, n.interopJwtSecret +} + +func (n *OpNode) Start() { + n.mu.Lock() + defer n.mu.Unlock() + if n.opNode != nil { + n.logger.Warn("Op-node already started") + return + } + + if n.userProxy == nil { + n.userProxy = tcpproxy.New(n.logger.New("proxy", "l2cl-user")) + n.p.Require().NoError(n.userProxy.Start()) + n.p.Cleanup(func() { + n.userProxy.Close() + }) + n.userRPC = "http://" + n.userProxy.Addr() + } + if n.interopProxy == nil { + 
n.interopProxy = tcpproxy.New(n.logger.New("proxy", "l2cl-interop")) + n.p.Require().NoError(n.interopProxy.Start()) + n.p.Cleanup(func() { + n.interopProxy.Close() + }) + n.interopEndpoint = "ws://" + n.interopProxy.Addr() + } + n.logger.Info("Starting op-node") + opNode, err := opnode.NewOpnode(n.logger, n.cfg, func(err error) { + n.p.Require().NoError(err, "op-node critical error") + }) + n.p.Require().NoError(err, "op-node failed to start") + n.logger.Info("Started op-node") + n.opNode = opNode + + n.userProxy.SetUpstream(ProxyAddr(n.p.Require(), opNode.UserRPC().RPC())) + + interopEndpoint, interopJwtSecret := opNode.InteropRPC() + n.interopProxy.SetUpstream(ProxyAddr(n.p.Require(), interopEndpoint)) + n.interopJwtSecret = interopJwtSecret +} + +func (n *OpNode) Stop() { + n.mu.Lock() + defer n.mu.Unlock() + if n.opNode == nil { + n.logger.Warn("Op-node already stopped") + return + } + ctx, cancel := context.WithCancel(context.Background()) + cancel() // force-quit + n.logger.Info("Closing op-node") + closeErr := n.opNode.Stop(ctx) + n.logger.Info("Closed op-node", "err", closeErr) + + n.opNode = nil +} + +func WithOpNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, opts ...L2CLOption) stack.Option[*Orchestrator] { + return stack.AfterDeploy(func(orch *Orchestrator) { + p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2CLID)) + + require := p.Require() + + l1Net, ok := orch.l1Nets.Get(l1CLID.ChainID()) + require.True(ok, "l1 network required") + + l2Net, ok := orch.l2Nets.Get(l2CLID.ChainID()) + require.True(ok, "l2 network required") + + l1EL, ok := orch.l1ELs.Get(l1ELID) + require.True(ok, "l1 EL node required") + + l1CL, ok := orch.l1CLs.Get(l1CLID) + require.True(ok, "l1 CL node required") + + // Get the L2EL node (which can be a regular EL node or a SyncTesterEL) + l2EL, ok := orch.l2ELs.Get(l2ELID) + require.True(ok, "l2 EL node required") + + // Get dependency set from cluster if available 
+ var depSet depset.DependencySet + if cluster, ok := orch.ClusterForL2(l2ELID.ChainID()); ok { + depSet = cluster.DepSet() + } + + cfg := DefaultL2CLConfig() + orch.l2CLOptions.Apply(p, l2CLID, cfg) // apply global options + L2CLOptionBundle(opts).Apply(p, l2CLID, cfg) // apply specific options + + syncMode := cfg.VerifierSyncMode + if cfg.IsSequencer { + syncMode = cfg.SequencerSyncMode + // Sanity check, to navigate legacy sync-mode test assumptions. + // Can't enable ELSync on the sequencer or it will never start sequencing because + // ELSync needs to receive gossip from the sequencer to drive the sync + p.Require().NotEqual(nodeSync.ELSync, syncMode, "sequencer cannot use EL sync") + } + + jwtPath, jwtSecret := orch.writeDefaultJWT() + + logger := p.Logger() + + var p2pSignerSetup p2p.SignerSetup + var p2pConfig *p2p.Config + // code block for P2P setup + { + // make a dummy flagset since p2p config initialization helpers only input cli context + fs := flag.NewFlagSet("", flag.ContinueOnError) + // use default flags + for _, f := range opNodeFlags.P2PFlags(opNodeFlags.EnvVarPrefix) { + require.NoError(f.Apply(fs)) + } + // mandatory P2P flags + require.NoError(fs.Set(opNodeFlags.AdvertiseIPName, "127.0.0.1")) + require.NoError(fs.Set(opNodeFlags.AdvertiseTCPPortName, "0")) + require.NoError(fs.Set(opNodeFlags.AdvertiseUDPPortName, "0")) + require.NoError(fs.Set(opNodeFlags.ListenIPName, "127.0.0.1")) + require.NoError(fs.Set(opNodeFlags.ListenTCPPortName, "0")) + require.NoError(fs.Set(opNodeFlags.ListenUDPPortName, "0")) + // avoid resource unavailable error by using memorydb + require.NoError(fs.Set(opNodeFlags.DiscoveryPathName, "memory")) + require.NoError(fs.Set(opNodeFlags.PeerstorePathName, "memory")) + // For peer ID + networkPrivKey, err := crypto.GenerateKey() + require.NoError(err) + networkPrivKeyHex := hex.EncodeToString(crypto.FromECDSA(networkPrivKey)) + require.NoError(fs.Set(opNodeFlags.P2PPrivRawName, networkPrivKeyHex)) + // Explicitly set 
to empty; do not default to resolving DNS of external bootnodes + require.NoError(fs.Set(opNodeFlags.BootnodesName, "")) + + cliCtx := cli.NewContext(&cli.App{}, fs, nil) + if cfg.IsSequencer { + p2pKey, err := orch.keys.Secret(devkeys.SequencerP2PRole.Key(l2CLID.ChainID().ToBig())) + require.NoError(err, "need p2p key for sequencer") + p2pKeyHex := hex.EncodeToString(crypto.FromECDSA(p2pKey)) + require.NoError(fs.Set(opNodeFlags.SequencerP2PKeyName, p2pKeyHex)) + p2pSignerSetup, err = p2pcli.LoadSignerSetup(cliCtx, logger) + require.NoError(err, "failed to load p2p signer") + logger.Info("Sequencer key acquired") + } + p2pConfig, err = p2pcli.NewConfig(cliCtx, l2Net.rollupCfg.BlockTime) + require.NoError(err, "failed to load p2p config") + } + + // specify interop config, but do not configure anything, to disable indexing mode + interopCfg := &interop.Config{} + + if cfg.IndexingMode { + interopCfg = &interop.Config{ + RPCAddr: "127.0.0.1", + // When L2CL starts, store its RPC port here + // given by the os, to reclaim when restart. 
+ RPCPort: 0, + RPCJwtSecretPath: jwtPath, + } + } + + // Set the req-resp sync flag as per config + p2pConfig.EnableReqRespSync = cfg.EnableReqRespSync + + // Get the L2 engine address from the EL node (which can be a regular EL node or a SyncTesterEL) + l2EngineAddr := l2EL.EngineRPC() + + nodeCfg := &config.Config{ + L1: &config.L1EndpointConfig{ + L1NodeAddr: l1EL.UserRPC(), + L1TrustRPC: false, + L1RPCKind: sources.RPCKindDebugGeth, + RateLimit: 0, + BatchSize: 20, + HttpPollInterval: time.Millisecond * 100, + MaxConcurrency: 10, + CacheSize: 0, // auto-adjust to sequence window + }, + L1ChainConfig: l1Net.genesis.Config, + L2: &config.L2EndpointConfig{ + L2EngineAddr: l2EngineAddr, + L2EngineJWTSecret: jwtSecret, + }, + Beacon: &config.L1BeaconEndpointConfig{ + BeaconAddr: l1CL.beaconHTTPAddr, + }, + Driver: driver.Config{ + SequencerEnabled: cfg.IsSequencer, + SequencerConfDepth: 2, + }, + Rollup: *l2Net.rollupCfg, + DependencySet: depSet, + P2PSigner: p2pSignerSetup, // nil when not sequencer + RPC: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + // When L2CL starts, store its RPC port here + // given by the os, to reclaim when restart. 
+ ListenPort: 0, + EnableAdmin: true, + }, + InteropConfig: interopCfg, + P2P: p2pConfig, + L1EpochPollInterval: time.Second * 2, + RuntimeConfigReloadInterval: 0, + Tracer: nil, + Sync: nodeSync.Config{ + SyncMode: syncMode, + SkipSyncStartCheck: false, + SupportsPostFinalizationELSync: false, + }, + ConfigPersistence: config.DisabledConfigPersistence{}, + Metrics: opmetrics.CLIConfig{}, + Pprof: oppprof.CLIConfig{}, + SafeDBPath: "", + RollupHalt: "", + Cancel: nil, + ConductorEnabled: false, + ConductorRpc: nil, + ConductorRpcTimeout: 0, + AltDA: altda.CLIConfig{}, + IgnoreMissingPectraBlobSchedule: false, + ExperimentalOPStackAPI: true, + } + if cfg.SafeDBPath != "" { + nodeCfg.SafeDBPath = cfg.SafeDBPath + } + + l2CLNode := &OpNode{ + id: l2CLID, + cfg: nodeCfg, + logger: logger, + p: p, + } + + // Set the EL field to link to the L2EL node + l2CLNode.el = &l2ELID + require.True(orch.l2CLs.SetIfMissing(l2CLID, l2CLNode), fmt.Sprintf("must not already exist: %s", l2CLID)) + l2CLNode.Start() + p.Cleanup(l2CLNode.Stop) + }) +} diff --git a/op-devstack/sysgo/l2_cl_p2p_util.go b/op-devstack/sysgo/l2_cl_p2p_util.go new file mode 100644 index 0000000000000..911434cd28e3c --- /dev/null +++ b/op-devstack/sysgo/l2_cl_p2p_util.go @@ -0,0 +1,114 @@ +package sysgo + +import ( + "context" + "fmt" + + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/retry" + "github.com/ethereum-optimism/optimism/op-service/sources" + "github.com/ethereum-optimism/optimism/op-service/testreq" +) + +func GetP2PClient(ctx context.Context, logger log.Logger, l2CLNode L2CLNode) (*sources.P2PClient, error) { + rpcClient, err := client.NewRPC(ctx, logger, l2CLNode.UserRPC(), client.WithLazyDial()) + if err != nil { + return nil, fmt.Errorf("failed to initialize rpc client for p2p 
client: %w", err) + } + return sources.NewP2PClient(rpcClient), nil +} + +func GetPeerInfo(ctx context.Context, p2pClient *sources.P2PClient) (*apis.PeerInfo, error) { + peerInfo, err := retry.Do(ctx, 3, retry.Exponential(), func() (*apis.PeerInfo, error) { + return p2pClient.Self(ctx) + }) + if err != nil { + return nil, fmt.Errorf("failed to get peer info: %w", err) + } + return peerInfo, nil +} + +func GetPeers(ctx context.Context, p2pClient *sources.P2PClient) (*apis.PeerDump, error) { + peerDump, err := retry.Do(ctx, 3, retry.Exponential(), func() (*apis.PeerDump, error) { + return p2pClient.Peers(ctx, true) + }) + if err != nil { + return nil, fmt.Errorf("failed to get peers: %w", err) + } + return peerDump, nil +} + +type p2pClientsAndPeers struct { + client1 *sources.P2PClient + client2 *sources.P2PClient + peerInfo1 *apis.PeerInfo + peerInfo2 *apis.PeerInfo +} + +func getP2PClientsAndPeers(ctx context.Context, logger log.Logger, + require *testreq.Assertions, l2CL1, l2CL2 L2CLNode) *p2pClientsAndPeers { + p2pClient1, err := GetP2PClient(ctx, logger, l2CL1) + require.NoError(err) + p2pClient2, err := GetP2PClient(ctx, logger, l2CL2) + require.NoError(err) + + peerInfo1, err := GetPeerInfo(ctx, p2pClient1) + require.NoError(err) + peerInfo2, err := GetPeerInfo(ctx, p2pClient2) + require.NoError(err) + + require.True(len(peerInfo1.Addresses) > 0 && len(peerInfo2.Addresses) > 0, "malformed peer info") + + return &p2pClientsAndPeers{ + client1: p2pClient1, + client2: p2pClient2, + peerInfo1: peerInfo1, + peerInfo2: peerInfo2, + } +} + +// WithL2CLP2PConnection connects P2P between two L2CLs +func WithL2CLP2PConnection(l2CL1ID, l2CL2ID stack.L2CLNodeID) stack.Option[*Orchestrator] { + return stack.AfterDeploy(func(orch *Orchestrator) { + require := orch.P().Require() + + l2CL1, ok := orch.l2CLs.Get(l2CL1ID) + require.True(ok, "looking for L2 CL node 1 to connect p2p") + l2CL2, ok := orch.l2CLs.Get(l2CL2ID) + require.True(ok, "looking for L2 CL node 2 to connect 
p2p") + require.Equal(l2CL1ID.ChainID(), l2CL2ID.ChainID(), "must be same l2 chain") + + ctx := orch.P().Ctx() + logger := orch.P().Logger() + + p := getP2PClientsAndPeers(ctx, logger, require, l2CL1, l2CL2) + + connectPeer := func(p2pClient *sources.P2PClient, multiAddress string) { + err := retry.Do0(ctx, 6, retry.Exponential(), func() error { + return p2pClient.ConnectPeer(ctx, multiAddress) + }) + require.NoError(err, "failed to connect peer") + } + + connectPeer(p.client1, p.peerInfo2.Addresses[0]) + connectPeer(p.client2, p.peerInfo1.Addresses[0]) + + check := func(peerDump *apis.PeerDump, peerInfo *apis.PeerInfo) { + multiAddress := peerInfo.PeerID.String() + _, ok := peerDump.Peers[multiAddress] + require.True(ok, "peer register invalid") + } + + peerDump1, err := GetPeers(ctx, p.client1) + require.NoError(err) + peerDump2, err := GetPeers(ctx, p.client2) + require.NoError(err) + + check(peerDump1, p.peerInfo2) + check(peerDump2, p.peerInfo1) + }) +} diff --git a/op-devstack/sysgo/l2_el.go b/op-devstack/sysgo/l2_el.go index d5742afc79aab..19501a327668a 100644 --- a/op-devstack/sysgo/l2_el.go +++ b/op-devstack/sysgo/l2_el.go @@ -1,209 +1,89 @@ package sysgo import ( - "context" - "net/url" - "slices" - "strconv" - "sync" - "time" + "os" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/shim" "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/dial" - "github.com/ethereum-optimism/optimism/op-service/testreq" - "github.com/ethereum/go-ethereum/eth/ethconfig" - "github.com/ethereum/go-ethereum/log" - gn "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/p2p" ) -type 
L2ELNode struct { - mu sync.Mutex - - p devtest.P - logger log.Logger - id stack.L2ELNodeID - l2Net *L2Network - jwtPath string - supervisorRPC string - l2Geth *geth.GethInstance +type L2ELNode interface { + hydrate(system stack.ExtensibleSystem) + stack.Lifecycle + UserRPC() string + EngineRPC() string + JWTPath() string +} - authRPC string - userRPC string +type L2ELConfig struct { + SupervisorID *stack.SupervisorID } -func (n *L2ELNode) hydrate(system stack.ExtensibleSystem) { - require := system.T().Require() - rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), n.userRPC, client.WithLazyDial()) - require.NoError(err) - system.T().Cleanup(rpcCl.Close) - - l2Net := system.L2Network(stack.L2NetworkID(n.id.ChainID())) - sysL2EL := shim.NewL2ELNode(shim.L2ELNodeConfig{ - RollupCfg: l2Net.RollupConfig(), - ELNodeConfig: shim.ELNodeConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - Client: rpcCl, - ChainID: n.id.ChainID(), - }, - ID: n.id, +func L2ELWithSupervisor(supervisorID stack.SupervisorID) L2ELOption { + return L2ELOptionFn(func(p devtest.P, id stack.L2ELNodeID, cfg *L2ELConfig) { + cfg.SupervisorID = &supervisorID }) - sysL2EL.SetLabel(match.LabelVendor, string(match.OpGeth)) - l2Net.(stack.ExtensibleL2Network).AddL2ELNode(sysL2EL) } -func (n *L2ELNode) Start() { - n.mu.Lock() - defer n.mu.Unlock() - if n.l2Geth != nil { - n.logger.Warn("op-geth already started") - return +func DefaultL2ELConfig() *L2ELConfig { + return &L2ELConfig{ + SupervisorID: nil, } - - require := n.p.Require() - l2Geth, err := geth.InitL2(n.id.String(), n.l2Net.genesis, n.jwtPath, - func(ethCfg *ethconfig.Config, nodeCfg *gn.Config) error { - ethCfg.InteropMessageRPC = n.supervisorRPC - ethCfg.InteropMempoolFiltering = true - nodeCfg.P2P = p2p.Config{ - NoDiscovery: true, - ListenAddr: "127.0.0.1:0", - MaxPeers: 10, - } - if n.authRPC != "" { - // Preserve the existing auth rpc port - nodeCfg.AuthPort = rpcPort(require, n.authRPC) - } - if n.userRPC != "" { - // 
Preserve the existing websocket rpc port - nodeCfg.WSPort = rpcPort(require, n.userRPC) - } - return nil - }) - require.NoError(err) - require.NoError(l2Geth.Node.Start()) - n.l2Geth = l2Geth - n.authRPC = l2Geth.AuthRPC().RPC() - n.userRPC = l2Geth.UserRPC().RPC() } -func rpcPort(require *testreq.Assertions, rpc string) int { - u, err := url.Parse(rpc) - require.NoError(err, "Failed to parse existing rpc url") - port, err := strconv.Atoi(u.Port()) - require.NoError(err, "Invalid rpc port") - return port +type L2ELOption interface { + Apply(p devtest.P, id stack.L2ELNodeID, cfg *L2ELConfig) } -func (n *L2ELNode) Stop() { - n.mu.Lock() - defer n.mu.Unlock() - if n.l2Geth == nil { - n.logger.Warn("op-geth already stopped") - return - } - n.logger.Info("Closing op-geth", "id", n.id) - closeErr := n.l2Geth.Close() - n.logger.Info("Closed op-geth", "id", n.id, "err", closeErr) - n.l2Geth = nil +// WithGlobalL2ELOption applies the L2ELOption to all L2ELNode instances in this orchestrator +func WithGlobalL2ELOption(opt L2ELOption) stack.Option[*Orchestrator] { + return stack.BeforeDeploy(func(o *Orchestrator) { + o.l2ELOptions = append(o.l2ELOptions, opt) + }) } -func WithL2ELNode(id stack.L2ELNodeID, supervisorID *stack.SupervisorID) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) - - require := p.Require() +type L2ELOptionFn func(p devtest.P, id stack.L2ELNodeID, cfg *L2ELConfig) - l2Net, ok := orch.l2Nets.Get(id.ChainID()) - require.True(ok, "L2 network required") +var _ L2ELOption = L2ELOptionFn(nil) - jwtPath, _ := orch.writeDefaultJWT() +func (fn L2ELOptionFn) Apply(p devtest.P, id stack.L2ELNodeID, cfg *L2ELConfig) { + fn(p, id, cfg) +} - useInterop := l2Net.genesis.Config.InteropTime != nil +// L2ELOptionBundle a list of multiple L2ELOption, to all be applied in order. 
+type L2ELOptionBundle []L2ELOption - supervisorRPC := "" - if useInterop { - require.NotNil(supervisorID, "supervisor is required for interop") - sup, ok := orch.supervisors.Get(*supervisorID) - require.True(ok, "supervisor is required for interop") - supervisorRPC = sup.userRPC - } +var _ L2ELOption = L2ELOptionBundle(nil) - logger := p.Logger() +func (l L2ELOptionBundle) Apply(p devtest.P, id stack.L2ELNodeID, cfg *L2ELConfig) { + for _, opt := range l { + p.Require().NotNil(opt, "cannot Apply nil L2ELOption") + opt.Apply(p, id, cfg) + } +} - l2EL := &L2ELNode{ - id: id, - p: orch.P(), - logger: logger, - l2Net: l2Net, - jwtPath: jwtPath, - supervisorRPC: supervisorRPC, - } - l2EL.Start() - p.Cleanup(func() { - l2EL.Stop() - }) - require.True(orch.l2ELs.SetIfMissing(id, l2EL), "must be unique L2 EL node") - }) +// WithL2ELNode adds the default type of L2 CL node. +// The default can be configured with DEVSTACK_L2EL_KIND. +// Tests that depend on specific types can use options like WithKonaNode and WithOpNode directly. +func WithL2ELNode(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestrator] { + switch os.Getenv("DEVSTACK_L2EL_KIND") { + case "op-reth": + return WithOpReth(id, opts...) + default: + return WithOpGeth(id, opts...) 
+ } } -func WithL2ELP2PConnection(l2EL1ID, l2EL2ID stack.L2ELNodeID) stack.Option[*Orchestrator] { +func WithExtL2Node(id stack.L2ELNodeID, elRPCEndpoint string) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { require := orch.P().Require() - l2EL1, ok := orch.l2ELs.Get(l2EL1ID) - require.True(ok, "looking for L2 EL node 1 to connect p2p") - l2EL2, ok := orch.l2ELs.Get(l2EL2ID) - require.True(ok, "looking for L2 EL node 2 to connect p2p") - require.Equal(l2EL1.l2Net.rollupCfg.L2ChainID, l2EL2.l2Net.rollupCfg.L2ChainID, "must be same l2 chain") - - ctx := orch.P().Ctx() - logger := orch.P().Logger() - - rpc1, err := dial.DialRPCClientWithTimeout(ctx, 30*time.Second, logger, l2EL1.userRPC) - require.NoError(err, "failed to connect to el1 rpc") - defer rpc1.Close() - rpc2, err := dial.DialRPCClientWithTimeout(ctx, 30*time.Second, logger, l2EL2.userRPC) - require.NoError(err, "failed to connect to el2 rpc") - defer rpc2.Close() - - ConnectP2P(orch.P().Ctx(), require, rpc1, rpc2) - }) -} - -type RpcCaller interface { - CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error -} - -// ConnectP2P creates a p2p peer connection between node1 and node2. 
-func ConnectP2P(ctx context.Context, require *testreq.Assertions, initiator RpcCaller, acceptor RpcCaller) { - var targetInfo p2p.NodeInfo - require.NoError(acceptor.CallContext(ctx, &targetInfo, "admin_nodeInfo"), "get node info") - - var peerAdded bool - require.NoError(initiator.CallContext(ctx, &peerAdded, "admin_addPeer", targetInfo.Enode), "add peer") - require.True(peerAdded, "should have added peer successfully") - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - err := wait.For(ctx, time.Second, func() (bool, error) { - var peers []peer - if err := initiator.CallContext(ctx, &peers, "admin_peers"); err != nil { - return false, err + // Create L2 EL node with external RPC + l2ELNode := &OpGeth{ + id: id, + userRPC: elRPCEndpoint, + readOnly: true, } - return slices.ContainsFunc(peers, func(p peer) bool { - return p.ID == targetInfo.ID - }), nil + require.True(orch.l2ELs.SetIfMissing(id, l2ELNode), "must not already exist") }) - require.NoError(err, "The peer was not connected") -} - -type peer struct { - ID string `json:"id"` } diff --git a/op-devstack/sysgo/l2_el_opgeth.go b/op-devstack/sysgo/l2_el_opgeth.go new file mode 100644 index 0000000000000..e53f07ff3ad46 --- /dev/null +++ b/op-devstack/sysgo/l2_el_opgeth.go @@ -0,0 +1,183 @@ +package sysgo + +import ( + "sync" + + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/log" + gn "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/rpc" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" + "github.com/ethereum-optimism/optimism/op-service/client" + 
"github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" +) + +type OpGeth struct { + mu sync.Mutex + + p devtest.P + logger log.Logger + id stack.L2ELNodeID + l2Net *L2Network + jwtPath string + jwtSecret [32]byte + supervisorRPC string + l2Geth *geth.GethInstance + readOnly bool + + authRPC string + userRPC string + + authProxy *tcpproxy.Proxy + userProxy *tcpproxy.Proxy +} + +var _ L2ELNode = (*OpGeth)(nil) + +func (n *OpGeth) UserRPC() string { + return n.userRPC +} + +func (n *OpGeth) EngineRPC() string { + return n.authRPC +} + +func (n *OpGeth) JWTPath() string { + return n.jwtPath +} + +func (n *OpGeth) hydrate(system stack.ExtensibleSystem) { + require := system.T().Require() + rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), n.userRPC, client.WithLazyDial()) + require.NoError(err) + system.T().Cleanup(rpcCl.Close) + + // ReadOnly cannot expose auth RPC + var engineCl client.RPC + if !n.readOnly { + auth := rpc.WithHTTPAuth(gn.NewJWTAuth(n.jwtSecret)) + engineCl, err = client.NewRPC(system.T().Ctx(), system.Logger(), n.authRPC, client.WithGethRPCOptions(auth)) + require.NoError(err) + system.T().Cleanup(engineCl.Close) + } + + l2Net := system.L2Network(stack.L2NetworkID(n.id.ChainID())) + sysL2EL := shim.NewL2ELNode(shim.L2ELNodeConfig{ + RollupCfg: l2Net.RollupConfig(), + ELNodeConfig: shim.ELNodeConfig{ + CommonConfig: shim.NewCommonConfig(system.T()), + Client: rpcCl, + ChainID: n.id.ChainID(), + }, + EngineClient: engineCl, + ID: n.id, + }) + sysL2EL.SetLabel(match.LabelVendor, string(match.OpGeth)) + l2Net.(stack.ExtensibleL2Network).AddL2ELNode(sysL2EL) +} + +func (n *OpGeth) Start() { + n.mu.Lock() + defer n.mu.Unlock() + if n.l2Geth != nil { + n.logger.Warn("op-geth already started") + return + } + + if n.authProxy == nil { + n.authProxy = tcpproxy.New(n.logger.New("proxy", "l2el-auth")) + n.p.Require().NoError(n.authProxy.Start()) + n.p.Cleanup(func() { + n.authProxy.Close() + }) + n.authRPC = "ws://" + 
n.authProxy.Addr() + } + if n.userProxy == nil { + n.userProxy = tcpproxy.New(n.logger.New("proxy", "l2el-user")) + n.p.Require().NoError(n.userProxy.Start()) + n.p.Cleanup(func() { + n.userProxy.Close() + }) + n.userRPC = "ws://" + n.userProxy.Addr() + } + + require := n.p.Require() + l2Geth, err := geth.InitL2(n.id.String(), n.l2Net.genesis, n.jwtPath, + func(ethCfg *ethconfig.Config, nodeCfg *gn.Config) error { + ethCfg.InteropMessageRPC = n.supervisorRPC + ethCfg.InteropMempoolFiltering = true + nodeCfg.P2P = p2p.Config{ + NoDiscovery: true, + ListenAddr: "127.0.0.1:0", + MaxPeers: 10, + } + return nil + }) + require.NoError(err) + require.NoError(l2Geth.Node.Start()) + n.l2Geth = l2Geth + n.authProxy.SetUpstream(ProxyAddr(require, l2Geth.AuthRPC().RPC())) + n.userProxy.SetUpstream(ProxyAddr(require, l2Geth.UserRPC().RPC())) +} + +func (n *OpGeth) Stop() { + n.mu.Lock() + defer n.mu.Unlock() + if n.l2Geth == nil { + n.logger.Warn("op-geth already stopped") + return + } + n.logger.Info("Closing op-geth", "id", n.id) + closeErr := n.l2Geth.Close() + n.logger.Info("Closed op-geth", "id", n.id, "err", closeErr) + n.l2Geth = nil +} + +func WithOpGeth(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestrator] { + return stack.AfterDeploy(func(orch *Orchestrator) { + p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) + require := p.Require() + + l2Net, ok := orch.l2Nets.Get(id.ChainID()) + require.True(ok, "L2 network required") + + cfg := DefaultL2ELConfig() + orch.l2ELOptions.Apply(p, id, cfg) // apply global options + L2ELOptionBundle(opts).Apply(p, id, cfg) // apply specific options + + jwtPath, jwtSecret := orch.writeDefaultJWT() + + useInterop := l2Net.genesis.Config.InteropTime != nil + + supervisorRPC := "" + if useInterop { + require.NotNil(cfg.SupervisorID, "supervisor is required for interop") + sup, ok := orch.supervisors.Get(*cfg.SupervisorID) + require.True(ok, "supervisor is required for interop") + supervisorRPC = sup.UserRPC() + 
} + + logger := p.Logger() + + l2EL := &OpGeth{ + id: id, + p: orch.P(), + logger: logger, + l2Net: l2Net, + jwtPath: jwtPath, + jwtSecret: jwtSecret, + supervisorRPC: supervisorRPC, + } + l2EL.Start() + p.Cleanup(func() { + l2EL.Stop() + }) + require.True(orch.l2ELs.SetIfMissing(id, l2EL), "must be unique L2 EL node") + }) +} diff --git a/op-devstack/sysgo/l2_el_opreth.go b/op-devstack/sysgo/l2_el_opreth.go new file mode 100644 index 0000000000000..45a221bb9d7e2 --- /dev/null +++ b/op-devstack/sysgo/l2_el_opreth.go @@ -0,0 +1,251 @@ +package sysgo + +import ( + "encoding/json" + "os" + "path/filepath" + "sync" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/logpipe" + "github.com/ethereum-optimism/optimism/op-service/tasks" + "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" +) + +type OpReth struct { + mu sync.Mutex + + id stack.L2ELNodeID + l2Net *L2Network + jwtPath string + authRPC string + userRPC string + + authProxy *tcpproxy.Proxy + userProxy *tcpproxy.Proxy + + execPath string + args []string + // Each entry is of the form "key=value". 
+ env []string + + p devtest.P + + sub *SubProcess +} + +var _ L2ELNode = (*OpReth)(nil) + +func (n *OpReth) hydrate(system stack.ExtensibleSystem) { + require := system.T().Require() + rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), n.userRPC, client.WithLazyDial()) + require.NoError(err) + system.T().Cleanup(rpcCl.Close) + + l2Net := system.L2Network(stack.L2NetworkID(n.id.ChainID())) + sysL2EL := shim.NewL2ELNode(shim.L2ELNodeConfig{ + RollupCfg: l2Net.RollupConfig(), + ELNodeConfig: shim.ELNodeConfig{ + CommonConfig: shim.NewCommonConfig(system.T()), + Client: rpcCl, + ChainID: n.id.ChainID(), + }, + ID: n.id, + }) + sysL2EL.SetLabel(match.LabelVendor, string(match.OpReth)) + l2Net.(stack.ExtensibleL2Network).AddL2ELNode(sysL2EL) +} + +func (n *OpReth) Start() { + n.mu.Lock() + defer n.mu.Unlock() + if n.sub != nil { + n.p.Logger().Warn("op-reth already started") + return + } + if n.authProxy == nil { + n.authProxy = tcpproxy.New(n.p.Logger()) + n.p.Require().NoError(n.authProxy.Start()) + n.p.Cleanup(func() { + n.authProxy.Close() + }) + n.authRPC = "ws://" + n.authProxy.Addr() + } + if n.userProxy == nil { + n.userProxy = tcpproxy.New(n.p.Logger()) + n.p.Require().NoError(n.userProxy.Start()) + n.p.Cleanup(func() { + n.userProxy.Close() + }) + n.userRPC = "ws://" + n.userProxy.Addr() + } + logOut := logpipe.ToLogger(n.p.Logger().New("src", "stdout")) + logErr := logpipe.ToLogger(n.p.Logger().New("src", "stderr")) + userRPC := make(chan string, 1) + authRPC := make(chan string, 1) + onLogEntry := func(e logpipe.LogEntry) { + switch e.LogMessage() { + case "RPC WS server started": + select { + case userRPC <- "ws://" + e.FieldValue("url").(string): + default: + } + case "RPC auth server started": + select { + case authRPC <- "ws://" + e.FieldValue("url").(string): + default: + } + } + } + stdOutLogs := logpipe.LogProcessor(func(line []byte) { + e := logpipe.ParseRustStructuredLogs(line) + logOut(e) + onLogEntry(e) + }) + stdErrLogs := 
logpipe.LogProcessor(func(line []byte) { + e := logpipe.ParseRustStructuredLogs(line) + logErr(e) + }) + n.sub = NewSubProcess(n.p, stdOutLogs, stdErrLogs) + + err := n.sub.Start(n.execPath, n.args, n.env) + n.p.Require().NoError(err, "Must start") + + var userRPCAddr, authRPCAddr string + n.p.Require().NoError(tasks.Await(n.p.Ctx(), userRPC, &userRPCAddr), "need user RPC") + n.p.Require().NoError(tasks.Await(n.p.Ctx(), authRPC, &authRPCAddr), "need auth RPC") + + n.userProxy.SetUpstream(ProxyAddr(n.p.Require(), userRPCAddr)) + n.authProxy.SetUpstream(ProxyAddr(n.p.Require(), authRPCAddr)) +} + +// Stop stops the op-reth node. +// warning: no restarts supported yet, since the RPC port is not remembered. +func (n *OpReth) Stop() { + n.mu.Lock() + defer n.mu.Unlock() + err := n.sub.Stop() + n.p.Require().NoError(err, "Must stop") + n.sub = nil +} + +func (n *OpReth) UserRPC() string { + return n.userRPC +} + +func (n *OpReth) EngineRPC() string { + return n.authRPC +} + +func (n *OpReth) JWTPath() string { + return n.jwtPath +} + +func WithOpReth(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestrator] { + return stack.AfterDeploy(func(orch *Orchestrator) { + p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) + require := p.Require() + + l2Net, ok := orch.l2Nets.Get(id.ChainID()) + require.True(ok, "L2 network required") + + cfg := DefaultL2ELConfig() + orch.l2ELOptions.Apply(p, id, cfg) // apply global options + L2ELOptionBundle(opts).Apply(p, id, cfg) // apply specific options + + jwtPath, _ := orch.writeDefaultJWT() + + useInterop := l2Net.genesis.Config.InteropTime != nil + + supervisorRPC := "" + if useInterop { + require.NotNil(cfg.SupervisorID, "supervisor is required for interop") + sup, ok := orch.supervisors.Get(*cfg.SupervisorID) + require.True(ok, "supervisor is required for interop") + supervisorRPC = sup.UserRPC() + } + + tempDir := p.TempDir() + data, err := json.Marshal(l2Net.genesis) + p.Require().NoError(err, "must 
json-encode genesis") + chainConfigPath := filepath.Join(tempDir, "genesis.json") + p.Require().NoError(os.WriteFile(chainConfigPath, data, 0o644), "must write genesis file") + + dataDirPath := filepath.Join(tempDir, "data") + p.Require().NoError(os.MkdirAll(dataDirPath, 0o755), "must create datadir") + + // reth writes logs not just to stdout, but also to file, + // and to global user-cache by default, rather than the datadir. + // So we customize this to temp-dir too, to not pollute the user-cache dir. + logDirPath := filepath.Join(tempDir, "logs") + p.Require().NoError(os.MkdirAll(dataDirPath, 0o755), "must create logs dir") + + tempP2PPath := filepath.Join(tempDir, "p2pkey.txt") + + execPath := os.Getenv("OP_RETH_EXEC_PATH") + p.Require().NotEmpty(execPath, "OP_RETH_EXEC_PATH environment variable must be set") + _, err = os.Stat(execPath) + p.Require().NotErrorIs(err, os.ErrNotExist, "executable must exist") + + // reth does not support env-var configuration like the Go services, + // so we use the CLI flags instead. 
+ args := []string{ + "node", + "--chain=" + chainConfigPath, + "--with-unused-ports", + "--datadir=" + dataDirPath, + "--log.file.directory=" + logDirPath, + "--disable-nat", + "--disable-dns-discovery", + "--disable-discv4-discovery", + "--p2p-secret-key=" + tempP2PPath, + "--nat=none", + "--addr=127.0.0.1", + "--port=0", + "--http", + "--http.addr=127.0.0.1", + "--http.port=0", + "--http.api=admin,debug,eth,net,trace,txpool,web3,rpc,reth,miner", + "--ws", + "--ws.addr=127.0.0.1", + "--ws.port=0", + "--ws.api=admin,debug,eth,net,trace,txpool,web3,rpc,reth,miner", + "--ipcdisable", + "--authrpc.addr=127.0.0.1", + "--authrpc.port=0", + "--authrpc.jwtsecret=" + jwtPath, + "--txpool.minimum-priority-fee=1", + "--txpool.nolocals", + "--builder.interval=100ms", + "--builder.deadline=2", + "--log.stdout.format=json", + "--color=never", + "-vvvv", + } + if supervisorRPC != "" { + args = append(args, "--rollup.supervisor-http="+supervisorRPC) + } + + l2EL := &OpReth{ + id: id, + l2Net: l2Net, + jwtPath: jwtPath, + authRPC: "", + userRPC: "", + execPath: execPath, + args: args, + env: []string{}, + p: p, + } + + p.Logger().Info("Starting op-reth") + l2EL.Start() + p.Cleanup(l2EL.Stop) + p.Logger().Info("op-reth is ready", "userRPC", l2EL.userRPC, "authRPC", l2EL.authRPC) + require.True(orch.l2ELs.SetIfMissing(id, l2EL), "must be unique L2 EL node") + }) +} diff --git a/op-devstack/sysgo/l2_el_p2p_util.go b/op-devstack/sysgo/l2_el_p2p_util.go new file mode 100644 index 0000000000000..a97633d069ff1 --- /dev/null +++ b/op-devstack/sysgo/l2_el_p2p_util.go @@ -0,0 +1,92 @@ +package sysgo + +import ( + "context" + "slices" + "time" + + "github.com/ethereum/go-ethereum/p2p" + + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" + "github.com/ethereum-optimism/optimism/op-service/dial" + "github.com/ethereum-optimism/optimism/op-service/testreq" +) + +func WithL2ELP2PConnection(l2EL1ID, l2EL2ID stack.L2ELNodeID) 
stack.Option[*Orchestrator] { + return stack.AfterDeploy(func(orch *Orchestrator) { + require := orch.P().Require() + + l2EL1, ok := orch.l2ELs.Get(l2EL1ID) + require.True(ok, "looking for L2 EL node 1 to connect p2p") + l2EL2, ok := orch.l2ELs.Get(l2EL2ID) + require.True(ok, "looking for L2 EL node 2 to connect p2p") + require.Equal(l2EL1ID.ChainID(), l2EL2ID.ChainID(), "must be same l2 chain") + + ctx := orch.P().Ctx() + logger := orch.P().Logger() + + rpc1, err := dial.DialRPCClientWithTimeout(ctx, logger, l2EL1.UserRPC()) + require.NoError(err, "failed to connect to el1 rpc") + defer rpc1.Close() + rpc2, err := dial.DialRPCClientWithTimeout(ctx, logger, l2EL2.UserRPC()) + require.NoError(err, "failed to connect to el2 rpc") + defer rpc2.Close() + + ConnectP2P(orch.P().Ctx(), require, rpc1, rpc2) + }) +} + +type RpcCaller interface { + CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error +} + +// ConnectP2P creates a p2p peer connection between node1 and node2. +func ConnectP2P(ctx context.Context, require *testreq.Assertions, initiator RpcCaller, acceptor RpcCaller) { + var targetInfo p2p.NodeInfo + require.NoError(acceptor.CallContext(ctx, &targetInfo, "admin_nodeInfo"), "get node info") + + var peerAdded bool + require.NoError(initiator.CallContext(ctx, &peerAdded, "admin_addPeer", targetInfo.Enode), "add peer") + require.True(peerAdded, "should have added peer successfully") + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + err := wait.For(ctx, time.Second, func() (bool, error) { + var peers []peer + if err := initiator.CallContext(ctx, &peers, "admin_peers"); err != nil { + return false, err + } + return slices.ContainsFunc(peers, func(p peer) bool { + return p.ID == targetInfo.ID + }), nil + }) + require.NoError(err, "The peer was not connected") +} + +// DisconnectP2P disconnects a p2p peer connection between node1 and node2. 
+func DisconnectP2P(ctx context.Context, require *testreq.Assertions, initiator RpcCaller, acceptor RpcCaller) { + var targetInfo p2p.NodeInfo + require.NoError(acceptor.CallContext(ctx, &targetInfo, "admin_nodeInfo"), "get node info") + + var peerRemoved bool + require.NoError(initiator.CallContext(ctx, &peerRemoved, "admin_removePeer", targetInfo.Enode), "remove peer") + require.True(peerRemoved, "should have removed peer successfully") + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + err := wait.For(ctx, time.Second, func() (bool, error) { + var peers []peer + if err := initiator.CallContext(ctx, &peers, "admin_peers"); err != nil { + return false, err + } + return !slices.ContainsFunc(peers, func(p peer) bool { + return p.ID == targetInfo.ID + }), nil + }) + require.NoError(err, "The peer was not removed") +} + +type peer struct { + ID string `json:"id"` +} diff --git a/op-devstack/sysgo/l2_el_synctester.go b/op-devstack/sysgo/l2_el_synctester.go new file mode 100644 index 0000000000000..5ee34116b0816 --- /dev/null +++ b/op-devstack/sysgo/l2_el_synctester.go @@ -0,0 +1,203 @@ +package sysgo + +import ( + "fmt" + "sync" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" +) + +// SyncTesterEL is an L2ELNode implementation that runs a sync tester service. +// It provides RPC endpoints that can be used by CL nodes for testing sync functionality. 
+type SyncTesterEL struct { + mu sync.Mutex + + id stack.L2ELNodeID + l2Net *L2Network + jwtPath string + + authRPC string + userRPC string + + authProxy *tcpproxy.Proxy + userProxy *tcpproxy.Proxy + + config *SyncTesterELConfig + p devtest.P + + // Reference to the orchestrator to find the EL node to connect to + orch *Orchestrator +} + +type SyncTesterELConfig struct { + FCUState eth.FCUState + ELSyncActive bool + ELSyncTarget uint64 +} + +func (cfg *SyncTesterELConfig) Path() string { + path := fmt.Sprintf("?latest=%d&safe=%d&finalized=%d", cfg.FCUState.Latest, cfg.FCUState.Safe, cfg.FCUState.Finalized) + if cfg.ELSyncActive { + path += fmt.Sprintf("&el_sync_target=%d", cfg.ELSyncTarget) + } + return path +} + +func DefaultSyncTesterELConfig() *SyncTesterELConfig { + return &SyncTesterELConfig{ + FCUState: eth.FCUState{Latest: 0, Safe: 0, Finalized: 0}, + ELSyncActive: false, + ELSyncTarget: 0, + } +} + +type SyncTesterELOption interface { + Apply(p devtest.P, id stack.L2ELNodeID, cfg *SyncTesterELConfig) +} + +// WithGlobalSyncTesterELOption applies the SyncTesterELOption to all SyncTesterEL instances in this orchestrator +func WithGlobalSyncTesterELOption(opt SyncTesterELOption) stack.Option[*Orchestrator] { + return stack.BeforeDeploy(func(o *Orchestrator) { + o.SyncTesterELOptions = append(o.SyncTesterELOptions, opt) + }) +} + +type SyncTesterELOptionFn func(p devtest.P, id stack.L2ELNodeID, cfg *SyncTesterELConfig) + +var _ SyncTesterELOption = SyncTesterELOptionFn(nil) + +func (fn SyncTesterELOptionFn) Apply(p devtest.P, id stack.L2ELNodeID, cfg *SyncTesterELConfig) { + fn(p, id, cfg) +} + +// SyncTesterELOptionBundle a list of multiple SyncTesterELOption, to all be applied in order. 
+type SyncTesterELOptionBundle []SyncTesterELOption + +var _ SyncTesterELOption = SyncTesterELOptionBundle(nil) + +func (l SyncTesterELOptionBundle) Apply(p devtest.P, id stack.L2ELNodeID, cfg *SyncTesterELConfig) { + for _, opt := range l { + p.Require().NotNil(opt, "cannot Apply nil SyncTesterELOption") + opt.Apply(p, id, cfg) + } +} + +var _ L2ELNode = (*SyncTesterEL)(nil) + +func (n *SyncTesterEL) hydrate(system stack.ExtensibleSystem) { + require := system.T().Require() + rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), n.userRPC, client.WithLazyDial()) + require.NoError(err) + system.T().Cleanup(rpcCl.Close) + + l2Net := system.L2Network(stack.L2NetworkID(n.id.ChainID())) + sysL2EL := shim.NewL2ELNode(shim.L2ELNodeConfig{ + RollupCfg: l2Net.RollupConfig(), + ELNodeConfig: shim.ELNodeConfig{ + CommonConfig: shim.NewCommonConfig(system.T()), + Client: rpcCl, + ChainID: n.id.ChainID(), + }, + ID: n.id, + }) + sysL2EL.SetLabel(match.LabelVendor, "sync-tester") + l2Net.(stack.ExtensibleL2Network).AddL2ELNode(sysL2EL) +} + +func (n *SyncTesterEL) Start() { + n.mu.Lock() + defer n.mu.Unlock() + + // The SyncTesterEL should connect to the existing sync tester service + // Get the endpoint from the orchestrator's syncTester service + if n.orch.syncTester == nil || n.orch.syncTester.service == nil { + n.p.Logger().Error("syncTester service not available in orchestrator") + return + } + + // Use NewEndpoint to get the correct session-specific endpoint for this chain ID + endpoint := n.orch.syncTester.service.SyncTesterRPCPath(n.id.ChainID(), true) + + path := endpoint + n.config.Path() + + if n.authProxy == nil { + n.authProxy = tcpproxy.New(n.p.Logger().New("proxy", "l2el-synctester-auth")) + n.p.Require().NoError(n.authProxy.Start()) + n.p.Cleanup(func() { + n.authProxy.Close() + }) + + rpc := "http://" + n.authProxy.Addr() + n.authRPC = rpc + path + } + if n.userProxy == nil { + n.userProxy = tcpproxy.New(n.p.Logger().New("proxy", 
"l2el-synctester-user")) + n.p.Require().NoError(n.userProxy.Start()) + n.p.Cleanup(func() { + n.userProxy.Close() + }) + + rpc := "http://" + n.userProxy.Addr() + n.userRPC = rpc + path + } + + sessionURL := n.orch.syncTester.service.RPC() + path + + n.authProxy.SetUpstream(ProxyAddr(n.p.Require(), sessionURL)) + n.userProxy.SetUpstream(ProxyAddr(n.p.Require(), sessionURL)) +} + +func (n *SyncTesterEL) Stop() { + // The SyncTesterEL is just a proxy, so there's nothing to stop +} + +func (n *SyncTesterEL) UserRPC() string { + return n.userRPC +} + +func (n *SyncTesterEL) EngineRPC() string { + return n.authRPC +} + +func (n *SyncTesterEL) JWTPath() string { + return n.jwtPath +} + +// WithSyncTesterL2ELNode creates a SyncTesterEL that satisfies the L2ELNode interface +// The sync tester acts as an EL node that can be used by CL nodes for testing sync. +func WithSyncTesterL2ELNode(id, readonlyEL stack.L2ELNodeID, opts ...SyncTesterELOption) stack.Option[*Orchestrator] { + return stack.AfterDeploy(func(orch *Orchestrator) { + p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) + require := p.Require() + + l2Net, ok := orch.l2Nets.Get(readonlyEL.ChainID()) + require.True(ok, "L2 network required") + + cfg := DefaultSyncTesterELConfig() + orch.SyncTesterELOptions.Apply(p, id, cfg) // apply global options + SyncTesterELOptionBundle(opts).Apply(p, id, cfg) // apply specific options + + jwtPath, _ := orch.writeDefaultJWT() + + syncTesterEL := &SyncTesterEL{ + id: id, + l2Net: l2Net, + jwtPath: jwtPath, + config: cfg, + p: p, + orch: orch, + } + + p.Logger().Info("Starting sync tester EL", "id", id) + syncTesterEL.Start() + p.Cleanup(syncTesterEL.Stop) + p.Logger().Info("sync tester EL is ready", "userRPC", syncTesterEL.userRPC, "authRPC", syncTesterEL.authRPC) + require.True(orch.l2ELs.SetIfMissing(id, syncTesterEL), "must be unique L2 EL node") + }) +} diff --git a/op-devstack/sysgo/l2_network_superchain_registry.go 
b/op-devstack/sysgo/l2_network_superchain_registry.go new file mode 100644 index 0000000000000..f395d2e813d4d --- /dev/null +++ b/op-devstack/sysgo/l2_network_superchain_registry.go @@ -0,0 +1,86 @@ +package sysgo + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/core" + + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-node/chaincfg" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/superutil" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" +) + +// WithL2NetworkFromSuperchainRegistry creates an L2 network using the rollup config from the superchain registry +func WithL2NetworkFromSuperchainRegistry(l2NetworkID stack.L2NetworkID, networkName string) stack.Option[*Orchestrator] { + return stack.BeforeDeploy(func(orch *Orchestrator) { + p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2NetworkID)) + require := p.Require() + + // Load the rollup config from the superchain registry + rollupCfg, err := chaincfg.GetRollupConfig(networkName) + require.NoError(err, "failed to load rollup config for network %s", networkName) + + // Get the chain config from the superchain registry + chainCfg := chaincfg.ChainByName(networkName) + require.NotNil(chainCfg, "chain config not found for network %s", networkName) + + // Load the chain config using superutil + paramsChainConfig, err := superutil.LoadOPStackChainConfigFromChainID(chainCfg.ChainID) + require.NoError(err, "failed to load chain config for network %s", networkName) + + // Create a genesis config from the chain config + genesis := &core.Genesis{ + Config: paramsChainConfig, + } + + // Create the L2 network + l2Net := &L2Network{ + id: l2NetworkID, + l1ChainID: eth.ChainIDFromBig(rollupCfg.L1ChainID), + genesis: genesis, + rollupCfg: rollupCfg, + keys: orch.keys, + } + + require.True(orch.l2Nets.SetIfMissing(l2NetworkID.ChainID(), l2Net), + fmt.Sprintf("must 
not already exist: %s", l2NetworkID)) + }) +} + +// WithL2NetworkFromSuperchainRegistryWithDependencySet creates an L2 network using the rollup config from the superchain registry +// and also sets up the dependency set for interop support +func WithL2NetworkFromSuperchainRegistryWithDependencySet(l2NetworkID stack.L2NetworkID, networkName string) stack.Option[*Orchestrator] { + return stack.Combine( + WithL2NetworkFromSuperchainRegistry(l2NetworkID, networkName), + stack.BeforeDeploy(func(orch *Orchestrator) { + p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2NetworkID)) + require := p.Require() + + // Load the dependency set from the superchain registry + chainCfg := chaincfg.ChainByName(networkName) + require.NotNil(chainCfg, "chain config not found for network %s", networkName) + + _, err := depset.FromRegistry(eth.ChainIDFromUInt64(chainCfg.ChainID)) + if err != nil { + // If dependency set is not available, that's okay - it's optional + p.Logger().Info("No dependency set available for network", "network", networkName, "err", err) + return + } + + // Create a cluster to hold the dependency set + clusterID := stack.ClusterID(networkName) + + // Create a minimal full config set with just the dependency set + // This is a simplified approach - in a real implementation you might want + // to create a proper FullConfigSetMerged + cluster := &Cluster{ + id: clusterID, + cfgset: depset.FullConfigSetMerged{}, // Empty for now + } + + orch.clusters.Set(clusterID, cluster) + }), + ) +} diff --git a/op-devstack/sysgo/l2_proposer.go b/op-devstack/sysgo/l2_proposer.go index 37089f9b62637..0ed7a7806a0ca 100644 --- a/op-devstack/sysgo/l2_proposer.go +++ b/op-devstack/sysgo/l2_proposer.go @@ -91,12 +91,14 @@ func WithProposerPostDeploy(orch *Orchestrator, proposerID stack.L2ProposerID, l } proposerCLIConfig := &ps.CLIConfig{ - L1EthRpc: l1EL.userRPC, + L1EthRpc: l1EL.UserRPC(), L2OOAddress: "", // legacy, not used, fault-proofs support only for now. 
PollInterval: 500 * time.Millisecond, AllowNonFinalized: true, - TxMgrConfig: setuputils.NewTxMgrConfig(endpoint.URL(l1EL.userRPC), proposerSecret), - RPCConfig: oprpc.CLIConfig{}, + TxMgrConfig: setuputils.NewTxMgrConfig(endpoint.URL(l1EL.UserRPC()), proposerSecret), + RPCConfig: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + }, LogConfig: oplog.CLIConfig{ Level: log.LvlInfo, Format: oplog.FormatText, @@ -118,12 +120,12 @@ func WithProposerPostDeploy(orch *Orchestrator, proposerID stack.L2ProposerID, l require.NotNil(supervisorID, "need supervisor to connect to in interop") supervisorNode, ok := orch.supervisors.Get(*supervisorID) require.True(ok) - proposerCLIConfig.SupervisorRpcs = []string{supervisorNode.userRPC} + proposerCLIConfig.SupervisorRpcs = []string{supervisorNode.UserRPC()} } else { require.NotNil(l2CLID, "need L2 CL to connect to pre-interop") l2CL, ok := orch.l2CLs.Get(*l2CLID) require.True(ok) - proposerCLIConfig.RollupRpc = l2CL.userRPC + proposerCLIConfig.RollupRpc = l2CL.UserRPC() } proposer, err := ps.ProposerServiceFromCLIConfig(ctx, "0.0.1", proposerCLIConfig, logger) diff --git a/op-devstack/sysgo/orchestrator.go b/op-devstack/sysgo/orchestrator.go index 3d02543aea426..41f7956181e01 100644 --- a/op-devstack/sysgo/orchestrator.go +++ b/op-devstack/sysgo/orchestrator.go @@ -31,24 +31,27 @@ type Orchestrator struct { // options batcherOptions []BatcherOption proposerOptions []ProposerOption - l2CLOptions []L2CLOption + l2CLOptions L2CLOptionBundle + l2ELOptions L2ELOptionBundle + SyncTesterELOptions SyncTesterELOptionBundle deployerPipelineOptions []DeployerPipelineOption superchains locks.RWMap[stack.SuperchainID, *Superchain] clusters locks.RWMap[stack.ClusterID, *Cluster] l1Nets locks.RWMap[eth.ChainID, *L1Network] l2Nets locks.RWMap[eth.ChainID, *L2Network] - l1ELs locks.RWMap[stack.L1ELNodeID, *L1ELNode] + l1ELs locks.RWMap[stack.L1ELNodeID, L1ELNode] l1CLs locks.RWMap[stack.L1CLNodeID, *L1CLNode] - l2ELs locks.RWMap[stack.L2ELNodeID, 
*L2ELNode] - l2CLs locks.RWMap[stack.L2CLNodeID, *L2CLNode] - supervisors locks.RWMap[stack.SupervisorID, *Supervisor] + l2ELs locks.RWMap[stack.L2ELNodeID, L2ELNode] + l2CLs locks.RWMap[stack.L2CLNodeID, L2CLNode] + supervisors locks.RWMap[stack.SupervisorID, Supervisor] testSequencers locks.RWMap[stack.TestSequencerID, *TestSequencer] batchers locks.RWMap[stack.L2BatcherID, *L2Batcher] challengers locks.RWMap[stack.L2ChallengerID, *L2Challenger] proposers locks.RWMap[stack.L2ProposerID, *L2Proposer] - faucet *FaucetService + syncTester *SyncTesterService + faucet *FaucetService controlPlane *ControlPlane @@ -119,15 +122,18 @@ func (o *Orchestrator) Hydrate(sys stack.ExtensibleSystem) { o.clusters.Range(rangeHydrateFn[stack.ClusterID, *Cluster](sys)) o.l1Nets.Range(rangeHydrateFn[eth.ChainID, *L1Network](sys)) o.l2Nets.Range(rangeHydrateFn[eth.ChainID, *L2Network](sys)) - o.l1ELs.Range(rangeHydrateFn[stack.L1ELNodeID, *L1ELNode](sys)) + o.l1ELs.Range(rangeHydrateFn[stack.L1ELNodeID, L1ELNode](sys)) o.l1CLs.Range(rangeHydrateFn[stack.L1CLNodeID, *L1CLNode](sys)) - o.l2ELs.Range(rangeHydrateFn[stack.L2ELNodeID, *L2ELNode](sys)) - o.l2CLs.Range(rangeHydrateFn[stack.L2CLNodeID, *L2CLNode](sys)) - o.supervisors.Range(rangeHydrateFn[stack.SupervisorID, *Supervisor](sys)) + o.l2ELs.Range(rangeHydrateFn[stack.L2ELNodeID, L2ELNode](sys)) + o.l2CLs.Range(rangeHydrateFn[stack.L2CLNodeID, L2CLNode](sys)) + o.supervisors.Range(rangeHydrateFn[stack.SupervisorID, Supervisor](sys)) o.testSequencers.Range(rangeHydrateFn[stack.TestSequencerID, *TestSequencer](sys)) o.batchers.Range(rangeHydrateFn[stack.L2BatcherID, *L2Batcher](sys)) o.challengers.Range(rangeHydrateFn[stack.L2ChallengerID, *L2Challenger](sys)) o.proposers.Range(rangeHydrateFn[stack.L2ProposerID, *L2Proposer](sys)) + if o.syncTester != nil { + o.syncTester.hydrate(sys) + } o.faucet.hydrate(sys) o.sysHook.PostHydrate(sys) } diff --git a/op-devstack/sysgo/proxy.go b/op-devstack/sysgo/proxy.go new file mode 100644 index 
0000000000000..3e932f220a86e --- /dev/null +++ b/op-devstack/sysgo/proxy.go @@ -0,0 +1,14 @@ +package sysgo + +import ( + "net" + "net/url" + + "github.com/ethereum-optimism/optimism/op-service/testreq" +) + +func ProxyAddr(require *testreq.Assertions, urlStr string) string { + u, err := url.Parse(urlStr) + require.NoError(err) + return net.JoinHostPort(u.Hostname(), u.Port()) +} diff --git a/op-devstack/sysgo/subproc.go b/op-devstack/sysgo/subproc.go new file mode 100644 index 0000000000000..799357a049733 --- /dev/null +++ b/op-devstack/sysgo/subproc.go @@ -0,0 +1,163 @@ +package sysgo + +import ( + "context" + "fmt" + "os" + "os/exec" + "sync" + "time" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-service/logpipe" +) + +// SubProcess is a process that can be started, and stopped, and restarted. +// +// If at any point the process fails to start or exit successfully, +// the failure is reported to the devtest.P. +// +// If the sub-process exits by itself, the exit is detected, +// and if not successful (non-zero exit code on unix) it also reports failure to the devtest.P. +// +// Sub-process logs are assumed to be structured JSON logs, and are piped to the logger. +type SubProcess struct { + p devtest.P + cmd *exec.Cmd + + stdOutLogs logpipe.LogProcessor + stdErrLogs logpipe.LogProcessor + + waitCtx context.Context // closed when process-Wait completes + + mu sync.Mutex +} + +func NewSubProcess(p devtest.P, stdOutLogs, stdErrLogs logpipe.LogProcessor) *SubProcess { + return &SubProcess{ + p: p, + stdOutLogs: stdOutLogs, + stdErrLogs: stdErrLogs, + } +} + +func (sp *SubProcess) Start(cmdPath string, args []string, env []string) error { + sp.mu.Lock() + defer sp.mu.Unlock() + if sp.cmd != nil { + return fmt.Errorf("process is still running (PID: %d)", sp.cmd.Process.Pid) + } + cmd := exec.Command(cmdPath, args...) + cmd.Env = append(os.Environ(), env...) 
+ stdout, err := cmd.StdoutPipe() + sp.p.Require().NoError(err, "stdout err") + stderr, err := cmd.StderrPipe() + sp.p.Require().NoError(err, "stderr err") + go func() { + err := logpipe.PipeLogs(stdout, sp.stdOutLogs) + sp.p.Require().NoError(err, "stdout logging error") + }() + go func() { + err := logpipe.PipeLogs(stderr, sp.stdErrLogs) + sp.p.Require().NoError(err, "stderr logging error") + }() + if err := cmd.Start(); err != nil { + return err + } + sp.cmd = cmd + + subCtx, subCancel := context.WithCancelCause(context.Background()) + go func() { + state, err := cmd.Process.Wait() + subCancel(err) + sp.p.Require().NoError(err, "Sub-process failed to be closed") + sp.p.Logger().Info("Sub-process stopped", "exitCode", state.ExitCode(), "pid", state.Pid()) + // if it exited on its own, then we care about the error. If not, we (or the user) signaled it. + if state.Exited() { + sp.p.Require().True(state.Success(), "Sub-process closed with error status: %s", state.String()) + } + }() + sp.waitCtx = subCtx + + sp.p.Cleanup(func() { + err := sp.Stop() + if err != nil { + sp.p.Logger().Error("Shutdown error", "err", err) + } + }) + return nil +} + +// Kill stops the process, and does not wait for it to complete. +func (sp *SubProcess) Kill() error { + ctx, cancel := context.WithCancel(context.Background()) + cancel() // don't wait, just force it to stop immediately + return sp.GracefulStop(ctx) +} + +// Stop implements the default control-panel interface, +// and gracefully stops with a 10-second timeout. +func (sp *SubProcess) Stop() error { + // by default, for control-panel, use an interrupt and a 10-second grace + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + return sp.GracefulStop(ctx) +} + +// GracefulStop sends an interrupt and waits for the process to stop. +// If the given ctx is closed, a forced shutdown (process kill) is pursued. 
+func (sp *SubProcess) GracefulStop(ctx context.Context) error { + sp.mu.Lock() + defer sp.mu.Unlock() + if sp.cmd == nil { + return nil // already stopped gracefully + } + + if ctx.Err() == nil && sp.waitCtx.Err() == nil { + // if not force-closing, and not already done, then try an interrupt first. + sp.p.Logger().Info("Sending interrupt") + if err := sp.cmd.Process.Signal(os.Interrupt); err != nil { + return err + } + } + select { + case <-ctx.Done(): + sp.p.Logger().Warn("Sub-process did not respond to interrupt, force-closing now") + err := sp.cmd.Process.Kill() + if err != nil { + return fmt.Errorf("failed to force-close sub-process: %w", err) + } + sp.p.Logger().Info("Successfully force-closed sub-process") + // resources of cmd.Process will be cleaned up by the Process.Wait + case <-sp.waitCtx.Done(): + if err := context.Cause(sp.waitCtx); err != nil && err != context.Canceled { + sp.p.Logger().Warn("Sub-process exited with error", "err", err) + } else { + sp.p.Logger().Info("Sub-process gracefully exited") + } + } + sp.cmd = nil + sp.waitCtx = nil + return nil +} + +// Wait waits for the process to complete. 
+func (sp *SubProcess) Wait(ctx context.Context) error { + sp.mu.Lock() + defer sp.mu.Unlock() + if sp.waitCtx == nil { + return nil + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-sp.waitCtx.Done(): + if err := context.Cause(sp.waitCtx); err != nil && err != context.Canceled { + sp.p.Logger().Warn("Sub-process exited with error", "err", err) + return err + } else { + sp.p.Logger().Info("Sub-process gracefully exited") + return nil + } + } +} diff --git a/op-devstack/sysgo/subproc_test.go b/op-devstack/sysgo/subproc_test.go new file mode 100644 index 0000000000000..59ab24133aa0b --- /dev/null +++ b/op-devstack/sysgo/subproc_test.go @@ -0,0 +1,77 @@ +package sysgo + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-service/logpipe" + "github.com/ethereum-optimism/optimism/op-service/testlog" +) + +func TestSubProcess(gt *testing.T) { + tLog := testlog.Logger(gt, log.LevelInfo) + logger, capt := testlog.CaptureLogger(gt, log.LevelInfo) + + onFailNow := func(v bool) { + panic("fail") + } + onSkipNow := func() { + panic("skip") + } + p := devtest.NewP(context.Background(), logger, onFailNow, onSkipNow) + gt.Cleanup(p.Close) + + logProc := logpipe.LogProcessor(func(line []byte) { + logger.Info(string(line)) + tLog.Info("Sub-process logged message", "line", string(line)) + }) + sp := NewSubProcess(p, logProc, logProc) + + gt.Log("Running first sub-process") + testSleep(gt, capt, sp) + gt.Log("Restarting, second run") + capt.Clear() + testSleep(gt, capt, sp) + gt.Log("Trying a different command now") + capt.Clear() + testEcho(gt, capt, sp) + gt.Log("Second run of different command") + capt.Clear() + testEcho(gt, capt, sp) +} + +// testEcho tests that we can handle a sub-process that completes on its own +func testEcho(gt *testing.T, capt *testlog.CapturingHandler, sp *SubProcess) { + 
require.NoError(gt, sp.Start("/bin/echo", []string{"hello world"}, []string{})) + gt.Log("Started sub-process") + require.NoError(gt, sp.Wait(context.Background()), "echo must complete") + require.NoError(gt, sp.Stop()) + gt.Log("Stopped sub-process") + + require.NotNil(gt, capt.FindLog( + testlog.NewMessageFilter("hello world"))) + + require.NotNil(gt, capt.FindLog( + testlog.NewMessageFilter("Sub-process gracefully exited"))) +} + +// testSleep tests that we can force shut down a sub-process that is stuck +func testSleep(gt *testing.T, capt *testlog.CapturingHandler, sp *SubProcess) { + // Sleep for very, very, long + require.NoError(gt, sp.Start("/bin/sleep", []string{"10000000000"}, []string{})) + gt.Log("Started sub-process") + // Shut down the process before the sleep completes + require.NoError(gt, sp.Kill()) + gt.Log("Killed sub-process") + + require.NotNil(gt, capt.FindLog( + testlog.NewMessageFilter("Sub-process did not respond to interrupt, force-closing now"))) + + require.NotNil(gt, capt.FindLog( + testlog.NewMessageFilter("Successfully force-closed sub-process"))) +} diff --git a/op-devstack/sysgo/superroot.go b/op-devstack/sysgo/superroot.go index 40917e55a3e0f..f487acf9321b4 100644 --- a/op-devstack/sysgo/superroot.go +++ b/op-devstack/sysgo/superroot.go @@ -14,11 +14,9 @@ import ( "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/contracts/bindings/delegatecallproxy" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" - "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/dial" "github.com/ethereum-optimism/optimism/op-service/errutil" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum-optimism/optimism/op-service/sources/batching" "github.com/ethereum-optimism/optimism/op-service/txmgr" 
"github.com/ethereum/go-ethereum/accounts/abi/bind" @@ -41,14 +39,14 @@ func WithSuperRoots(l1ChainID eth.ChainID, l1ELID stack.L1ELNodeID, l2CLID stack l1EL, ok := o.l1ELs.Get(l1ELID) require.True(ok, "must have L1 EL node") - rpcClient, err := rpc.DialContext(t.Ctx(), l1EL.userRPC) + rpcClient, err := rpc.DialContext(t.Ctx(), l1EL.UserRPC()) require.NoError(err) client := ethclient.NewClient(rpcClient) w3Client := w3.NewClient(rpcClient) l2CL, ok := o.l2CLs.Get(l2CLID) require.True(ok, "must have L2 CL node") - rollupClientProvider, err := dial.NewStaticL2RollupProvider(t.Ctx(), t.Logger(), l2CL.opNode.UserRPC().RPC()) + rollupClientProvider, err := dial.NewStaticL2RollupProvider(t.Ctx(), t.Logger(), l2CL.UserRPC()) require.NoError(err) rollupClient, err := rollupClientProvider.RollupClient(t.Ctx()) require.NoError(err) @@ -190,8 +188,11 @@ func WithSuperRoots(l1ChainID eth.ChainID, l1ELID stack.L1ELNodeID, l2CLID stack } func deployDelegateCallProxy(t devtest.CommonT, transactOpts *bind.TransactOpts, client *ethclient.Client, owner common.Address) (common.Address, *delegatecallproxy.Delegatecallproxy) { - deployAddress, _, proxyContract, err := delegatecallproxy.DeployDelegatecallproxy(transactOpts, client, owner) + deployAddress, tx, proxyContract, err := delegatecallproxy.DeployDelegatecallproxy(transactOpts, client, owner) t.Require().NoError(err, "DelegateCallProxy deployment failed") + // Make sure the transaction actually got included rather than just being sent + _, err = wait.ForReceiptOK(t.Ctx(), client, tx.Hash()) + t.Require().NoError(err, "DelegateCallProxy deployment tx was not included successfully") return deployAddress, proxyContract } @@ -199,9 +200,8 @@ func getSuperRoot(t devtest.CommonT, o *Orchestrator, timestamp uint64, supervis supervisor, ok := o.supervisors.Get(supervisorID) t.Require().True(ok, "must have supervisor") - clientRPC, err := client.NewRPC(t.Ctx(), t.Logger(), supervisor.userRPC) + client, err := 
dial.DialSupervisorClientWithTimeout(t.Ctx(), t.Logger(), supervisor.UserRPC()) t.Require().NoError(err) - client := sources.NewSupervisorClient(clientRPC) super, err := client.SuperRootAtTimestamp(t.Ctx(), hexutil.Uint64(timestamp)) t.Require().NoError(err, "super root at timestamp failed") return super.SuperRoot diff --git a/op-devstack/sysgo/supervisor.go b/op-devstack/sysgo/supervisor.go index d607478b3293c..8f39d8110e5b2 100644 --- a/op-devstack/sysgo/supervisor.go +++ b/op-devstack/sysgo/supervisor.go @@ -1,153 +1,27 @@ package sysgo import ( - "context" - "sync" + "os" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-devstack/shim" "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-service/client" - oplog "github.com/ethereum-optimism/optimism/op-service/log" - "github.com/ethereum-optimism/optimism/op-service/metrics" - "github.com/ethereum-optimism/optimism/op-service/oppprof" + "github.com/ethereum-optimism/optimism/op-service/dial" "github.com/ethereum-optimism/optimism/op-service/retry" - oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" - "github.com/ethereum-optimism/optimism/op-service/sources" - supervisorConfig "github.com/ethereum-optimism/optimism/op-supervisor/config" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/syncnode" ) -type Supervisor struct { - mu sync.Mutex - - id stack.SupervisorID - userRPC string - - cfg *supervisorConfig.Config - p devtest.P - logger log.Logger - - service *supervisor.SupervisorService -} - -var _ stack.Lifecycle = (*Supervisor)(nil) - -func (s *Supervisor) hydrate(sys stack.ExtensibleSystem) { - tlog := sys.Logger().New("id", s.id) - supClient, err := client.NewRPC(sys.T().Ctx(), tlog, s.userRPC, client.WithLazyDial()) - sys.T().Require().NoError(err) - 
sys.T().Cleanup(supClient.Close) - - sys.AddSupervisor(shim.NewSupervisor(shim.SupervisorConfig{ - CommonConfig: shim.NewCommonConfig(sys.T()), - ID: s.id, - Client: supClient, - })) -} - -func (s *Supervisor) rememberPort() { - port, err := s.service.Port() - s.p.Require().NoError(err) - s.cfg.RPC.ListenPort = port -} - -func (s *Supervisor) Start() { - s.mu.Lock() - defer s.mu.Unlock() - if s.service != nil { - s.logger.Warn("Supervisor already started") - return - } - super, err := supervisor.SupervisorFromConfig(context.Background(), s.cfg, s.logger) - s.p.Require().NoError(err) - - s.service = super - s.logger.Info("Starting supervisor") - err = super.Start(context.Background()) - s.p.Require().NoError(err, "supervisor failed to start") - s.logger.Info("Started supervisor") - - s.userRPC = super.RPC() - - s.rememberPort() -} - -func (s *Supervisor) Stop() { - s.mu.Lock() - defer s.mu.Unlock() - if s.service == nil { - s.logger.Warn("Supervisor already stopped") - return - } - ctx, cancel := context.WithCancel(context.Background()) - cancel() // force-quit - s.logger.Info("Closing supervisor") - closeErr := s.service.Stop(ctx) - s.logger.Info("Closed supervisor", "err", closeErr) - - s.service = nil +type Supervisor interface { + hydrate(system stack.ExtensibleSystem) + stack.Lifecycle + UserRPC() string } func WithSupervisor(supervisorID stack.SupervisorID, clusterID stack.ClusterID, l1ELID stack.L1ELNodeID) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), supervisorID)) - require := p.Require() - - l1EL, ok := orch.l1ELs.Get(l1ELID) - require.True(ok, "need L1 EL node to connect supervisor to") - - cluster, ok := orch.clusters.Get(clusterID) - require.True(ok, "need cluster to determine dependency set") - - require.NotNil(cluster.cfgset, "need a full config set") - require.NoError(cluster.cfgset.CheckChains(), "config set must be valid") - cfg := 
&supervisorConfig.Config{ - MetricsConfig: metrics.CLIConfig{ - Enabled: false, - }, - PprofConfig: oppprof.CLIConfig{ - ListenEnabled: false, - }, - LogConfig: oplog.CLIConfig{ // ignored, logger overrides this - Level: log.LevelDebug, - Format: oplog.FormatText, - }, - RPC: oprpc.CLIConfig{ - ListenAddr: "127.0.0.1", - // When supervisor starts, store its RPC port here - // given by the os, to reclaim when restart. - ListenPort: 0, - EnableAdmin: true, - }, - SyncSources: &syncnode.CLISyncNodes{}, // no sync-sources - L1RPC: l1EL.userRPC, - // Note: datadir is created here, - // persistent across stop/start, for the duration of the package execution. - Datadir: orch.p.TempDir(), - Version: "dev", - FullConfigSetSource: cluster.cfgset, - MockRun: false, - SynchronousProcessors: false, - DatadirSyncEndpoint: "", - } - - plog := p.Logger() - supervisorNode := &Supervisor{ - id: supervisorID, - userRPC: "", // set on start - cfg: cfg, - p: p, - logger: plog, - service: nil, // set on start - } - orch.supervisors.Set(supervisorID, supervisorNode) - supervisorNode.Start() - orch.p.Cleanup(supervisorNode.Stop) - }) + switch os.Getenv("DEVSTACK_SUPERVISOR_KIND") { + case "kona": + return WithKonaSupervisor(supervisorID, clusterID, l1ELID) + default: + return WithOPSupervisor(supervisorID, clusterID, l1ELID) + } } func WithManagedBySupervisor(l2CLID stack.L2CLNodeID, supervisorID stack.SupervisorID) stack.Option[*Orchestrator] { @@ -156,15 +30,14 @@ func WithManagedBySupervisor(l2CLID stack.L2CLNodeID, supervisorID stack.Supervi l2CL, ok := orch.l2CLs.Get(l2CLID) require.True(ok, "looking for L2 CL node to connect to supervisor") - interopEndpoint, secret := l2CL.opNode.InteropRPC() + interopEndpoint, secret := l2CL.InteropRPC() s, ok := orch.supervisors.Get(supervisorID) require.True(ok, "looking for supervisor") ctx := orch.P().Ctx() - rpcClient, err := client.NewRPC(ctx, orch.P().Logger(), s.userRPC, client.WithLazyDial()) + supClient, err := 
dial.DialSupervisorClientWithTimeout(ctx, orch.P().Logger(), s.UserRPC(), client.WithLazyDial()) orch.P().Require().NoError(err) - supClient := sources.NewSupervisorClient(rpcClient) err = retry.Do0(ctx, 10, retry.Exponential(), func() error { return supClient.AddL2RPC(ctx, interopEndpoint, secret) diff --git a/op-devstack/sysgo/supervisor_kona.go b/op-devstack/sysgo/supervisor_kona.go new file mode 100644 index 0000000000000..b34df575bd818 --- /dev/null +++ b/op-devstack/sysgo/supervisor_kona.go @@ -0,0 +1,179 @@ +package sysgo + +import ( + "encoding/json" + "os" + "sync" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/logpipe" + "github.com/ethereum-optimism/optimism/op-service/tasks" + "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" +) + +type KonaSupervisor struct { + mu sync.Mutex + + id stack.SupervisorID + userRPC string + + userProxy *tcpproxy.Proxy + + execPath string + args []string + // Each entry is of the form "key=value". 
+ env []string + + p devtest.P + + sub *SubProcess +} + +var _ stack.Lifecycle = (*OpSupervisor)(nil) + +func (s *KonaSupervisor) hydrate(sys stack.ExtensibleSystem) { + tlog := sys.Logger().New("id", s.id) + supClient, err := client.NewRPC(sys.T().Ctx(), tlog, s.userRPC, client.WithLazyDial()) + sys.T().Require().NoError(err) + sys.T().Cleanup(supClient.Close) + + sys.AddSupervisor(shim.NewSupervisor(shim.SupervisorConfig{ + CommonConfig: shim.NewCommonConfig(sys.T()), + ID: s.id, + Client: supClient, + })) +} + +func (s *KonaSupervisor) UserRPC() string { + return s.userRPC +} + +func (s *KonaSupervisor) Start() { + s.mu.Lock() + defer s.mu.Unlock() + if s.sub != nil { + s.p.Logger().Warn("Kona-supervisor already started") + return + } + + // Create a proxy for the user RPC, + // so other services can connect, and stay connected, across restarts. + if s.userProxy == nil { + s.userProxy = tcpproxy.New(s.p.Logger()) + s.p.Require().NoError(s.userProxy.Start()) + s.p.Cleanup(func() { + s.userProxy.Close() + }) + s.userRPC = "http://" + s.userProxy.Addr() + } + + // Create the sub-process. + // We pipe sub-process logs to the test-logger. + // And inspect them along the way, to get the RPC server address. 
+ logOut := logpipe.ToLogger(s.p.Logger().New("src", "stdout")) + logErr := logpipe.ToLogger(s.p.Logger().New("src", "stderr")) + userRPC := make(chan string, 1) + onLogEntry := func(e logpipe.LogEntry) { + switch e.LogMessage() { + case "RPC server bound to address": + userRPC <- "http://" + e.FieldValue("addr").(string) + } + } + stdOutLogs := logpipe.LogProcessor(func(line []byte) { + e := logpipe.ParseRustStructuredLogs(line) + logOut(e) + onLogEntry(e) + }) + stdErrLogs := logpipe.LogProcessor(func(line []byte) { + e := logpipe.ParseRustStructuredLogs(line) + logErr(e) + }) + + s.sub = NewSubProcess(s.p, stdOutLogs, stdErrLogs) + err := s.sub.Start(s.execPath, s.args, s.env) + s.p.Require().NoError(err, "Must start") + + var userRPCAddr string + s.p.Require().NoError(tasks.Await(s.p.Ctx(), userRPC, &userRPCAddr), "need user RPC") + + s.userProxy.SetUpstream(ProxyAddr(s.p.Require(), userRPCAddr)) +} + +func (s *KonaSupervisor) Stop() { + s.mu.Lock() + defer s.mu.Unlock() + if s.sub == nil { + s.p.Logger().Warn("kona-supervisor already stopped") + return + } + err := s.sub.Stop() + s.p.Require().NoError(err, "Must stop") + s.sub = nil +} + +func WithKonaSupervisor(supervisorID stack.SupervisorID, clusterID stack.ClusterID, l1ELID stack.L1ELNodeID) stack.Option[*Orchestrator] { + return stack.AfterDeploy(func(orch *Orchestrator) { + p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), supervisorID)) + require := p.Require() + + l1EL, ok := orch.l1ELs.Get(l1ELID) + require.True(ok, "need L1 EL node to connect supervisor to") + + cluster, ok := orch.clusters.Get(clusterID) + require.True(ok, "need cluster to determine dependency set") + + require.NotNil(cluster.cfgset, "need a full config set") + require.NoError(cluster.cfgset.CheckChains(), "config set must be valid") + + tempDataDir := p.TempDir() + + cfgDir := p.TempDir() + + depsetCfgPath := cfgDir + "/depset.json" + depsetData, err := cluster.DepSet().MarshalJSON() + require.NoError(err, "failed to 
marshal dependency set") + p.Require().NoError(err, os.WriteFile(depsetCfgPath, depsetData, 0o644)) + + rollupCfgPath := cfgDir + "/rollup-config-*.json" + for _, l2Net := range orch.l2Nets.Values() { + chainID := l2Net.id.ChainID() + rollupData, err := json.Marshal(l2Net.rollupCfg) + require.NoError(err, "failed to marshal rollup config") + p.Require().NoError(err, os.WriteFile(cfgDir+"/rollup-config-"+chainID.String()+".json", rollupData, 0o644)) + } + + envVars := []string{ + "RPC_ADDR=127.0.0.1", + "DATADIR=" + tempDataDir, + "DEPENDENCY_SET=" + depsetCfgPath, + "ROLLUP_CONFIG_PATHS=" + rollupCfgPath, + "L1_RPC=" + l1EL.UserRPC(), + "RPC_ENABLE_ADMIN=true", + "L2_CONSENSUS_NODES=", + "L2_CONSENSUS_JWT_SECRET=", + "KONA_LOG_STDOUT_FORMAT=json", + } + + execPath := os.Getenv("KONA_SUPERVISOR_EXEC_PATH") + p.Require().NotEmpty(execPath, "KONA_SUPERVISOR_EXEC_PATH environment variable must be set") + _, err = os.Stat(execPath) + p.Require().NotErrorIs(err, os.ErrNotExist, "executable must exist") + + konaSupervisor := &KonaSupervisor{ + id: supervisorID, + userRPC: "", // retrieved from logs + execPath: execPath, + args: []string{}, + env: envVars, + p: p, + } + orch.supervisors.Set(supervisorID, konaSupervisor) + p.Logger().Info("Starting kona-supervisor") + konaSupervisor.Start() + p.Cleanup(konaSupervisor.Stop) + p.Logger().Info("Kona-supervisor is up", "rpc", konaSupervisor.UserRPC()) + }) +} diff --git a/op-devstack/sysgo/supervisor_op.go b/op-devstack/sysgo/supervisor_op.go new file mode 100644 index 0000000000000..fcc3f2015533f --- /dev/null +++ b/op-devstack/sysgo/supervisor_op.go @@ -0,0 +1,158 @@ +package sysgo + +import ( + "context" + "sync" + + "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + 
"github.com/ethereum-optimism/optimism/op-service/client" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum-optimism/optimism/op-service/metrics" + "github.com/ethereum-optimism/optimism/op-service/oppprof" + oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" + supervisorConfig "github.com/ethereum-optimism/optimism/op-supervisor/config" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/syncnode" +) + +type OpSupervisor struct { + mu sync.Mutex + + id stack.SupervisorID + userRPC string + + cfg *supervisorConfig.Config + p devtest.P + logger log.Logger + + service *supervisor.SupervisorService + + proxy *tcpproxy.Proxy +} + +var _ stack.Lifecycle = (*OpSupervisor)(nil) + +func (s *OpSupervisor) hydrate(sys stack.ExtensibleSystem) { + tlog := sys.Logger().New("id", s.id) + supClient, err := client.NewRPC(sys.T().Ctx(), tlog, s.userRPC, client.WithLazyDial()) + sys.T().Require().NoError(err) + sys.T().Cleanup(supClient.Close) + + sys.AddSupervisor(shim.NewSupervisor(shim.SupervisorConfig{ + CommonConfig: shim.NewCommonConfig(sys.T()), + ID: s.id, + Client: supClient, + })) +} + +func (s *OpSupervisor) UserRPC() string { + return s.userRPC +} + +func (s *OpSupervisor) Start() { + s.mu.Lock() + defer s.mu.Unlock() + if s.service != nil { + s.logger.Warn("Supervisor already started") + return + } + + if s.proxy == nil { + s.proxy = tcpproxy.New(s.logger.New("proxy", "supervisor")) + s.p.Require().NoError(s.proxy.Start()) + s.p.Cleanup(func() { + s.proxy.Close() + }) + s.userRPC = "http://" + s.proxy.Addr() + } + + super, err := supervisor.SupervisorFromConfig(context.Background(), s.cfg, s.logger) + s.p.Require().NoError(err) + + s.service = super + s.logger.Info("Starting supervisor") + err = super.Start(context.Background()) + s.p.Require().NoError(err, "supervisor failed to start") + s.logger.Info("Started supervisor") + 
s.proxy.SetUpstream(ProxyAddr(s.p.Require(), super.RPC())) +} + +func (s *OpSupervisor) Stop() { + s.mu.Lock() + defer s.mu.Unlock() + if s.service == nil { + s.logger.Warn("Supervisor already stopped") + return + } + ctx, cancel := context.WithCancel(context.Background()) + cancel() // force-quit + s.logger.Info("Closing supervisor") + closeErr := s.service.Stop(ctx) + s.logger.Info("Closed supervisor", "err", closeErr) + + s.service = nil +} + +func WithOPSupervisor(supervisorID stack.SupervisorID, clusterID stack.ClusterID, l1ELID stack.L1ELNodeID) stack.Option[*Orchestrator] { + return stack.AfterDeploy(func(orch *Orchestrator) { + p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), supervisorID)) + require := p.Require() + + l1EL, ok := orch.l1ELs.Get(l1ELID) + require.True(ok, "need L1 EL node to connect supervisor to") + + cluster, ok := orch.clusters.Get(clusterID) + require.True(ok, "need cluster to determine dependency set") + + require.NotNil(cluster.cfgset, "need a full config set") + require.NoError(cluster.cfgset.CheckChains(), "config set must be valid") + cfg := &supervisorConfig.Config{ + MetricsConfig: metrics.CLIConfig{ + Enabled: false, + }, + PprofConfig: oppprof.CLIConfig{ + ListenEnabled: false, + }, + LogConfig: oplog.CLIConfig{ // ignored, logger overrides this + Level: log.LevelDebug, + Format: oplog.FormatText, + }, + RPC: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + // When supervisor starts, store its RPC port here + // given by the os, to reclaim when restart. + ListenPort: 0, + EnableAdmin: true, + }, + SyncSources: &syncnode.CLISyncNodes{}, // no sync-sources + L1RPC: l1EL.UserRPC(), + // Note: datadir is created here, + // persistent across stop/start, for the duration of the package execution. 
+ Datadir: orch.p.TempDir(), + Version: "dev", + FullConfigSetSource: cluster.cfgset, + MockRun: false, + SynchronousProcessors: false, + DatadirSyncEndpoint: "", + } + + plog := p.Logger() + supervisorNode := &OpSupervisor{ + id: supervisorID, + userRPC: "", // set on start + cfg: cfg, + p: p, + logger: plog, + service: nil, // set on start + } + orch.supervisors.Set(supervisorID, supervisorNode) + supervisorNode.Start() + orch.p.Cleanup(supervisorNode.Stop) + }) +} diff --git a/op-devstack/sysgo/sync_tester.go b/op-devstack/sysgo/sync_tester.go new file mode 100644 index 0000000000000..ace022fd63938 --- /dev/null +++ b/op-devstack/sysgo/sync_tester.go @@ -0,0 +1,131 @@ +package sysgo + +import ( + "context" + "fmt" + + "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/endpoint" + "github.com/ethereum-optimism/optimism/op-service/eth" + oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" + "github.com/ethereum-optimism/optimism/op-sync-tester/config" + + "github.com/ethereum-optimism/optimism/op-sync-tester/synctester" + + stconf "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/backend/config" + sttypes "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/backend/types" +) + +// Caveat: id is binded by a single EL(chainID), but service can support multiple ELs +type SyncTesterService struct { + id stack.SyncTesterID + service *synctester.Service +} + +func (n *SyncTesterService) hydrate(system stack.ExtensibleSystem) { + require := system.T().Require() + + for syncTesterID, chainID := range n.service.SyncTesters() { + syncTesterRPC := n.service.SyncTesterRPC(chainID, false) + rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), syncTesterRPC, client.WithLazyDial()) + require.NoError(err) + system.T().Cleanup(rpcCl.Close) + id := 
stack.NewSyncTesterID(syncTesterID.String(), chainID) + front := shim.NewSyncTester(shim.SyncTesterConfig{ + CommonConfig: shim.NewCommonConfig(system.T()), + ID: id, + Addr: syncTesterRPC, + Client: rpcCl, + }) + net := system.Network(chainID).(stack.ExtensibleNetwork) + net.AddSyncTester(front) + } +} + +func WithSyncTester(syncTesterID stack.SyncTesterID, l2ELs []stack.L2ELNodeID) stack.Option[*Orchestrator] { + return stack.AfterDeploy(func(orch *Orchestrator) { + p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), syncTesterID)) + + require := p.Require() + + require.Nil(orch.syncTester, "can only support a single sync-tester-service in sysgo") + + syncTesters := make(map[sttypes.SyncTesterID]*stconf.SyncTesterEntry) + + for _, elID := range l2ELs { + id := sttypes.SyncTesterID(fmt.Sprintf("dev-sync-tester-%s", elID.ChainID())) + require.NotContains(syncTesters, id, "one sync tester per chain only") + + el, ok := orch.l2ELs.Get(elID) + require.True(ok, "need L2 EL for sync tester", elID) + + syncTesters[id] = &stconf.SyncTesterEntry{ + ELRPC: endpoint.MustRPC{Value: endpoint.URL(el.UserRPC())}, + ChainID: elID.ChainID(), + } + } + + cfg := &config.Config{ + RPC: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + }, + SyncTesters: &stconf.Config{ + SyncTesters: syncTesters, + }, + } + logger := p.Logger() + srv, err := synctester.FromConfig(p.Ctx(), cfg, logger) + require.NoError(err, "must setup sync tester service") + require.NoError(srv.Start(p.Ctx())) + p.Cleanup(func() { + ctx, cancel := context.WithCancel(context.Background()) + cancel() // force-quit + logger.Info("Closing sync tester") + _ = srv.Stop(ctx) + logger.Info("Closed sync tester") + }) + orch.syncTester = &SyncTesterService{id: syncTesterID, service: srv} + }) +} + +func WithSyncTesterWithExternalEndpoint(syncTesterID stack.SyncTesterID, endpointRPC string, chainID eth.ChainID) stack.Option[*Orchestrator] { + return stack.AfterDeploy(func(orch *Orchestrator) { + p := 
orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), syncTesterID)) + + require := p.Require() + + require.Nil(orch.syncTester, "can only support a single sync-tester-service in sysgo") + + syncTesters := make(map[sttypes.SyncTesterID]*stconf.SyncTesterEntry) + + // Create a sync tester entry with the external endpoint + id := sttypes.SyncTesterID(fmt.Sprintf("dev-sync-tester-%s", chainID)) + syncTesters[id] = &stconf.SyncTesterEntry{ + ELRPC: endpoint.MustRPC{Value: endpoint.URL(endpointRPC)}, + ChainID: chainID, + } + + cfg := &config.Config{ + RPC: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + }, + SyncTesters: &stconf.Config{ + SyncTesters: syncTesters, + }, + } + logger := p.Logger() + srv, err := synctester.FromConfig(p.Ctx(), cfg, logger) + require.NoError(err, "must setup sync tester service") + require.NoError(srv.Start(p.Ctx())) + p.Cleanup(func() { + ctx, cancel := context.WithCancel(context.Background()) + cancel() // force-quit + logger.Info("Closing sync tester") + _ = srv.Stop(ctx) + logger.Info("Closed sync tester") + }) + orch.syncTester = &SyncTesterService{id: syncTesterID, service: srv} + }) +} diff --git a/op-devstack/sysgo/system.go b/op-devstack/sysgo/system.go index 7b455ce9bad87..8ee74cbf0802b 100644 --- a/op-devstack/sysgo/system.go +++ b/op-devstack/sysgo/system.go @@ -2,6 +2,7 @@ package sysgo import ( "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -64,8 +65,8 @@ func DefaultMinimalSystem(dest *DefaultMinimalSystemIDs) stack.Option[*Orchestra opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) - opt.Add(WithL2ELNode(ids.L2EL, nil)) - opt.Add(WithL2CLNode(ids.L2CL, true, false, ids.L1CL, ids.L1EL, ids.L2EL)) + opt.Add(WithL2ELNode(ids.L2EL)) + opt.Add(WithL2CLNode(ids.L2CL, ids.L1CL, ids.L1EL, ids.L2EL, L2CLSequencer())) 
opt.Add(WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL)) opt.Add(WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil)) @@ -85,6 +86,65 @@ func DefaultMinimalSystem(dest *DefaultMinimalSystemIDs) stack.Option[*Orchestra return opt } +type DefaultMinimalSystemWithSyncTesterIDs struct { + DefaultMinimalSystemIDs + + SyncTester stack.SyncTesterID +} + +func NewDefaultMinimalSystemWithSyncTesterIDs(l1ID, l2ID eth.ChainID) DefaultMinimalSystemWithSyncTesterIDs { + minimal := NewDefaultMinimalSystemIDs(l1ID, l2ID) + return DefaultMinimalSystemWithSyncTesterIDs{ + DefaultMinimalSystemIDs: minimal, + SyncTester: stack.NewSyncTesterID("sync-tester", l2ID), + } +} + +func DefaultMinimalSystemWithSyncTester(dest *DefaultMinimalSystemWithSyncTesterIDs, fcu eth.FCUState) stack.Option[*Orchestrator] { + l1ID := eth.ChainIDFromUInt64(900) + l2ID := eth.ChainIDFromUInt64(901) + ids := NewDefaultMinimalSystemWithSyncTesterIDs(l1ID, l2ID) + + opt := stack.Combine[*Orchestrator]() + opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { + o.P().Logger().Info("Setting up") + })) + + opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) + + opt.Add(WithDeployer(), + WithDeployerOptions( + WithLocalContractSources(), + WithCommons(ids.L1.ChainID()), + WithPrefundedL2(ids.L1.ChainID(), ids.L2.ChainID()), + ), + ) + + opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) + + opt.Add(WithL2ELNode(ids.L2EL)) + opt.Add(WithL2CLNode(ids.L2CL, ids.L1CL, ids.L1EL, ids.L2EL, L2CLSequencer())) + + opt.Add(WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL)) + opt.Add(WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil)) + + opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2EL})) + + opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2CL, ids.L1EL, ids.L2EL)) + + opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.L2ELNodeID{ + ids.L2EL, + })) + + opt.Add(WithSyncTester(ids.SyncTester, []stack.L2ELNodeID{ids.L2EL})) + + 
opt.Add(stack.Finally(func(orch *Orchestrator) { + *dest = ids + })) + + return opt +} + type DefaultSingleChainInteropSystemIDs struct { L1 stack.L1NetworkID L1EL stack.L1ELNodeID @@ -168,8 +228,8 @@ func baseInteropSystem(ids *DefaultSingleChainInteropSystemIDs) stack.Option[*Or opt.Add(WithSupervisor(ids.Supervisor, ids.Cluster, ids.L1EL)) - opt.Add(WithL2ELNode(ids.L2AEL, &ids.Supervisor)) - opt.Add(WithL2CLNode(ids.L2ACL, true, true, ids.L1CL, ids.L1EL, ids.L2AEL)) + opt.Add(WithL2ELNode(ids.L2AEL, L2ELWithSupervisor(ids.Supervisor))) + opt.Add(WithL2CLNode(ids.L2ACL, ids.L1CL, ids.L1EL, ids.L2AEL, L2CLSequencer(), L2CLIndexing())) opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2ACL, ids.L1EL, ids.L2AEL)) opt.Add(WithBatcher(ids.L2ABatcher, ids.L1EL, ids.L2ACL, ids.L2AEL)) @@ -218,8 +278,8 @@ func DefaultInteropSystem(dest *DefaultInteropSystemIDs) stack.Option[*Orchestra WithPrefundedL2(ids.L1.ChainID(), ids.L2B.ChainID()), WithInteropAtGenesis(), // this can be overridden by later options )) - opt.Add(WithL2ELNode(ids.L2BEL, &ids.Supervisor)) - opt.Add(WithL2CLNode(ids.L2BCL, true, true, ids.L1CL, ids.L1EL, ids.L2BEL)) + opt.Add(WithL2ELNode(ids.L2BEL, L2ELWithSupervisor(ids.Supervisor))) + opt.Add(WithL2CLNode(ids.L2BCL, ids.L1CL, ids.L1EL, ids.L2BEL, L2CLSequencer(), L2CLIndexing())) opt.Add(WithBatcher(ids.L2BBatcher, ids.L1EL, ids.L2BCL, ids.L2BEL)) opt.Add(WithManagedBySupervisor(ids.L2BCL, ids.Supervisor)) @@ -272,16 +332,18 @@ func defaultSuperProofsSystem(dest *DefaultInteropSystemIDs, deployerOpts ...Dep WithCommons(ids.L1.ChainID()), WithPrefundedL2(ids.L1.ChainID(), ids.L2A.ChainID()), WithPrefundedL2(ids.L1.ChainID(), ids.L2B.ChainID()), + WithDevFeatureBitmap(deployer.OptimismPortalInteropDevFlag), }, deployerOpts...)...)) opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) opt.Add(WithSupervisor(ids.Supervisor, ids.Cluster, ids.L1EL)) - opt.Add(WithL2ELNode(ids.L2AEL, &ids.Supervisor)) - opt.Add(WithL2CLNode(ids.L2ACL, true, true, ids.L1CL, 
ids.L1EL, ids.L2AEL)) - opt.Add(WithL2ELNode(ids.L2BEL, &ids.Supervisor)) - opt.Add(WithL2CLNode(ids.L2BCL, true, true, ids.L1CL, ids.L1EL, ids.L2BEL)) + opt.Add(WithL2ELNode(ids.L2AEL, L2ELWithSupervisor(ids.Supervisor))) + opt.Add(WithL2CLNode(ids.L2ACL, ids.L1CL, ids.L1EL, ids.L2AEL, L2CLSequencer(), L2CLIndexing())) + + opt.Add(WithL2ELNode(ids.L2BEL, L2ELWithSupervisor(ids.Supervisor))) + opt.Add(WithL2CLNode(ids.L2BCL, ids.L1CL, ids.L1EL, ids.L2BEL, L2CLSequencer(), L2CLIndexing())) opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2ACL, ids.L1EL, ids.L2AEL)) @@ -340,11 +402,11 @@ func MultiSupervisorInteropSystem(dest *MultiSupervisorInteropSystemIDs) stack.O // add backup supervisor opt.Add(WithSupervisor(ids.SupervisorSecondary, ids.Cluster, ids.L1EL)) - opt.Add(WithL2ELNode(ids.L2A2EL, &ids.SupervisorSecondary)) - opt.Add(WithL2CLNode(ids.L2A2CL, false, true, ids.L1CL, ids.L1EL, ids.L2A2EL)) + opt.Add(WithL2ELNode(ids.L2A2EL, L2ELWithSupervisor(ids.SupervisorSecondary))) + opt.Add(WithL2CLNode(ids.L2A2CL, ids.L1CL, ids.L1EL, ids.L2A2EL, L2CLIndexing())) - opt.Add(WithL2ELNode(ids.L2B2EL, &ids.SupervisorSecondary)) - opt.Add(WithL2CLNode(ids.L2B2CL, false, true, ids.L1CL, ids.L1EL, ids.L2B2EL)) + opt.Add(WithL2ELNode(ids.L2B2EL, L2ELWithSupervisor(ids.SupervisorSecondary))) + opt.Add(WithL2CLNode(ids.L2B2CL, ids.L1CL, ids.L1EL, ids.L2B2EL, L2CLIndexing())) // verifier must be also managed or it cannot advance // we attach verifier L2CL with backup supervisor diff --git a/op-devstack/sysgo/system_singlechain_multinode.go b/op-devstack/sysgo/system_singlechain_multinode.go index ef86414781689..93bf33ae01386 100644 --- a/op-devstack/sysgo/system_singlechain_multinode.go +++ b/op-devstack/sysgo/system_singlechain_multinode.go @@ -27,8 +27,8 @@ func DefaultSingleChainMultiNodeSystem(dest *DefaultSingleChainMultiNodeSystemID opt := stack.Combine[*Orchestrator]() opt.Add(DefaultMinimalSystem(&dest.DefaultMinimalSystemIDs)) - 
opt.Add(WithL2ELNode(ids.L2ELB, nil)) - opt.Add(WithL2CLNode(ids.L2CLB, false, false, ids.L1CL, ids.L1EL, ids.L2ELB)) + opt.Add(WithL2ELNode(ids.L2ELB)) + opt.Add(WithL2CLNode(ids.L2CLB, ids.L1CL, ids.L1EL, ids.L2ELB)) // P2P connect L2CL nodes opt.Add(WithL2CLP2PConnection(ids.L2CL, ids.L2CLB)) @@ -39,3 +39,18 @@ func DefaultSingleChainMultiNodeSystem(dest *DefaultSingleChainMultiNodeSystemID })) return opt } + +func DefaultSingleChainMultiNodeSystemWithoutP2P(dest *DefaultSingleChainMultiNodeSystemIDs) stack.Option[*Orchestrator] { + ids := NewDefaultSingleChainMultiNodeSystemIDs(DefaultL1ID, DefaultL2AID) + + opt := stack.Combine[*Orchestrator]() + opt.Add(DefaultMinimalSystem(&dest.DefaultMinimalSystemIDs)) + + opt.Add(WithL2ELNode(ids.L2ELB)) + opt.Add(WithL2CLNode(ids.L2CLB, ids.L1CL, ids.L1EL, ids.L2ELB)) + + opt.Add(stack.Finally(func(orch *Orchestrator) { + *dest = ids + })) + return opt +} diff --git a/op-devstack/sysgo/system_synctester.go b/op-devstack/sysgo/system_synctester.go new file mode 100644 index 0000000000000..d5e6d6f9c0ff0 --- /dev/null +++ b/op-devstack/sysgo/system_synctester.go @@ -0,0 +1,77 @@ +package sysgo + +import ( + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +type DefaultSimpleSystemWithSyncTesterIDs struct { + DefaultMinimalSystemIDs + + L2CL2 stack.L2CLNodeID + SyncTesterL2EL stack.L2ELNodeID + SyncTester stack.SyncTesterID +} + +func NewDefaultSimpleSystemWithSyncTesterIDs(l1ID, l2ID eth.ChainID) DefaultSimpleSystemWithSyncTesterIDs { + minimal := NewDefaultMinimalSystemIDs(l1ID, l2ID) + return DefaultSimpleSystemWithSyncTesterIDs{ + DefaultMinimalSystemIDs: minimal, + L2CL2: stack.NewL2CLNodeID("verifier", l2ID), + SyncTesterL2EL: stack.NewL2ELNodeID("sync-tester-el", l2ID), + SyncTester: stack.NewSyncTesterID("sync-tester", l2ID), + } +} + +func DefaultSimpleSystemWithSyncTester(dest 
*DefaultSimpleSystemWithSyncTesterIDs) stack.Option[*Orchestrator] { + l1ID := eth.ChainIDFromUInt64(900) + l2ID := eth.ChainIDFromUInt64(901) + ids := NewDefaultSimpleSystemWithSyncTesterIDs(l1ID, l2ID) + + opt := stack.Combine[*Orchestrator]() + opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { + o.P().Logger().Info("Setting up") + })) + + opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) + + opt.Add(WithDeployer(), + WithDeployerOptions( + WithLocalContractSources(), + WithCommons(ids.L1.ChainID()), + WithPrefundedL2(ids.L1.ChainID(), ids.L2.ChainID()), + ), + ) + + opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) + + opt.Add(WithL2ELNode(ids.L2EL)) + opt.Add(WithL2CLNode(ids.L2CL, ids.L1CL, ids.L1EL, ids.L2EL, L2CLSequencer())) + + opt.Add(WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL)) + opt.Add(WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil)) + + opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2EL})) + + opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2CL, ids.L1EL, ids.L2EL)) + + opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.L2ELNodeID{ + ids.L2EL, + })) + + opt.Add(WithSyncTester(ids.SyncTester, []stack.L2ELNodeID{ids.L2EL})) + + // Create a SyncTesterEL with the same chain ID as the EL node + opt.Add(WithSyncTesterL2ELNode(ids.SyncTesterL2EL, ids.L2EL)) + opt.Add(WithL2CLNode(ids.L2CL2, ids.L1CL, ids.L1EL, ids.SyncTesterL2EL)) + + // P2P Connect CLs to signal unsafe heads + opt.Add(WithL2CLP2PConnection(ids.L2CL, ids.L2CL2)) + + opt.Add(stack.Finally(func(orch *Orchestrator) { + *dest = ids + })) + + return opt +} diff --git a/op-devstack/sysgo/system_synctester_ext.go b/op-devstack/sysgo/system_synctester_ext.go new file mode 100644 index 0000000000000..bf09f043a4ee9 --- /dev/null +++ b/op-devstack/sysgo/system_synctester_ext.go @@ -0,0 +1,100 @@ +package sysgo + +import ( + "fmt" + + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + 
"github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-node/chaincfg" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/core" +) + +type DefaultMinimalExternalELSystemIDs struct { + L1 stack.L1NetworkID + L1EL stack.L1ELNodeID + L1CL stack.L1CLNodeID + + L2 stack.L2NetworkID + L2CL stack.L2CLNodeID + L2EL stack.L2ELNodeID + L2ELReadOnly stack.L2ELNodeID + + SyncTester stack.SyncTesterID +} + +func NewExternalELSystemIDs(l1ID, l2ID eth.ChainID) DefaultMinimalExternalELSystemIDs { + ids := DefaultMinimalExternalELSystemIDs{ + L1: stack.L1NetworkID(l1ID), + L1EL: stack.NewL1ELNodeID("l1", l1ID), + L1CL: stack.NewL1CLNodeID("l1", l1ID), + L2: stack.L2NetworkID(l2ID), + L2CL: stack.NewL2CLNodeID("verifier", l2ID), + L2EL: stack.NewL2ELNodeID("sync-tester-el", l2ID), + L2ELReadOnly: stack.NewL2ELNodeID("l2-el-readonly", l2ID), + SyncTester: stack.NewSyncTesterID("sync-tester", l2ID), + } + return ids +} + +// ExternalELSystemWithEndpointAndSuperchainRegistry creates a minimal external EL system +// using a network from the superchain registry instead of the deployer +func ExternalELSystemWithEndpointAndSuperchainRegistry(dest *DefaultMinimalExternalELSystemIDs, networkPreset stack.ExtNetworkConfig) stack.Option[*Orchestrator] { + chainCfg := chaincfg.ChainByName(networkPreset.L2NetworkName) + if chainCfg == nil { + panic(fmt.Sprintf("network %s not found in superchain registry", networkPreset.L2NetworkName)) + } + l2ChainID := eth.ChainIDFromUInt64(chainCfg.ChainID) + + ids := NewExternalELSystemIDs(networkPreset.L1ChainID, l2ChainID) + + opt := stack.Combine[*Orchestrator]() + opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { + o.P().Logger().Info("Setting up with superchain registry network", "network", networkPreset.L2NetworkName) + })) + + opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) + + // We must supply the full L1 Chain Config, so look that up or fail if unknown + chainID 
:= ids.L1.ChainID() + l1ChainConfig := eth.L1ChainConfigByChainID(chainID) + if l1ChainConfig == nil { + panic(fmt.Sprintf("unsupported L1 chain ID: %s", chainID)) + } + + // Skip deployer since we're using external L1 and superchain registry for L2 config + // Create L1 network record for external L1 + opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { + l1Net := &L1Network{ + id: ids.L1, + genesis: &core.Genesis{ + Config: l1ChainConfig, + }, + blockTime: 12, + } + o.l1Nets.Set(ids.L1.ChainID(), l1Net) + })) + + opt.Add(WithExtL1Nodes(ids.L1EL, ids.L1CL, networkPreset.L1ELEndpoint, networkPreset.L1CLBeaconEndpoint)) + + // Use superchain registry instead of deployer + opt.Add(WithL2NetworkFromSuperchainRegistryWithDependencySet( + stack.L2NetworkID(l2ChainID), + networkPreset.L2NetworkName, + )) + + // Add SyncTester service with external endpoint + opt.Add(WithSyncTesterWithExternalEndpoint(ids.SyncTester, networkPreset.L2ELEndpoint, l2ChainID)) + + // Add SyncTesterL2ELNode as the L2EL replacement for real-world EL endpoint + opt.Add(WithSyncTesterL2ELNode(ids.L2EL, ids.L2EL)) + opt.Add(WithL2CLNode(ids.L2CL, ids.L1CL, ids.L1EL, ids.L2EL)) + + opt.Add(WithExtL2Node(ids.L2ELReadOnly, networkPreset.L2ELEndpoint)) + + opt.Add(stack.Finally(func(orch *Orchestrator) { + *dest = ids + })) + + return opt +} diff --git a/op-devstack/sysgo/test_sequencer.go b/op-devstack/sysgo/test_sequencer.go index ffc0a52f6dfe1..22f9cf011ccea 100644 --- a/op-devstack/sysgo/test_sequencer.go +++ b/op-devstack/sysgo/test_sequencer.go @@ -6,6 +6,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" @@ -80,8 +81,13 @@ func WithTestSequencer(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLN logger := p.Logger() + orch.writeDefaultJWT() l1EL, ok := orch.l1ELs.Get(l1ELID) require.True(ok, "l1 EL node required") + 
l1ELClient, err := ethclient.DialContext(p.Ctx(), l1EL.UserRPC()) + require.NoError(err) + engineCl, err := dialEngine(p.Ctx(), l1EL.AuthRPC(), orch.jwtSecret) + require.NoError(err) l1CL, ok := orch.l1CLs.Get(l1CLID) require.True(ok, "l1 CL node required") @@ -109,24 +115,30 @@ func WithTestSequencer(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLN l2SequencerID := seqtypes.SequencerID(fmt.Sprintf("test-seq-%s", l2CLID.ChainID())) l1SequencerID := seqtypes.SequencerID(fmt.Sprintf("test-seq-%s", l1ELID.ChainID())) + l1Net, ok := orch.l1Nets.Get(l1ELID.ChainID()) + require.True(ok, "l1 net required") + v := &config.Ensemble{ Builders: map[seqtypes.BuilderID]*config.BuilderEntry{ bid_L2: { Standard: &standardbuilder.Config{ + L1ChainConfig: l1Net.genesis.Config, L1EL: endpoint.MustRPC{ - Value: endpoint.HttpURL(l1EL.userRPC), + Value: endpoint.HttpURL(l1EL.UserRPC()), }, L2EL: endpoint.MustRPC{ - Value: endpoint.HttpURL(l2EL.userRPC), + Value: endpoint.HttpURL(l2EL.UserRPC()), }, L2CL: endpoint.MustRPC{ - Value: endpoint.HttpURL(l2CL.userRPC), + Value: endpoint.HttpURL(l2CL.UserRPC()), }, }, }, bid_L1: { L1: &fakepos.Config{ - GethBackend: l1EL.l1Geth.Backend, + ChainConfig: orch.wb.outL1Genesis.Config, + EngineAPI: engineCl, + Backend: l1ELClient, Beacon: l1CL.beacon, FinalizedDistance: 20, SafeDistance: 10, @@ -149,7 +161,7 @@ func WithTestSequencer(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLN cid_L2: { Standard: &standardcommitter.Config{ RPC: endpoint.MustRPC{ - Value: endpoint.HttpURL(l2CL.userRPC), + Value: endpoint.HttpURL(l2CL.UserRPC()), }, }, }, @@ -161,7 +173,7 @@ func WithTestSequencer(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLN pid_L2: { Standard: &standardpublisher.Config{ RPC: endpoint.MustRPC{ - Value: endpoint.HttpURL(l2CL.userRPC), + Value: endpoint.HttpURL(l2CL.UserRPC()), }, }, }, diff --git a/op-dispute-mon/mon/extract/caller.go b/op-dispute-mon/mon/extract/caller.go index eff20e7effbdf..9658c2533c963 100644 
--- a/op-dispute-mon/mon/extract/caller.go +++ b/op-dispute-mon/mon/extract/caller.go @@ -53,12 +53,14 @@ func (g *GameCallerCreator) CreateContract(ctx context.Context, game gameTypes.G switch faultTypes.GameType(game.GameType) { case faultTypes.CannonGameType, faultTypes.PermissionedGameType, + faultTypes.CannonKonaGameType, faultTypes.AsteriscGameType, faultTypes.AlphabetGameType, faultTypes.FastGameType, faultTypes.AsteriscKonaGameType, faultTypes.SuperCannonGameType, faultTypes.SuperPermissionedGameType, + faultTypes.SuperCannonKonaGameType, faultTypes.SuperAsteriscKonaGameType: fdg, err := contracts.NewFaultDisputeGameContract(ctx, g.m, game.Proxy, g.caller) if err != nil { diff --git a/op-dispute-mon/mon/extract/caller_test.go b/op-dispute-mon/mon/extract/caller_test.go index bbca26d665341..585ab609392fd 100644 --- a/op-dispute-mon/mon/extract/caller_test.go +++ b/op-dispute-mon/mon/extract/caller_test.go @@ -35,6 +35,10 @@ func TestMetadataCreator_CreateContract(t *testing.T) { name: "validPermissionedGameType", game: types.GameMetadata{GameType: uint32(faultTypes.PermissionedGameType), Proxy: fdgAddr}, }, + { + name: "validCannonKonaGameType", + game: types.GameMetadata{GameType: uint32(faultTypes.CannonKonaGameType), Proxy: fdgAddr}, + }, { name: "validAsteriscGameType", game: types.GameMetadata{GameType: uint32(faultTypes.AsteriscGameType), Proxy: fdgAddr}, @@ -59,6 +63,10 @@ func TestMetadataCreator_CreateContract(t *testing.T) { name: "validSuperPermissionedGameType", game: types.GameMetadata{GameType: uint32(faultTypes.SuperPermissionedGameType), Proxy: fdgAddr}, }, + { + name: "validSuperCannonKonaGameType", + game: types.GameMetadata{GameType: uint32(faultTypes.SuperCannonKonaGameType), Proxy: fdgAddr}, + }, { name: "validSuperAsteriscKonaGameType", game: types.GameMetadata{GameType: uint32(faultTypes.SuperAsteriscKonaGameType), Proxy: fdgAddr}, @@ -93,7 +101,10 @@ func TestMetadataCreator_CreateContract(t *testing.T) { func 
setupMetadataLoaderTest(t *testing.T, gameType uint32) (*batching.MultiCaller, *mockCacheMetrics) { fdgAbi := snapshots.LoadFaultDisputeGameABI() - if gameType == uint32(faultTypes.SuperPermissionedGameType) || gameType == uint32(faultTypes.SuperCannonGameType) || gameType == uint32(faultTypes.SuperAsteriscKonaGameType) { + if gameType == uint32(faultTypes.SuperPermissionedGameType) || + gameType == uint32(faultTypes.SuperCannonGameType) || + gameType == uint32(faultTypes.SuperCannonKonaGameType) || + gameType == uint32(faultTypes.SuperAsteriscKonaGameType) { fdgAbi = snapshots.LoadSuperFaultDisputeGameABI() } stubRpc := batchingTest.NewAbiBasedRpc(t, fdgAddr, fdgAbi) diff --git a/op-dispute-mon/mon/service.go b/op-dispute-mon/mon/service.go index 78180f6387c02..658ffd8767d1c 100644 --- a/op-dispute-mon/mon/service.go +++ b/op-dispute-mon/mon/service.go @@ -177,7 +177,7 @@ func (s *Service) initOutputRollupClient(ctx context.Context, cfg *config.Config return nil } for _, rpc := range cfg.RollupRpcs { - client, err := dial.DialRollupClientWithTimeout(ctx, dial.DefaultDialTimeout, s.logger, rpc) + client, err := dial.DialRollupClientWithTimeout(ctx, s.logger, rpc, rpcclient.WithLazyDial()) if err != nil { return fmt.Errorf("failed to dial rollup client %s: %w", rpc, err) } @@ -191,18 +191,17 @@ func (s *Service) initSupervisorClients(ctx context.Context, cfg *config.Config) return nil } for _, rpc := range cfg.SupervisorRpcs { - rpcClient, err := dial.DialRPCClientWithTimeout(ctx, dial.DefaultDialTimeout, s.logger, rpc) + client, err := dial.DialSupervisorClientWithTimeout(ctx, s.logger, rpc, rpcclient.WithLazyDial()) if err != nil { return fmt.Errorf("failed to dial supervisor client %s: %w", rpc, err) } - client := sources.NewSupervisorClient(rpcclient.NewBaseRPCClient(rpcClient)) s.supervisorClients = append(s.supervisorClients, client) } return nil } func (s *Service) initL1Client(ctx context.Context, cfg *config.Config) error { - l1RPC, err := 
dial.DialRPCClientWithTimeout(ctx, dial.DefaultDialTimeout, s.logger, cfg.L1EthRpc) + l1RPC, err := dial.DialRPCClientWithTimeout(ctx, s.logger, cfg.L1EthRpc) if err != nil { return fmt.Errorf("failed to dial L1: %w", err) } diff --git a/op-e2e/Makefile b/op-e2e/Makefile index e01a7ab27eb16..73273e3e855b6 100644 --- a/op-e2e/Makefile +++ b/op-e2e/Makefile @@ -65,3 +65,13 @@ fuzz: "go test -run NOTAREALTEST -tags cgo_test -v -fuzztime 10s -fuzz FuzzFastLzGethSolidity ./opgeth" \ "go test -run NOTAREALTEST -tags cgo_test -v -fuzztime 10s -fuzz FuzzFastLzCgo ./opgeth" \ | parallel -j 8 {} + +ifndef CONTRACT +gen-binding: + $(error CONTRACT is required, usage: make gen-binding CONTRACT=OPContractsManager) +else +gen-binding: + cd ../packages/contracts-bedrock && just build + ./scripts/gen-binding.sh $(CONTRACT) +endif +.PHONY: gen-binding diff --git a/op-e2e/README.md b/op-e2e/README.md index ba3855474615f..6c039bc6d82d4 100644 --- a/op-e2e/README.md +++ b/op-e2e/README.md @@ -39,6 +39,15 @@ make test-ws - `op-e2e/opgeth`: integration tests between test-mocks and op-geth execution-engine. - also includes upgrade-tests to ensure testing of op-stack Go components around a network upgrade. +### Generating Binding + +Bindings for a contract can be generated (or updated) using + +``` +make gen-binding CONTRACT=OPContractsManager +``` + + ### `action`-tests Action tests are set up in a compositional way: @@ -58,6 +67,10 @@ E.g. P2P, CLI usage, and dynamic block building are not covered. ### `system`-tests +> [!IMPORTANT] +> System tests are deprecated. While existing tests should continue to be maintained, +> any net-new tests should be added in [op-acceptance-tests](../op-acceptance-tests/) instead. + System tests are more complete than `action` tests, but also require a live system. This trade-off enables coverage of most of each Go service, at the cost of making navigation to cover the known edge-cases less reliable and reproducible. 
diff --git a/op-e2e/actions/altda/altda_test.go b/op-e2e/actions/altda/altda_test.go index c1363e07386fa..27d7bb231ad2d 100644 --- a/op-e2e/actions/altda/altda_test.go +++ b/op-e2e/actions/altda/altda_test.go @@ -85,7 +85,7 @@ func NewL2AltDA(t helpers.Testing, params ...AltDAParam) *L2AltDA { daMgr := altda.NewAltDAWithStorage(log, altDACfg, storage, &altda.NoopMetrics{}) - sequencer := helpers.NewL2Sequencer(t, log, l1F, miner.BlobStore(), daMgr, engCl, sd.RollupCfg, sd.DependencySet, 0) + sequencer := helpers.NewL2Sequencer(t, log, l1F, miner.BlobStore(), daMgr, engCl, sd.RollupCfg, sd.L1Cfg.Config, sd.DependencySet, 0) miner.ActL1SetFeeRecipient(common.Address{'A'}) sequencer.ActL2PipelineFull(t) @@ -143,7 +143,7 @@ func (a *L2AltDA) NewVerifier(t helpers.Testing) *helpers.L2Verifier { daMgr := altda.NewAltDAWithStorage(a.log, a.altDACfg, a.storage, &altda.NoopMetrics{}) - verifier := helpers.NewL2Verifier(t, a.log, l1F, a.miner.BlobStore(), daMgr, engCl, a.sd.RollupCfg, a.sd.DependencySet, &sync.Config{}, safedb.Disabled) + verifier := helpers.NewL2Verifier(t, a.log, l1F, a.miner.BlobStore(), daMgr, engCl, a.sd.RollupCfg, a.sd.L1Cfg.Config, a.sd.DependencySet, &sync.Config{}, safedb.Disabled) return verifier } diff --git a/op-e2e/actions/derivation/batch_queue_test.go b/op-e2e/actions/derivation/batch_queue_test.go index bb32d10284ec9..65ced92ea1dfa 100644 --- a/op-e2e/actions/derivation/batch_queue_test.go +++ b/op-e2e/actions/derivation/batch_queue_test.go @@ -95,7 +95,7 @@ func TestDeriveChainFromNearL1Genesis(gt *testing.T) { l2Cl, err := sources.NewEngineClient(seqEngine.RPCClient(), logger, nil, sources.EngineClientDefaultConfig(sd.RollupCfg)) require.NoError(gt, err) verifier := helpers.NewL2Verifier(t, logger, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), altda.Disabled, - l2Cl, sd.RollupCfg, sd.DependencySet, &sync.Config{}, safedb.Disabled) + l2Cl, sd.RollupCfg, sd.L1Cfg.Config, sd.DependencySet, &sync.Config{}, safedb.Disabled) 
verifier.ActL2PipelineFull(t) // Should not get stuck in a reset loop forever require.EqualValues(gt, l2BlockNum, seqEngine.L2Chain().CurrentSafeBlock().Number.Uint64()) require.EqualValues(gt, l2BlockNum, seqEngine.L2Chain().CurrentFinalBlock().Number.Uint64()) diff --git a/op-e2e/actions/derivation/reorg_test.go b/op-e2e/actions/derivation/reorg_test.go index 36d2458b425e7..b1390952a136f 100644 --- a/op-e2e/actions/derivation/reorg_test.go +++ b/op-e2e/actions/derivation/reorg_test.go @@ -596,7 +596,7 @@ func RestartOpGeth(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { engRpc := &rpcWrapper{seqEng.RPCClient()} l2Cl, err := sources.NewEngineClient(engRpc, log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg)) require.NoError(t, err) - sequencer := actionsHelpers.NewL2Sequencer(t, log, l1F, miner.BlobStore(), altda.Disabled, l2Cl, sd.RollupCfg, sd.DependencySet, 0) + sequencer := actionsHelpers.NewL2Sequencer(t, log, l1F, miner.BlobStore(), altda.Disabled, l2Cl, sd.RollupCfg, sd.L1Cfg.Config, sd.DependencySet, 0) batcher := actionsHelpers.NewL2Batcher(log, sd.RollupCfg, actionsHelpers.DefaultBatcherCfg(dp), sequencer.RollupClient(), miner.EthClient(), seqEng.EthClient(), seqEng.EngineClient(t, sd.RollupCfg)) @@ -684,7 +684,7 @@ func ConflictingL2Blocks(gt *testing.T, deltaTimeOffset *hexutil.Uint64) { require.NoError(t, err) l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindStandard)) require.NoError(t, err) - altSequencer := actionsHelpers.NewL2Sequencer(t, log, l1F, miner.BlobStore(), altda.Disabled, altSeqEngCl, sd.RollupCfg, sd.DependencySet, 0) + altSequencer := actionsHelpers.NewL2Sequencer(t, log, l1F, miner.BlobStore(), altda.Disabled, altSeqEngCl, sd.RollupCfg, sd.L1Cfg.Config, sd.DependencySet, 0) altBatcher := actionsHelpers.NewL2Batcher(log, sd.RollupCfg, actionsHelpers.DefaultBatcherCfg(dp), altSequencer.RollupClient(), miner.EthClient(), altSeqEng.EthClient(), 
altSeqEng.EngineClient(t, sd.RollupCfg)) diff --git a/op-e2e/actions/helpers/l1_miner.go b/op-e2e/actions/helpers/l1_miner.go index bf47753f48de5..18503f8163441 100644 --- a/op-e2e/actions/helpers/l1_miner.go +++ b/op-e2e/actions/helpers/l1_miner.go @@ -1,6 +1,7 @@ package helpers import ( + "fmt" "math/big" "github.com/ethereum-optimism/optimism/op-program/host/prefetcher" @@ -14,11 +15,12 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/kzg4844" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/trie" "github.com/ethereum/go-ethereum/triedb" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/blobstore" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -26,7 +28,7 @@ import ( type L1Miner struct { L1Replica - blobStore *e2eutils.BlobsStore + blobStore *blobstore.Store // L1 block building preferences prefCoinbase common.Address @@ -49,7 +51,7 @@ func NewL1Miner(t Testing, log log.Logger, genesis *core.Genesis) *L1Miner { rep := NewL1Replica(t, log, genesis) return &L1Miner{ L1Replica: *rep, - blobStore: e2eutils.NewBlobStore(), + blobStore: blobstore.New(), } } @@ -57,7 +59,7 @@ func (s *L1Miner) BlobSource() prefetcher.L1BlobSource { return s.blobStore } -func (s *L1Miner) BlobStore() *e2eutils.BlobsStore { +func (s *L1Miner) BlobStore() *blobstore.Store { return s.blobStore } @@ -145,6 +147,7 @@ func (s *L1Miner) ActL1IncludeTx(from common.Address) Action { t.InvalidAction("no tx inclusion when not building l1 block") return } + require.NoError(t, s.Eth.TxPool().Sync(), "must sync tx-pool to get accurate pending txs") getPendingIndex := func(from common.Address) uint64 { return s.pendingIndices[from] } @@ -195,14 +198,50 @@ func (s *L1Miner) IncludeTx(t Testing, tx *types.Transaction) *types.Receipt { if tx.Type() == types.BlobTxType { 
require.True(t, s.l1Cfg.Config.IsCancun(s.l1BuildingHeader.Number, s.l1BuildingHeader.Time), "L1 must be cancun to process blob tx") sidecar := tx.BlobTxSidecar() - if sidecar != nil { - s.l1BuildingBlobSidecars = append(s.l1BuildingBlobSidecars, sidecar) + require.NotNil(t, sidecar, "missing sidecar in blob transaction") + hashes := tx.BlobHashes() + require.Greater(t, len(hashes), 0, "blobless blob transaction") + require.NoError(t, sidecar.ValidateBlobCommitmentHashes(hashes)) + if s.l1Cfg.Config.IsOsaka(s.l1BuildingHeader.Number, s.l1BuildingHeader.Time) { + require.NoError(t, validateBlobSidecarOsaka(sidecar, hashes)) + } else { + require.NoError(t, validateBlobSidecarLegacy(sidecar, hashes)) } + s.l1BuildingBlobSidecars = append(s.l1BuildingBlobSidecars, sidecar) *s.l1BuildingHeader.BlobGasUsed += receipt.BlobGasUsed } return receipt } +// validateBlobSidecarLegacy implements pre-Osaka sidecar validation. +// Copied and adapted from op-geth core/txpool/validation.go +func validateBlobSidecarLegacy(sidecar *types.BlobTxSidecar, hashes []common.Hash) error { + if sidecar.Version != types.BlobSidecarVersion0 { + return fmt.Errorf("invalid sidecar version pre-osaka: %v", sidecar.Version) + } + if len(sidecar.Proofs) != len(hashes) { + return fmt.Errorf("invalid number of %d blob proofs expected %d", len(sidecar.Proofs), len(hashes)) + } + for i := range sidecar.Blobs { + if err := kzg4844.VerifyBlobProof(&sidecar.Blobs[i], sidecar.Commitments[i], sidecar.Proofs[i]); err != nil { + return fmt.Errorf("invalid blob %d: %w", i, err) + } + } + return nil +} + +// validateBlobSidecarOsaka implements Osaka sidecar validation. 
+// Copied and adapted from op-geth core/txpool/validation.go +func validateBlobSidecarOsaka(sidecar *types.BlobTxSidecar, hashes []common.Hash) error { + if sidecar.Version != types.BlobSidecarVersion1 { + return fmt.Errorf("invalid sidecar version post-osaka: %v", sidecar.Version) + } + if len(sidecar.Proofs) != len(hashes)*kzg4844.CellProofsPerBlob { + return fmt.Errorf("invalid number of %d blob proofs expected %d", len(sidecar.Proofs), len(hashes)*kzg4844.CellProofsPerBlob) + } + return kzg4844.VerifyCellProofs(sidecar.Blobs, sidecar.Commitments, sidecar.Proofs) +} + func (s *L1Miner) ActL1SetFeeRecipient(coinbase common.Address) { s.prefCoinbase = coinbase if s.l1Building { @@ -265,6 +304,17 @@ func (s *L1Miner) ActEmptyBlock(t Testing) *types.Block { return s.ActL1EndBlock(t) } +func (s *L1Miner) ActBuildToOsaka(t Testing) *types.Block { + t.Helper() + require.NotNil(t, s.l1Cfg.Config.OsakaTime, "cannot activate OsakaTime when it is not scheduled") + h := s.L1Chain().CurrentHeader() + for h.Time < *s.l1Cfg.Config.OsakaTime { + h = s.ActEmptyBlock(t).Header() + } + require.True(t, s.l1Cfg.Config.IsOsaka(h.Number, h.Time), "Osaka not active at block", h.Number) + return s.L1Chain().GetBlockByHash(h.Hash()) +} + func (s *L1Miner) Close() error { return s.L1Replica.Close() } diff --git a/op-e2e/actions/helpers/l2_batcher.go b/op-e2e/actions/helpers/l2_batcher.go index 6ca2924aab4cf..e0d8eac5a284e 100644 --- a/op-e2e/actions/helpers/l2_batcher.go +++ b/op-e2e/actions/helpers/l2_batcher.go @@ -42,6 +42,7 @@ type L1TxAPI interface { PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) SendTransaction(ctx context.Context, tx *types.Transaction) error + BlobBaseFee(ctx context.Context) (*big.Int, error) } type AltDAInputSetter interface { @@ -63,6 +64,8 @@ type BatcherCfg struct { DataAvailabilityType batcherFlags.DataAvailabilityType AltDA AltDAInputSetter + + 
EnableCellProofs bool } func DefaultBatcherCfg(dp *e2eutils.DeployParams) *BatcherCfg { @@ -71,6 +74,7 @@ func DefaultBatcherCfg(dp *e2eutils.DeployParams) *BatcherCfg { MaxL1TxSize: 128_000, BatcherKey: dp.Secrets.Batcher, DataAvailabilityType: batcherFlags.CalldataType, + EnableCellProofs: false, // TODO change to true when Osaka activates on L1 } } @@ -372,10 +376,11 @@ func (s *L2Batcher) ActL2BatchSubmitRaw(t Testing, payload []byte, txOpts ...fun } else if s.l2BatcherCfg.DataAvailabilityType == batcherFlags.BlobsType { var b eth.Blob require.NoError(t, b.FromData(payload), "must turn data into blob") - sidecar, blobHashes, err := txmgr.MakeSidecar([]*eth.Blob{&b}) + sidecar, blobHashes, err := txmgr.MakeSidecar([]*eth.Blob{&b}, s.l2BatcherCfg.EnableCellProofs) require.NoError(t, err) require.NotNil(t, pendingHeader.ExcessBlobGas, "need L1 header with 4844 properties") - blobBaseFee := eth.CalcBlobFeeDefault(pendingHeader) + blobBaseFee, err := s.l1.BlobBaseFee(t.Ctx()) + require.NoError(t, err, "need blob base fee") blobFeeCap := new(uint256.Int).Mul(uint256.NewInt(2), uint256.MustFromBig(blobBaseFee)) if blobFeeCap.Lt(uint256.NewInt(params.GWei)) { // ensure we meet 1 gwei geth tx-pool minimum blobFeeCap = uint256.NewInt(params.GWei) @@ -456,10 +461,10 @@ func (s *L2Batcher) ActL2BatchSubmitMultiBlob(t Testing, numBlobs int) { require.NoError(t, err, "need l1 pending header for gas price estimation") gasFeeCap := new(big.Int).Add(gasTipCap, new(big.Int).Mul(pendingHeader.BaseFee, big.NewInt(2))) - sidecar, blobHashes, err := txmgr.MakeSidecar(blobs) + sidecar, blobHashes, err := txmgr.MakeSidecar(blobs, s.l2BatcherCfg.EnableCellProofs) require.NoError(t, err) - require.NotNil(t, pendingHeader.ExcessBlobGas, "need L1 header with 4844 properties") - blobBaseFee := eth.CalcBlobFeeDefault(pendingHeader) + blobBaseFee, err := s.l1.BlobBaseFee(t.Ctx()) + require.NoError(t, err, "need blob base fee") blobFeeCap := new(uint256.Int).Mul(uint256.NewInt(2), 
uint256.MustFromBig(blobBaseFee)) if blobFeeCap.Lt(uint256.NewInt(params.GWei)) { // ensure we meet 1 gwei geth tx-pool minimum blobFeeCap = uint256.NewInt(params.GWei) diff --git a/op-e2e/actions/helpers/l2_engine.go b/op-e2e/actions/helpers/l2_engine.go index 26e2a14dbf3c5..cdb504f44e648 100644 --- a/op-e2e/actions/helpers/l2_engine.go +++ b/op-e2e/actions/helpers/l2_engine.go @@ -205,6 +205,7 @@ func (e *L2Engine) ActL2IncludeTxIgnoreForcedEmpty(from common.Address) Action { e.log.Info("Ignoring e.L2ForceEmpty=true") } + require.NoError(t, e.Eth.TxPool().Sync(), "must sync tx-pool to get accurate pending txs") tx := firstValidTx(t, from, e.EngineApi.PendingIndices, e.Eth.TxPool().ContentFrom, e.EthClient().NonceAt) prevState := e.EngineApi.ForcedEmpty() e.EngineApi.SetForceEmpty(false) // ensure the engine API can include it @@ -229,6 +230,7 @@ func (e *L2Engine) ActL2IncludeTx(from common.Address) Action { return } + require.NoError(t, e.Eth.TxPool().Sync(), "must sync tx-pool to get accurate pending txs") tx := firstValidTx(t, from, e.EngineApi.PendingIndices, e.Eth.TxPool().ContentFrom, e.EthClient().NonceAt) _, err := e.EngineApi.IncludeTx(tx, from) if errors.Is(err, engineapi.ErrNotBuildingBlock) { diff --git a/op-e2e/actions/helpers/l2_sequencer.go b/op-e2e/actions/helpers/l2_sequencer.go index 94e1e34c19e0c..7da2f74c7d933 100644 --- a/op-e2e/actions/helpers/l2_sequencer.go +++ b/op-e2e/actions/helpers/l2_sequencer.go @@ -7,6 +7,7 @@ import ( "golang.org/x/time/rate" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-node/config" "github.com/ethereum-optimism/optimism/op-node/metrics" @@ -56,10 +57,11 @@ type L2Sequencer struct { } func NewL2Sequencer(t Testing, log log.Logger, l1 derive.L1Fetcher, blobSrc derive.L1BlobsFetcher, - altDASrc driver.AltDAIface, eng L2API, cfg *rollup.Config, depSet depset.DependencySet, seqConfDepth uint64, + altDASrc driver.AltDAIface, eng L2API, cfg 
*rollup.Config, l1ChainConfig *params.ChainConfig, + depSet depset.DependencySet, seqConfDepth uint64, ) *L2Sequencer { - ver := NewL2Verifier(t, log, l1, blobSrc, altDASrc, eng, cfg, depSet, &sync.Config{}, safedb.Disabled) - attrBuilder := derive.NewFetchingAttributesBuilder(cfg, depSet, l1, eng) + ver := NewL2Verifier(t, log, l1, blobSrc, altDASrc, eng, cfg, l1ChainConfig, depSet, &sync.Config{}, safedb.Disabled) + attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1ChainConfig, depSet, l1, eng) seqConfDepthL1 := confdepth.NewConfDepth(seqConfDepth, ver.syncStatus.L1Head, l1) originSelector := sequencing.NewL1OriginSelector(t.Ctx(), log, cfg, seqConfDepthL1) l1OriginSelector := &MockL1OriginSelector{ @@ -70,7 +72,7 @@ func NewL2Sequencer(t Testing, log log.Logger, l1 derive.L1Fetcher, blobSrc deri conduc := &conductor.NoOpConductor{} asyncGossip := async.NoOpGossiper{} seq := sequencing.NewSequencer(t.Ctx(), log, cfg, attrBuilder, l1OriginSelector, - seqStateListener, conduc, asyncGossip, metr, nil) + seqStateListener, conduc, asyncGossip, metr, ver.engine, nil) opts := event.WithEmitLimiter( // TestSyncBatchType/DerivationWithFlakyL1RPC does *a lot* of quick retries // TestL2BatcherBatchType/ExtendedTimeWithoutL1Batches as well. @@ -123,10 +125,10 @@ func (s *L2Sequencer) ActL2EndBlock(t Testing) { require.NoError(t, s.drainer.DrainUntil(event.Is[engine.PayloadSuccessEvent], false), "failed to complete block building") - // After having built a L2 block, make sure to get an engine update processed. - // This will ensure the sync-status and such reflect the latest changes. - s.synchronousEvents.Emit(t.Ctx(), engine.TryUpdateEngineEvent{}) - s.synchronousEvents.Emit(t.Ctx(), engine.ForkchoiceRequestEvent{}) + // After having built a L2 block, make sure to get an engine update processed, + // and request a forkchoice update directly. 
+ s.engine.TryUpdateEngine(t.Ctx()) + s.engine.RequestForkchoiceUpdate(t.Ctx()) require.NoError(t, s.drainer.DrainUntil(func(ev event.Event) bool { x, ok := ev.(engine.ForkchoiceUpdateEvent) return ok && x.UnsafeL2Head == s.engine.UnsafeL2Head() @@ -160,9 +162,9 @@ func (s *L2Sequencer) ActL2ForceAdvanceL1Origin(t Testing) { s.mockL1OriginSelector.originOverride = nextOrigin } -// ActBuildToL1Head builds empty blocks until (incl.) the L1 head becomes the L2 origin +// ActBuildToL1Head builds empty blocks until (incl.) the L1 head becomes the L1 origin of the L2 head func (s *L2Sequencer) ActBuildToL1Head(t Testing) { - for s.engine.UnsafeL2Head().L1Origin.Number < s.syncStatus.L1Head().Number { + for s.L2Unsafe().L1Origin.Number < s.syncStatus.L1Head().Number { s.ActL2PipelineFull(t) s.ActL2EmptyBlock(t) } diff --git a/op-e2e/actions/helpers/l2_verifier.go b/op-e2e/actions/helpers/l2_verifier.go index a22048c578403..b0b5f102650e6 100644 --- a/op-e2e/actions/helpers/l2_verifier.go +++ b/op-e2e/actions/helpers/l2_verifier.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "math/big" + "time" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" "github.com/stretchr/testify/require" @@ -15,12 +16,13 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" gnode "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" + opnodemetrics "github.com/ethereum-optimism/optimism/op-node/metrics" "github.com/ethereum-optimism/optimism/op-node/node" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/attributes" - "github.com/ethereum-optimism/optimism/op-node/rollup/clsync" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/driver" "github.com/ethereum-optimism/optimism/op-node/rollup/engine" @@ -64,6 +66,8 @@ type L2Verifier struct { engine 
*engine.EngineController derivationMetrics *testutils.TestDerivationMetrics derivation *derive.DerivationPipeline + syncDeriver *driver.SyncDeriver + finalizer driver.Finalizer safeHeadListener rollup.SafeHeadListener syncCfg *sync.Config @@ -73,7 +77,8 @@ type L2Verifier struct { L2PipelineIdle bool l2Building bool - RollupCfg *rollup.Config + L1ChainConfig *params.ChainConfig + RollupCfg *rollup.Config rpc *rpc.Server @@ -109,7 +114,8 @@ type safeDB interface { func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, blobsSrc derive.L1BlobsFetcher, altDASrc driver.AltDAIface, - eng L2API, cfg *rollup.Config, depSet depset.DependencySet, syncCfg *sync.Config, safeHeadListener safeDB, + eng L2API, cfg *rollup.Config, l1ChainConfig *params.ChainConfig, + depSet depset.DependencySet, syncCfg *sync.Config, safeHeadListener safeDB, ) *L2Verifier { ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(cancel) @@ -141,50 +147,61 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, } metrics := &testutils.TestDerivationMetrics{} - ec := engine.NewEngineController(eng, log, metrics, cfg, syncCfg, - sys.Register("engine-controller", nil, opts)) + ec := engine.NewEngineController(ctx, eng, log, opnodemetrics.NoopMetrics, cfg, syncCfg, l1, sys.Register("engine-controller", nil, opts)) - sys.Register("engine-reset", - engine.NewEngineResetDeriver(ctx, log, cfg, l1, eng, syncCfg), opts) - - clSync := clsync.NewCLSync(log, cfg, metrics) - sys.Register("cl-sync", clSync, opts) + if mm, ok := interopSys.(*indexing.IndexingMode); ok { + mm.SetEngineController(ec) + } var finalizer driver.Finalizer if cfg.AltDAEnabled() { - finalizer = finality.NewAltDAFinalizer(ctx, log, cfg, l1, altDASrc) + finalizer = finality.NewAltDAFinalizer(ctx, log, cfg, l1, altDASrc, ec) } else { - finalizer = finality.NewFinalizer(ctx, log, cfg, l1) + finalizer = finality.NewFinalizer(ctx, log, cfg, l1, ec) } sys.Register("finalizer", finalizer, opts) - 
sys.Register("attributes-handler", - attributes.NewAttributesHandler(log, cfg, ctx, eng), opts) + attrHandler := attributes.NewAttributesHandler(log, cfg, ctx, eng, ec) + sys.Register("attributes-handler", attrHandler, opts) + ec.SetAttributesResetter(attrHandler) indexingMode := interopSys != nil - pipeline := derive.NewDerivationPipeline(log, cfg, depSet, l1, blobsSrc, altDASrc, eng, metrics, indexingMode) - sys.Register("pipeline", derive.NewPipelineDeriver(ctx, pipeline), opts) + pipeline := derive.NewDerivationPipeline(log, cfg, depSet, l1, blobsSrc, altDASrc, eng, metrics, indexingMode, l1ChainConfig) + pipelineDeriver := derive.NewPipelineDeriver(ctx, pipeline) + sys.Register("pipeline", pipelineDeriver, opts) + ec.SetPipelineResetter(pipelineDeriver) testActionEmitter := sys.Register("test-action", nil, opts) syncStatusTracker := status.NewStatusTracker(log, metrics) sys.Register("status", syncStatusTracker, opts) - sys.Register("sync", &driver.SyncDeriver{ - Derivation: pipeline, - SafeHeadNotifs: safeHeadListener, - CLSync: clSync, - Engine: ec, - SyncCfg: syncCfg, - Config: cfg, - L1: l1, + // TODO(#17115): Refactor dependency cycles + ec.SetCrossUpdateHandler(syncStatusTracker) + + stepDeriver := NewTestingStepSchedulingDeriver() + stepDeriver.AttachEmitter(testActionEmitter) + + syncDeriver := &driver.SyncDeriver{ + Derivation: pipeline, + SafeHeadNotifs: safeHeadListener, + Engine: ec, + SyncCfg: syncCfg, + Config: cfg, + L1: l1, + // No need to initialize L1Tracker because no L1 block cache is used for testing L2: eng, Log: log, Ctx: ctx, ManagedBySupervisor: indexingMode, - }, opts) - - sys.Register("engine", engine.NewEngDeriver(log, ctx, cfg, metrics, ec), opts) + StepDeriver: stepDeriver, + } + // TODO(#16917) Remove Event System Refactor Comments + // Couple SyncDeriver and EngineController for event refactoring + // Couple EngDeriver and NewAttributesHandler for event refactoring + ec.SyncDeriver = syncDeriver + sys.Register("sync", 
syncDeriver, opts) + sys.Register("engine", ec, opts) rollupNode := &L2Verifier{ eventSys: sys, @@ -193,6 +210,8 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, engine: ec, derivationMetrics: metrics, derivation: pipeline, + syncDeriver: syncDeriver, + finalizer: finalizer, safeHeadListener: safeHeadListener, syncCfg: syncCfg, drainer: executor, @@ -200,6 +219,7 @@ func NewL2Verifier(t Testing, log log.Logger, l1 derive.L1Fetcher, syncStatus: syncStatusTracker, L2PipelineIdle: true, l2Building: false, + L1ChainConfig: l1ChainConfig, RollupCfg: cfg, rpc: rpc.NewServer(), synchronousEvents: testActionEmitter, @@ -240,7 +260,7 @@ func (v *L2Verifier) InteropSyncNode(t Testing) syncnode.SyncNode { require.True(t, ok, "Interop sub-system must be in managed-mode if used as sync-node") auth := rpc.WithHTTPAuth(gnode.NewJWTAuth(m.JWTSecret())) opts := []client.RPCOption{client.WithGethRPCOptions(auth)} - cl, err := client.CheckAndDial(t.Ctx(), v.log, m.WSEndpoint(), auth) + cl, err := client.CheckAndDial(t.Ctx(), v.log, m.WSEndpoint(), 5*time.Second, auth) require.NoError(t, err) t.Cleanup(cl.Close) bCl := client.NewBaseRPCClient(cl) @@ -282,8 +302,7 @@ func (s *l2VerifierBackend) OverrideLeader(ctx context.Context) error { return nil } -func (s *l2VerifierBackend) OnUnsafeL2Payload(ctx context.Context, envelope *eth.ExecutionPayloadEnvelope) error { - return nil +func (s *l2VerifierBackend) OnUnsafeL2Payload(ctx context.Context, envelope *eth.ExecutionPayloadEnvelope) { } func (s *l2VerifierBackend) ConductorEnabled(ctx context.Context) (bool, error) { @@ -354,33 +373,24 @@ func (s *L2Verifier) ActRPCFail(t Testing) { func (s *L2Verifier) ActL1HeadSignal(t Testing) { head, err := s.l1.L1BlockRefByLabel(t.Ctx(), eth.Unsafe) require.NoError(t, err) - s.synchronousEvents.Emit(t.Ctx(), status.L1UnsafeEvent{L1Unsafe: head}) - require.NoError(t, s.drainer.DrainUntil(func(ev event.Event) bool { - x, ok := ev.(status.L1UnsafeEvent) - return ok && x.L1Unsafe 
== head - }, false)) + s.syncStatus.OnL1Unsafe(head) + s.syncDeriver.OnL1Unsafe(t.Ctx()) require.Equal(t, head, s.syncStatus.SyncStatus().HeadL1) } func (s *L2Verifier) ActL1SafeSignal(t Testing) { safe, err := s.l1.L1BlockRefByLabel(t.Ctx(), eth.Safe) require.NoError(t, err) - s.synchronousEvents.Emit(t.Ctx(), status.L1SafeEvent{L1Safe: safe}) - require.NoError(t, s.drainer.DrainUntil(func(ev event.Event) bool { - x, ok := ev.(status.L1SafeEvent) - return ok && x.L1Safe == safe - }, false)) + s.syncStatus.OnL1Safe(safe) require.Equal(t, safe, s.syncStatus.SyncStatus().SafeL1) } func (s *L2Verifier) ActL1FinalizedSignal(t Testing) { finalized, err := s.l1.L1BlockRefByLabel(t.Ctx(), eth.Finalized) require.NoError(t, err) - s.synchronousEvents.Emit(t.Ctx(), finality.FinalizeL1Event{FinalizedL1: finalized}) - require.NoError(t, s.drainer.DrainUntil(func(ev event.Event) bool { - x, ok := ev.(finality.FinalizeL1Event) - return ok && x.FinalizedL1 == finalized - }, false)) + s.syncStatus.OnL1Finalized(finalized) + s.finalizer.OnL1Finalized(finalized) + s.syncDeriver.OnL1Finalized(t.Ctx()) require.Equal(t, finalized, s.syncStatus.SyncStatus().FinalizedL1) } @@ -401,8 +411,6 @@ func (s *L2Verifier) OnEvent(ctx context.Context, ev event.Event) bool { s.L2PipelineIdle = true case derive.PipelineStepEvent: s.L2PipelineIdle = false - case driver.StepReqEvent: - s.synchronousEvents.Emit(ctx, driver.StepEvent{}) default: return false } @@ -442,7 +450,7 @@ func (s *L2Verifier) ActL2PipelineFull(t Testing) { // ActL2UnsafeGossipReceive creates an action that can receive an unsafe execution payload, like gossipsub func (s *L2Verifier) ActL2UnsafeGossipReceive(payload *eth.ExecutionPayloadEnvelope) Action { return func(t Testing) { - s.synchronousEvents.Emit(t.Ctx(), clsync.ReceivedUnsafePayloadEvent{Envelope: payload}) + s.engine.AddUnsafePayload(t.Ctx(), payload) } } @@ -461,3 +469,33 @@ func (s *L2Verifier) SyncSupervisor(t Testing) { _, err := 
s.InteropControl.PullEvents(t.Ctx()) require.NoError(t, err) } + +type TestingStepSchedulingDeriver struct { + emitter event.Emitter +} + +func NewTestingStepSchedulingDeriver() *TestingStepSchedulingDeriver { + return &TestingStepSchedulingDeriver{} +} + +func (t *TestingStepSchedulingDeriver) NextStep() <-chan struct{} { + return nil +} + +func (t *TestingStepSchedulingDeriver) NextDelayedStep() <-chan time.Time { + return nil +} + +func (t *TestingStepSchedulingDeriver) RequestStep(ctx context.Context, resetBackoff bool) { + t.emitter.Emit(ctx, driver.StepEvent{}) +} + +func (t *TestingStepSchedulingDeriver) AttemptStep(ctx context.Context) { +} + +func (t *TestingStepSchedulingDeriver) ResetStepBackoff(ctx context.Context) { +} + +func (t *TestingStepSchedulingDeriver) AttachEmitter(em event.Emitter) { + t.emitter = em +} diff --git a/op-e2e/actions/helpers/setups.go b/op-e2e/actions/helpers/setups.go index d981bf223459f..375c88bbff1ea 100644 --- a/op-e2e/actions/helpers/setups.go +++ b/op-e2e/actions/helpers/setups.go @@ -29,7 +29,7 @@ func SetupSequencerTest(t Testing, sd *e2eutils.SetupData, log log.Logger, opts l2Cl, err := sources.NewEngineClient(engine.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg)) require.NoError(t, err) - sequencer := NewL2Sequencer(t, log.New("role", "sequencer"), l1F, miner.BlobStore(), altda.Disabled, l2Cl, sd.RollupCfg, sd.DependencySet, 0) + sequencer := NewL2Sequencer(t, log.New("role", "sequencer"), l1F, miner.BlobStore(), altda.Disabled, l2Cl, sd.RollupCfg, sd.L1Cfg.Config, sd.DependencySet, 0) return miner, engine, sequencer } @@ -42,7 +42,7 @@ func SetupVerifier(t Testing, sd *e2eutils.SetupData, log log.Logger, jwtPath := e2eutils.WriteDefaultJWT(t) engine := NewL2Engine(t, log.New("role", "verifier-engine"), sd.L2Cfg, jwtPath, EngineWithP2P()) engCl := engine.EngineClient(t, sd.RollupCfg) - verifier := NewL2Verifier(t, log.New("role", "verifier"), l1F, blobSrc, altda.Disabled, engCl, sd.RollupCfg, 
sd.DependencySet, syncCfg, cfg.SafeHeadListener) + verifier := NewL2Verifier(t, log.New("role", "verifier"), l1F, blobSrc, altda.Disabled, engCl, sd.RollupCfg, sd.L1Cfg.Config, sd.DependencySet, syncCfg, cfg.SafeHeadListener) return engine, verifier } diff --git a/op-e2e/actions/helpers/tx_helper.go b/op-e2e/actions/helpers/tx_helper.go index 8174563102c81..591cf9ef0c0f5 100644 --- a/op-e2e/actions/helpers/tx_helper.go +++ b/op-e2e/actions/helpers/tx_helper.go @@ -2,10 +2,12 @@ package helpers import ( "context" + "fmt" "math/big" "time" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" + "github.com/ethereum-optimism/optimism/op-service/retry" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/require" @@ -25,25 +27,34 @@ func firstValidTx( var txs []*types.Transaction var q []*types.Transaction // Wait for the tx to be in the pending tx queue - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 31*time.Second) defer cancel() - err := wait.For(ctx, time.Second, func() (bool, error) { + + err := retry.Do0(ctx, 10, retry.Exponential(), func() error { i = pendingIndices(from) txs, q = contentFrom(from) // Remove any transactions that have already been included in the head block // The tx pool only prunes included transactions async so they may still be in the list - nonce, err := nonceAt(ctx, from, nil) + + subCtx, subCancel := context.WithTimeout(ctx, time.Second) + defer subCancel() + + nonce, err := nonceAt(subCtx, from, nil) if err != nil { - return false, err + return err } for len(txs) > 0 && txs[0].Nonce() < nonce { t.Logf("Removing already included transaction from list of length %v", len(txs)) txs = txs[1:] } - return uint64(len(txs)) > i, nil + + if uint64(len(txs)) <= i { + return fmt.Errorf("no pending txs from %s, and have %d unprocessable queued txs from this account", from, len(q)) + } + + 
return nil }) - require.NoError(t, err, - "no pending txs from %s, and have %d unprocessable queued txs from this account: %w", from, len(q), err) + require.NoError(t, err) return txs[i] } diff --git a/op-e2e/actions/helpers/user.go b/op-e2e/actions/helpers/user.go index 4c1bf04713212..f064ff6170a22 100644 --- a/op-e2e/actions/helpers/user.go +++ b/op-e2e/actions/helpers/user.go @@ -429,25 +429,6 @@ func (s *CrossLayerUser) CheckDepositTx(t Testing, l1TxHash common.Hash, index i } } -func (s *CrossLayerUser) ActStartWithdrawal(t Testing) { - targetAddr := common.Address{} - if s.L1.txToAddr != nil { - targetAddr = *s.L2.txToAddr - } - tx, err := s.L2.env.Bindings.L2ToL1MessagePasser.InitiateWithdrawal(&s.L2.txOpts, targetAddr, new(big.Int).SetUint64(s.L1.txOpts.GasLimit), s.L1.txCallData) - require.NoError(t, err, "create initiate withdraw tx") - err = s.L2.env.EthCl.SendTransaction(t.Ctx(), tx) - require.NoError(t, err, "must send tx") - s.lastL2WithdrawalTxHash = tx.Hash() -} - -// ActCheckStartWithdrawal checks that a previous witdrawal tx was either successful or failed. -func (s *CrossLayerUser) ActCheckStartWithdrawal(success bool) Action { - return func(t Testing) { - s.L2.CheckReceipt(t, success, s.lastL2WithdrawalTxHash) - } -} - func (s *CrossLayerUser) Address() common.Address { return s.L1.address } @@ -529,12 +510,6 @@ func (s *CrossLayerUser) getDisputeGame(t Testing, params withdrawals.ProvenWith return proxy, game.DisputeGameProxy, nil } -// ActCompleteWithdrawal creates a L1 proveWithdrawal tx for latest withdrawal. -// The tx hash is remembered as the last L1 tx, to check as L1 actor. -func (s *CrossLayerUser) ActProveWithdrawal(t Testing) { - s.L1.lastTxHash = s.ProveWithdrawal(t, s.lastL2WithdrawalTxHash) -} - // ProveWithdrawal creates a L1 proveWithdrawal tx for the given L2 withdrawal tx, returning the tx hash. 
func (s *CrossLayerUser) ProveWithdrawal(t Testing, l2TxHash common.Hash) common.Hash { params, err := s.getLastWithdrawalParams(t) @@ -566,13 +541,6 @@ func (s *CrossLayerUser) ProveWithdrawal(t Testing, l2TxHash common.Hash) common return tx.Hash() } -// ActCompleteWithdrawal creates a L1 withdrawal finalization tx for latest withdrawal. -// The tx hash is remembered as the last L1 tx, to check as L1 actor. -// The withdrawal functions like CompleteWithdrawal -func (s *CrossLayerUser) ActCompleteWithdrawal(t Testing) { - s.L1.lastTxHash = s.CompleteWithdrawal(t, s.lastL2WithdrawalTxHash) -} - // CompleteWithdrawal creates a L1 withdrawal finalization tx for the given L2 withdrawal tx, returning the tx hash. // It's an invalid action to attempt to complete a withdrawal that has not passed the L1 finalization period yet func (s *CrossLayerUser) CompleteWithdrawal(t Testing, l2TxHash common.Hash) common.Hash { diff --git a/op-e2e/actions/helpers/user_test.go b/op-e2e/actions/helpers/user_test.go deleted file mode 100644 index cc312c087482e..0000000000000 --- a/op-e2e/actions/helpers/user_test.go +++ /dev/null @@ -1,373 +0,0 @@ -package helpers - -import ( - "fmt" - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-e2e/config" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" - bindingspreview "github.com/ethereum-optimism/optimism/op-node/bindings/preview" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/testlog" -) - -type hardforkScheduledTest struct { - regolithTime *hexutil.Uint64 - canyonTime *hexutil.Uint64 - deltaTime *hexutil.Uint64 - ecotoneTime *hexutil.Uint64 - fjordTime *hexutil.Uint64 - graniteTime *hexutil.Uint64 - 
holoceneTime *hexutil.Uint64 - isthmusTime *hexutil.Uint64 - interopTime *hexutil.Uint64 - jovianTime *hexutil.Uint64 - runToFork string - allocType config.AllocType -} - -func (tc *hardforkScheduledTest) SetFork(fork string, v uint64) { - *tc.fork(fork) = (*hexutil.Uint64)(&v) -} - -func (tc *hardforkScheduledTest) GetFork(fork string) *uint64 { - return (*uint64)(*tc.fork(fork)) -} - -func (tc *hardforkScheduledTest) fork(fork string) **hexutil.Uint64 { - switch fork { - case "jovian": - return &tc.jovianTime - case "interop": - return &tc.interopTime - case "isthmus": - return &tc.isthmusTime - case "holocene": - return &tc.holoceneTime - case "granite": - return &tc.graniteTime - case "fjord": - return &tc.fjordTime - case "ecotone": - return &tc.ecotoneTime - case "delta": - return &tc.deltaTime - case "canyon": - return &tc.canyonTime - case "regolith": - return &tc.regolithTime - default: - panic(fmt.Errorf("unrecognized fork: %s", fork)) - } -} - -func TestCrossLayerUser_Default(t *testing.T) { - testCrossLayerUser(t, config.DefaultAllocType) -} - -// TestCrossLayerUser tests that common actions of the CrossLayerUser actor work in various hardfork configurations: -// - transact on L1 -// - transact on L2 -// - deposit on L1 -// - withdraw from L2 -// - prove tx on L1 -// - wait 1 week + 1 second -// - finalize withdrawal on L1 -func testCrossLayerUser(t *testing.T, allocType config.AllocType) { - futureTime := uint64(20) - farFutureTime := uint64(2000) - - forks := []string{ - "regolith", - "canyon", - "delta", - "ecotone", - "fjord", - "granite", - "holocene", - "isthmus", - "interop", - "jovian", - } - for i, fork := range forks { - i := i - fork := fork - t.Run("fork_"+fork, func(t *testing.T) { - t.Run("at_genesis", func(t *testing.T) { - tc := hardforkScheduledTest{ - allocType: allocType, - } - for _, f := range forks[:i+1] { // activate, all up to and incl this fork, at genesis - tc.SetFork(f, 0) - } - runCrossLayerUserTest(t, tc) - }) - 
t.Run("after_genesis", func(t *testing.T) { - tc := hardforkScheduledTest{ - allocType: allocType, - } - for _, f := range forks[:i] { // activate, all up to this fork, at genesis - tc.SetFork(f, 0) - } - // activate this fork after genesis - tc.SetFork(fork, futureTime) - tc.runToFork = fork - runCrossLayerUserTest(t, tc) - }) - t.Run("not_yet", func(t *testing.T) { - tc := hardforkScheduledTest{ - allocType: allocType, - } - for _, f := range forks[:i] { // activate, all up to this fork, at genesis - tc.SetFork(f, 0) - } - // activate this fork later - tc.SetFork(fork, farFutureTime) - if i > 0 { - tc.runToFork = forks[i-1] - } - runCrossLayerUserTest(t, tc) - }) - }) - } -} - -func runCrossLayerUserTest(gt *testing.T, test hardforkScheduledTest) { - t := NewDefaultTesting(gt) - params := DefaultRollupTestParams() - params.AllocType = test.allocType - dp := e2eutils.MakeDeployParams(t, params) - // This overwrites all deploy-config settings, - // so even when the deploy-config defaults change, we test the right transitions. 
- dp.DeployConfig.L2GenesisRegolithTimeOffset = test.regolithTime - dp.DeployConfig.L2GenesisCanyonTimeOffset = test.canyonTime - dp.DeployConfig.L2GenesisDeltaTimeOffset = test.deltaTime - dp.DeployConfig.L2GenesisEcotoneTimeOffset = test.ecotoneTime - dp.DeployConfig.L2GenesisFjordTimeOffset = test.fjordTime - dp.DeployConfig.L2GenesisGraniteTimeOffset = test.graniteTime - dp.DeployConfig.L2GenesisHoloceneTimeOffset = test.holoceneTime - dp.DeployConfig.L2GenesisIsthmusTimeOffset = test.isthmusTime - - if test.canyonTime != nil { - require.Zero(t, uint64(*test.canyonTime)%uint64(dp.DeployConfig.L2BlockTime), "canyon fork must be aligned") - } - if test.ecotoneTime != nil { - require.Zero(t, uint64(*test.ecotoneTime)%uint64(dp.DeployConfig.L2BlockTime), "ecotone fork must be aligned") - } - - sd := e2eutils.Setup(t, dp, DefaultAlloc) - log := testlog.Logger(t, log.LevelDebug) - - require.Equal(t, dp.Secrets.Addresses().Batcher, dp.DeployConfig.BatchSenderAddress) - require.Equal(t, dp.Secrets.Addresses().Proposer, dp.DeployConfig.L2OutputOracleProposer) - - miner, seqEngine, seq := SetupSequencerTest(t, sd, log) - batcher := NewL2Batcher(log, sd.RollupCfg, DefaultBatcherCfg(dp), - seq.RollupClient(), miner.EthClient(), seqEngine.EthClient(), seqEngine.EngineClient(t, sd.RollupCfg)) - - var proposer *L2Proposer - if test.allocType.UsesProofs() { - optimismPortal2Contract, err := bindingspreview.NewOptimismPortal2(sd.DeploymentsL1.OptimismPortalProxy, miner.EthClient()) - require.NoError(t, err) - respectedGameType, err := optimismPortal2Contract.RespectedGameType(&bind.CallOpts{}) - require.NoError(t, err) - proposer = NewL2Proposer(t, log, &ProposerCfg{ - DisputeGameFactoryAddr: &sd.DeploymentsL1.DisputeGameFactoryProxy, - ProposalInterval: 6 * time.Second, - ProposalRetryInterval: 3 * time.Second, - DisputeGameType: respectedGameType, - ProposerKey: dp.Secrets.Proposer, - AllowNonFinalized: true, - AllocType: test.allocType, - ChainID: 
eth.ChainIDFromBig(sd.L1Cfg.Config.ChainID), - }, miner.EthClient(), seq.RollupClient()) - } else { - proposer = NewL2Proposer(t, log, &ProposerCfg{ - OutputOracleAddr: &sd.DeploymentsL1.L2OutputOracleProxy, - ProposerKey: dp.Secrets.Proposer, - ProposalRetryInterval: 3 * time.Second, - AllowNonFinalized: true, - AllocType: test.allocType, - ChainID: eth.ChainIDFromBig(sd.L1Cfg.Config.ChainID), - }, miner.EthClient(), seq.RollupClient()) - } - - // need to start derivation before we can make L2 blocks - seq.ActL2PipelineFull(t) - - l1Cl := miner.EthClient() - l2Cl := seqEngine.EthClient() - l2ProofCl := seqEngine.GethClient() - - addresses := e2eutils.CollectAddresses(sd, dp) - - l1UserEnv := &BasicUserEnv[*L1Bindings]{ - EthCl: l1Cl, - Signer: types.LatestSigner(sd.L1Cfg.Config), - AddressCorpora: addresses, - Bindings: NewL1Bindings(t, l1Cl, test.allocType), - } - l2UserEnv := &BasicUserEnv[*L2Bindings]{ - EthCl: l2Cl, - Signer: types.LatestSigner(sd.L2Cfg.Config), - AddressCorpora: addresses, - Bindings: NewL2Bindings(t, l2Cl, l2ProofCl), - } - - alice := NewCrossLayerUser(log, dp.Secrets.Alice, rand.New(rand.NewSource(1234)), test.allocType) - alice.L1.SetUserEnv(l1UserEnv) - alice.L2.SetUserEnv(l2UserEnv) - - // Build at least one l2 block so we have an unsafe head with a deposit info tx (genesis block doesn't) - seq.ActL2StartBlock(t) - seq.ActL2EndBlock(t) - - if test.runToFork != "" { - forkTime := test.GetFork(test.runToFork) - require.NotNil(t, forkTime, "fork we are running up to must be configured") - // advance L2 enough to activate the fork we are running up to - seq.ActBuildL2ToTime(t, *forkTime) - } - // Check Regolith is active or not by confirming the system info tx is not a system tx - infoTx, err := l2Cl.TransactionInBlock(t.Ctx(), seq.L2Unsafe().Hash, 0) - require.NoError(t, err) - require.True(t, infoTx.IsDepositTx()) - // Should only be a system tx if regolith is not enabled - require.Equal(t, !seq.RollupCfg.IsRegolith(seq.L2Unsafe().Time), 
infoTx.IsSystemTx()) - - // regular L2 tx, in new L2 block - alice.L2.ActResetTxOpts(t) - alice.L2.ActSetTxToAddr(&dp.Addresses.Bob)(t) - alice.L2.ActMakeTx(t) - seq.ActL2StartBlock(t) - seqEngine.ActL2IncludeTx(alice.Address())(t) - seq.ActL2EndBlock(t) - alice.L2.ActCheckReceiptStatusOfLastTx(true)(t) - - // regular L1 tx, in new L1 block - alice.L1.ActResetTxOpts(t) - alice.L1.ActSetTxToAddr(&dp.Addresses.Bob)(t) - alice.L1.ActMakeTx(t) - miner.ActL1StartBlock(12)(t) - miner.ActL1IncludeTx(alice.Address())(t) - miner.ActL1EndBlock(t) - alice.L1.ActCheckReceiptStatusOfLastTx(true)(t) - - // regular Deposit, in new L1 block - alice.ActDeposit(t) - miner.ActL1StartBlock(12)(t) - miner.ActL1IncludeTx(alice.Address())(t) - miner.ActL1EndBlock(t) - - seq.ActL1HeadSignal(t) - - // sync sequencer build enough blocks to adopt latest L1 origin - for seq.SyncStatus().UnsafeL2.L1Origin.Number < miner.l1Chain.CurrentBlock().Number.Uint64() { - seq.ActL2StartBlock(t) - seq.ActL2EndBlock(t) - } - // Now that the L2 chain adopted the latest L1 block, check that we processed the deposit - alice.ActCheckDepositStatus(true, true)(t) - - // regular withdrawal, in new L2 block - alice.ActStartWithdrawal(t) - seq.ActL2StartBlock(t) - seqEngine.ActL2IncludeTx(alice.Address())(t) - seq.ActL2EndBlock(t) - alice.ActCheckStartWithdrawal(true)(t) - - // build a L1 block and more L2 blocks, - // to ensure the L2 withdrawal is old enough to be able to get into an output root proposal on L1 - miner.ActEmptyBlock(t) - seq.ActL1HeadSignal(t) - seq.ActBuildToL1Head(t) - - // submit everything to L1 - batcher.ActSubmitAll(t) - // include batch on L1 - miner.ActL1StartBlock(12)(t) - miner.ActL1IncludeTx(dp.Addresses.Batcher)(t) - miner.ActL1EndBlock(t) - - // derive from L1, blocks will now become safe to propose - seq.ActL2PipelineFull(t) - - // make proposals until there is nothing left to propose - for proposer.CanPropose(t) { - // propose it to L1 - proposer.ActMakeProposalTx(t) - // include 
proposal on L1 - miner.ActL1StartBlock(12)(t) - miner.ActL1IncludeTx(dp.Addresses.Proposer)(t) - miner.ActL1EndBlock(t) - // Check proposal was successful - receipt, err := miner.EthClient().TransactionReceipt(t.Ctx(), proposer.LastProposalTx()) - require.NoError(t, err) - require.Equal(t, types.ReceiptStatusSuccessful, receipt.Status, "proposal failed") - } - - // Mine an empty block so that the timestamp is updated. Otherwise ActProveWithdrawal will fail - // because it tries to estimate gas based on the current timestamp, which is the same timestamp - // as the dispute game creation timestamp, which causes proveWithdrawalTransaction to revert. - miner.ActL1StartBlock(12)(t) - miner.ActL1EndBlock(t) - - // prove our withdrawal on L1 - alice.ActProveWithdrawal(t) - // include proved withdrawal in new L1 block - miner.ActL1StartBlock(12)(t) - miner.ActL1IncludeTx(alice.Address())(t) - miner.ActL1EndBlock(t) - // check withdrawal succeeded - alice.L1.ActCheckReceiptStatusOfLastTx(true)(t) - - // A bit hacky- Mines an empty block with the time delta - // of the finalization period (12s) + 1 in order for the - // withdrawal to be finalized successfully. 
- miner.ActL1StartBlock(13)(t) - miner.ActL1EndBlock(t) - - // If using fault proofs we need to resolve the game - if test.allocType.UsesProofs() { - // Resolve the root claim - alice.ActResolveClaim(t) - miner.ActL1StartBlock(12)(t) - miner.ActL1IncludeTx(alice.Address())(t) - miner.ActL1EndBlock(t) - // Resolve the game - alice.L1.ActCheckReceiptStatusOfLastTx(true)(t) - alice.ActResolve(t) - miner.ActL1StartBlock(12)(t) - miner.ActL1IncludeTx(alice.Address())(t) - miner.ActL1EndBlock(t) - // Create an empty block to pass the air-gap window - alice.L1.ActCheckReceiptStatusOfLastTx(true)(t) - miner.ActL1StartBlock(13)(t) - miner.ActL1EndBlock(t) - } - - // make the L1 finalize withdrawal tx - alice.ActCompleteWithdrawal(t) - // include completed withdrawal in new L1 block - miner.ActL1StartBlock(12)(t) - miner.ActL1IncludeTx(alice.Address())(t) - miner.ActL1EndBlock(t) - // check withdrawal succeeded - alice.L1.ActCheckReceiptStatusOfLastTx(true)(t) - - // Check Regolith wasn't activated during the test unintentionally - infoTx, err = l2Cl.TransactionInBlock(t.Ctx(), seq.L2Unsafe().Hash, 0) - require.NoError(t, err) - require.True(t, infoTx.IsDepositTx()) - // Should only be a system tx if regolith is not enabled - require.Equal(t, !seq.RollupCfg.IsRegolith(seq.L2Unsafe().Time), infoTx.IsSystemTx()) -} diff --git a/op-e2e/actions/interop/dsl/interop.go b/op-e2e/actions/interop/dsl/interop.go index 0b70b6e5f7d46..21a08c3456b4c 100644 --- a/op-e2e/actions/interop/dsl/interop.go +++ b/op-e2e/actions/interop/dsl/interop.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum-optimism/optimism/op-supervisor/supervisor" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/stretchr/testify/require" altda "github.com/ethereum-optimism/optimism/op-alt-da" @@ -42,6 +43,7 @@ type Chain struct { ChainID eth.ChainID RollupCfg *rollup.Config + L1ChainConfig *params.ChainConfig DependencySet 
depset.DependencySet L2Genesis *core.Genesis BatcherAddr common.Address @@ -204,8 +206,8 @@ func (is *InteropSetup) CreateActors() *InteropActors { is.T.Cleanup(func() { require.NoError(is.T, supervisorAPI.backend.Stop(context.Background())) }) - chainA := createL2Services(is.T, is.Log, l1Miner, is.Keys, is.Out.L2s["900200"], is.CfgSet) - chainB := createL2Services(is.T, is.Log, l1Miner, is.Keys, is.Out.L2s["900201"], is.CfgSet) + chainA := createL2Services(is.T, is.Log, l1Miner, is.Keys, is.Out.L2s["900200"], is.CfgSet, is.Out.L1.Genesis.Config) + chainB := createL2Services(is.T, is.Log, l1Miner, is.Keys, is.Out.L2s["900201"], is.CfgSet, is.Out.L1.Genesis.Config) // Hook up L2 RPCs to supervisor, to fetch event data from srcA := chainA.Sequencer.InteropSyncNode(is.T) srcB := chainB.Sequencer.InteropSyncNode(is.T) @@ -292,6 +294,7 @@ func createL2Services( keys devkeys.Keys, output *interopgen.L2Output, depSet depset.DependencySet, + l1ChainConfig *params.ChainConfig, ) *Chain { logger = logger.New("chain", output.Genesis.Config.ChainID) @@ -307,8 +310,7 @@ func createL2Services( require.NoError(t, err) seq := helpers.NewL2Sequencer(t, logger.New("role", "sequencer"), l1F, - l1Miner.BlobStore(), altda.Disabled, seqCl, output.RollupCfg, depSet, - 0) + l1Miner.BlobStore(), altda.Disabled, seqCl, output.RollupCfg, l1ChainConfig, depSet, 0) batcherKey, err := keys.Secret(devkeys.ChainOperatorKey{ ChainID: output.Genesis.Config.ChainID, @@ -330,6 +332,7 @@ func createL2Services( return &Chain{ ChainID: eth.ChainIDFromBig(output.Genesis.Config.ChainID), RollupCfg: output.RollupCfg, + L1ChainConfig: l1ChainConfig, DependencySet: depSet, L2Genesis: output.Genesis, BatcherAddr: crypto.PubkeyToAddress(batcherKey.PublicKey), diff --git a/op-e2e/actions/interop/proofs_test.go b/op-e2e/actions/interop/proofs_test.go index 7b5e0487e13cf..a680f0dd8f398 100644 --- a/op-e2e/actions/interop/proofs_test.go +++ b/op-e2e/actions/interop/proofs_test.go @@ -27,6 +27,7 @@ import ( 
"github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/stretchr/testify/require" ) @@ -1498,7 +1499,7 @@ func WithInteropEnabled(t helpers.StatefulTesting, actors *dsl.InteropActors, de f.DependencySet = depSet for _, chain := range []*dsl.Chain{actors.ChainA, actors.ChainB} { - verifier, canonicalOnlyEngine := createVerifierWithOnlyCanonicalBlocks(t, actors.L1Miner, chain) + verifier, canonicalOnlyEngine := createVerifierWithOnlyCanonicalBlocks(t, actors.L1Miner, chain, chain.L1ChainConfig) f.L2Sources = append(f.L2Sources, &fpHelpers.FaultProofProgramL2Source{ Node: verifier, Engine: canonicalOnlyEngine, @@ -1510,7 +1511,7 @@ func WithInteropEnabled(t helpers.StatefulTesting, actors *dsl.InteropActors, de // createVerifierWithOnlyCanonicalBlocks creates a new L2Verifier and associated L2Engine that only has the canonical // blocks from chain in its database. Non-canonical blocks, their world state, receipts and other data are not available -func createVerifierWithOnlyCanonicalBlocks(t helpers.StatefulTesting, l1Miner *helpers.L1Miner, chain *dsl.Chain) (*helpers.L2Verifier, *helpers.L2Engine) { +func createVerifierWithOnlyCanonicalBlocks(t helpers.StatefulTesting, l1Miner *helpers.L1Miner, chain *dsl.Chain, l1ChainConfig *params.ChainConfig) (*helpers.L2Verifier, *helpers.L2Engine) { jwtPath := e2eutils.WriteDefaultJWT(t) canonicalOnlyEngine := helpers.NewL2Engine(t, testlog.Logger(t, log.LvlInfo).New("role", "canonicalOnlyEngine"), chain.L2Genesis, jwtPath) head := chain.Sequencer.L2Unsafe() @@ -1548,9 +1549,11 @@ func createVerifierWithOnlyCanonicalBlocks(t helpers.StatefulTesting, l1Miner *h altda.Disabled, canonicalOnlyEngine.EngineClient(t, chain.RollupCfg), chain.RollupCfg, + l1ChainConfig, chain.DependencySet, &sync2.Config{}, - safedb.Disabled) + safedb.Disabled, + ) return verifier, canonicalOnlyEngine } diff --git 
a/op-e2e/actions/proofs/block_data_hint_test.go b/op-e2e/actions/proofs/block_data_hint_test.go index 2b47bb0ef9835..5a63a7abdaa05 100644 --- a/op-e2e/actions/proofs/block_data_hint_test.go +++ b/op-e2e/actions/proofs/block_data_hint_test.go @@ -152,6 +152,7 @@ func createVerifier(t actionsHelpers.Testing, env *helpers.L2FaultProofEnv) (*ac altda.Disabled, l2EngineCl, env.Sd.RollupCfg, + env.Sd.L1Cfg.Config, env.Sd.DependencySet, &sync.Config{}, safedb.Disabled, diff --git a/op-e2e/actions/proofs/helpers/env.go b/op-e2e/actions/proofs/helpers/env.go index 2c30591d4cec5..c6065985af703 100644 --- a/op-e2e/actions/proofs/helpers/env.go +++ b/op-e2e/actions/proofs/helpers/env.go @@ -72,7 +72,7 @@ func NewL2FaultProofEnv[c any](t helpers.Testing, testCfg *TestCfg[c], tp *e2eut l2EngineCl, err := sources.NewEngineClient(engine.RPCClient(), log, nil, sources.EngineClientDefaultConfig(sd.RollupCfg)) require.NoError(t, err) - sequencer := helpers.NewL2Sequencer(t, log.New("role", "sequencer"), l1Cl, miner.BlobStore(), altda.Disabled, l2EngineCl, sd.RollupCfg, sd.DependencySet, 0) + sequencer := helpers.NewL2Sequencer(t, log.New("role", "sequencer"), l1Cl, miner.BlobStore(), altda.Disabled, l2EngineCl, sd.RollupCfg, sd.L1Cfg.Config, sd.DependencySet, 0) miner.ActL1SetFeeRecipient(common.Address{0xCA, 0xFE, 0xBA, 0xBE}) sequencer.ActL2PipelineFull(t) engCl := engine.EngineClient(t, sd.RollupCfg) @@ -210,13 +210,15 @@ func NewOpProgramCfg( fi *FixtureInputs, ) *config.Config { var rollupConfigs []*rollup.Config - var chainConfigs []*params.ChainConfig + var l2chainConfigs []*params.ChainConfig + var l1chainConfig *params.ChainConfig for _, source := range fi.L2Sources { rollupConfigs = append(rollupConfigs, source.Node.RollupCfg) - chainConfigs = append(chainConfigs, source.ChainConfig) + l2chainConfigs = append(l2chainConfigs, source.ChainConfig) + l1chainConfig = source.Node.L1ChainConfig } - dfault := config.NewConfig(rollupConfigs, chainConfigs, fi.L1Head, fi.L2Head, 
fi.L2OutputRoot, fi.L2Claim, fi.L2BlockNumber) + dfault := config.NewConfig(rollupConfigs, l2chainConfigs, l1chainConfig, fi.L1Head, fi.L2Head, fi.L2OutputRoot, fi.L2Claim, fi.L2BlockNumber) dfault.L2ChainID = boot.CustomChainIDIndicator if fi.InteropEnabled { dfault.AgreedPrestate = fi.AgreedPrestate diff --git a/op-e2e/actions/proofs/helpers/kona.go b/op-e2e/actions/proofs/helpers/kona.go index 8187e78843998..7d5996ab14af6 100644 --- a/op-e2e/actions/proofs/helpers/kona.go +++ b/op-e2e/actions/proofs/helpers/kona.go @@ -14,6 +14,7 @@ import ( "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-program/client/claim" + "github.com/ethereum/go-ethereum/params" "github.com/stretchr/testify/require" ) @@ -27,10 +28,25 @@ func IsKonaConfigured() bool { return konaHostPath != "" } +func writeConfigs[T any](t helpers.Testing, workDir string, name string, cfg []*T, cfgPaths []string) { + for i, cfg := range cfg { + cfgPath := filepath.Join(workDir, fmt.Sprintf("%s_%d.json", name, i)) + writeConfig(t, workDir, name, cfg, cfgPath) + cfgPaths[i] = cfgPath + } +} + +func writeConfig[T any](t helpers.Testing, workDir string, name string, cfg *T, cfgPath string) { + ser, err := json.Marshal(cfg) + require.NoError(t, err) + require.NoError(t, os.WriteFile(cfgPath, ser, fs.ModePerm)) +} + func RunKonaNative( t helpers.Testing, workDir string, rollupCfgs []*rollup.Config, + l1chainConfig *params.ChainConfig, l1Rpc string, l1BeaconRpc string, l2Rpcs []string, @@ -38,14 +54,11 @@ func RunKonaNative( ) error { // Write rollup config to tempdir. 
rollupCfgPaths := make([]string, len(rollupCfgs)) - for i, cfg := range rollupCfgs { - rollupConfigPath := filepath.Join(workDir, fmt.Sprintf("rollup_%d.json", i)) - ser, err := json.Marshal(cfg) - require.NoError(t, err) - require.NoError(t, os.WriteFile(rollupConfigPath, ser, fs.ModePerm)) + writeConfigs(t, workDir, "rollup", rollupCfgs, rollupCfgPaths) - rollupCfgPaths[i] = rollupConfigPath - } + // Write l1 chain config to tempdir. + l1chainConfigPath := filepath.Join(workDir, "l1chain.json") + writeConfig(t, workDir, "l1chain", l1chainConfig, l1chainConfigPath) // Run the fault proof program from the state transition from L2 block L2Blocknumber - 1 -> L2BlockNumber. vmCfg := vm.Config{ @@ -53,6 +66,7 @@ func RunKonaNative( L1Beacon: l1BeaconRpc, L2s: l2Rpcs, RollupConfigPaths: rollupCfgPaths, + L1GenesisPath: l1chainConfigPath, Server: konaHostPath, } inputs := utils.LocalGameInputs{ diff --git a/op-e2e/actions/proofs/helpers/runner.go b/op-e2e/actions/proofs/helpers/runner.go index 8beae9abf5b93..9ae8d408faffb 100644 --- a/op-e2e/actions/proofs/helpers/runner.go +++ b/op-e2e/actions/proofs/helpers/runner.go @@ -83,13 +83,14 @@ func RunFaultProofProgram(t helpers.Testing, logger log.Logger, l1 *helpers.L1Mi defer fakeBeacon.Close() rollupCfgs := make([]*rollup.Config, 0, len(fixtureInputs.L2Sources)) + l1chainConfig := l1.L1Chain().Config() l2Endpoints := make([]string, 0, len(fixtureInputs.L2Sources)) for _, source := range fixtureInputs.L2Sources { rollupCfgs = append(rollupCfgs, source.Node.RollupCfg) l2Endpoints = append(l2Endpoints, source.Engine.HTTPEndpoint()) } - err = RunKonaNative(t, workDir, rollupCfgs, l1.HTTPEndpoint(), fakeBeacon.BeaconAddr(), l2Endpoints, *fixtureInputs) + err = RunKonaNative(t, workDir, rollupCfgs, l1chainConfig, l1.HTTPEndpoint(), fakeBeacon.BeaconAddr(), l2Endpoints, *fixtureInputs) checkResult(t, err) } else { programCfg := NewOpProgramCfg(fixtureInputs) diff --git a/op-e2e/actions/proofs/jovian_activation_test.go 
b/op-e2e/actions/proofs/jovian_activation_test.go new file mode 100644 index 0000000000000..a36ba2947c2d8 --- /dev/null +++ b/op-e2e/actions/proofs/jovian_activation_test.go @@ -0,0 +1,176 @@ +package proofs + +import ( + "encoding/binary" + "testing" + + "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" + actionsHelpers "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" + "github.com/ethereum-optimism/optimism/op-e2e/actions/proofs/helpers" + "github.com/ethereum-optimism/optimism/op-e2e/bindings" + "github.com/ethereum-optimism/optimism/op-program/client/claim" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus/misc/eip1559" + "github.com/stretchr/testify/require" +) + +func setMinBaseFeeViaSystemConfig(t actionsHelpers.Testing, env *helpers.L2FaultProofEnv, minBaseFee uint64) { + // Create system config contract binding + systemConfig, err := bindings.NewSystemConfig(env.Sd.RollupCfg.L1SystemConfigAddress, env.Miner.EthClient()) + require.NoError(t, err) + + // Create transactor for the deployer (system config owner) + deployerTx, err := bind.NewKeyedTransactorWithChainID(env.Dp.Secrets.Deployer, env.Sd.RollupCfg.L1ChainID) + require.NoError(t, err) + t.Logf("Setting min base fee on L1: minBaseFee=%d", minBaseFee) + + // Mine the L1 transaction + env.Miner.ActL1StartBlock(12)(t) + _, err = systemConfig.SetMinBaseFee(deployerTx, minBaseFee) + require.NoError(t, err, "SetMinBaseFee transaction failed") + env.Miner.ActL1IncludeTx(env.Dp.Addresses.Deployer)(t) + env.Miner.ActL1EndBlock(t) +} + +func Test_ProgramAction_JovianActivation(gt *testing.T) { + + runJovianDerivationTest := func(gt *testing.T, testCfg *helpers.TestCfg[any], genesisConfigFn func(*genesis.DeployConfig), jovianAtGenesis bool, minBaseFee uint64) { + t := 
actionsHelpers.NewDefaultTesting(gt) + env := helpers.NewL2FaultProofEnv(t, testCfg, helpers.NewTestParams(), helpers.NewBatcherCfg(), genesisConfigFn) + t.Logf("L2 Genesis Time: %d, JovianTime: %d ", env.Sequencer.RollupCfg.Genesis.L2Time, *env.Sequencer.RollupCfg.JovianTime) + + if jovianAtGenesis { + // Verify Jovian is active at genesis + require.True(t, env.Sequencer.RollupCfg.IsJovian(env.Sequencer.RollupCfg.Genesis.L2Time), "Jovian should be active at genesis") + } else { + // If Jovian is not activated at genesis, build some blocks up to the activation block + // and verify that the extra data is Holocene + for env.Engine.L2Chain().CurrentBlock().Time < *env.Sequencer.RollupCfg.JovianTime { + b := env.Engine.L2Chain().GetBlockByHash(env.Sequencer.L2Unsafe().Hash) + expectedHoloceneExtraData := eip1559.EncodeHoloceneExtraData(250, 6) + require.Equal(t, expectedHoloceneExtraData, b.Extra(), "extra data should match Holocene format") + env.Sequencer.ActL2EmptyBlock(t) + } + } + + // Build the activation block + env.Sequencer.ActL2EmptyBlock(t) + activationBlock := env.Engine.L2Chain().GetBlockByHash(env.Sequencer.L2Unsafe().Hash) + require.Equal(t, eip1559.EncodeMinBaseFeeExtraData(250, 6, 0), activationBlock.Extra(), "activation block should have Jovian extraData") + + // Set the minimum base fee + setMinBaseFeeViaSystemConfig(t, env, minBaseFee) + + // Build activation+1 block + env.Sequencer.ActL2EmptyBlock(t) + blockAfterActivation := env.Engine.L2Chain().GetBlockByHash(env.Sequencer.L2Unsafe().Hash) + // Assert extradata of the blocks which were past the Jovian activation, but before the L1 origin moved to the SystemConfig change + // It should have a zero min base fee + actualMinBaseFee := binary.BigEndian.Uint64(blockAfterActivation.Extra()[9:17]) + require.Equal(t, uint64(0), actualMinBaseFee, "activation block should have a zero min base fee") + + // Allow L1->L2 derivation to propagate the SystemConfig change & build L2 blocks up to the L1 origin 
that includes the SystemConfig change + env.Sequencer.ActL1HeadSignal(t) + env.Sequencer.ActL2PipelineFull(t) + env.Sequencer.ActBuildToL1Head(t) + + // Block after the SystemConfig change + env.Sequencer.ActL2EmptyBlock(t) + blockAfterSystemConfigChange := env.Engine.L2Chain().GetBlockByHash(env.Sequencer.L2Unsafe().Hash) + expectedJovianExtraDataWithMinFee := eip1559.EncodeMinBaseFeeExtraData(250, 6, minBaseFee) + require.Equal(t, expectedJovianExtraDataWithMinFee, blockAfterSystemConfigChange.Extra(), "block should have updated Jovian extraData with min base fee") + + // Verify base fee is clamped + require.GreaterOrEqual(t, blockAfterSystemConfigChange.BaseFee().Uint64(), minBaseFee, "base fee should be >= minimum base fee") + + if !jovianAtGenesis { + // Verify Jovian fork activation occurred by checking for the activation log + jovianRecs := env.Logs.FindLogs( + testlog.NewMessageContainsFilter("Detected hardfork activation block"), + testlog.NewAttributesFilter("role", "sequencer"), + testlog.NewAttributesFilter("forkName", "jovian"), + ) + require.Len(t, jovianRecs, 1, "Jovian fork should be detected and activated exactly once") + } + + env.BatchMineAndSync(t) + l2SafeHead := env.Sequencer.L2Safe() + t.Logf("Safe head block number: %d, timestamp: %d", l2SafeHead.Number, l2SafeHead.Time) + require.True(t, l2SafeHead.Number >= uint64(0), "safe head should progress") + + env.RunFaultProofProgramFromGenesis(t, l2SafeHead.Number, testCfg.CheckResult, testCfg.InputParams...) 
+ } + + tests := map[string]struct { + genesisConfigFn func(*genesis.DeployConfig) + jovianAtGenesis bool + minBaseFee uint64 + }{ + "JovianActivationAfterGenesis": { + genesisConfigFn: func(dc *genesis.DeployConfig) { + // Activate Isthmus at genesis + zero := hexutil.Uint64(0) + dc.L2GenesisIsthmusTimeOffset = &zero + // Then set Jovian at 10s + ten := hexutil.Uint64(10) + dc.L2GenesisJovianTimeOffset = &ten + }, + jovianAtGenesis: false, + minBaseFee: 0, + }, + "JovianActivationAtGenesisZeroMinBaseFee": { + genesisConfigFn: func(dc *genesis.DeployConfig) { + zero := hexutil.Uint64(0) + dc.L2GenesisJovianTimeOffset = &zero + }, + jovianAtGenesis: true, + minBaseFee: 0, + }, + "JovianActivationAtGenesisMinBaseFeeMedium": { + genesisConfigFn: func(dc *genesis.DeployConfig) { + zero := hexutil.Uint64(0) + dc.L2GenesisJovianTimeOffset = &zero + }, + jovianAtGenesis: true, + minBaseFee: 1_000_000_000, // 1 gwei + }, + "JovianActivationAtGenesisMinBaseFeeHigh": { + genesisConfigFn: func(dc *genesis.DeployConfig) { + zero := hexutil.Uint64(0) + dc.L2GenesisJovianTimeOffset = &zero + }, + jovianAtGenesis: true, + minBaseFee: 2_000_000_000, // 2 gwei + }, + } + + for name, tt := range tests { + gt.Run(name, func(t *testing.T) { + matrix := helpers.NewMatrix[any]() + defer matrix.Run(t) + + matrix.AddTestCase( + "HonestClaim-"+name, + nil, + helpers.NewForkMatrix(helpers.Isthmus), + func(gt *testing.T, testCfg *helpers.TestCfg[any]) { + runJovianDerivationTest(gt, testCfg, tt.genesisConfigFn, tt.jovianAtGenesis, tt.minBaseFee) + }, + helpers.ExpectNoError(), + ) + matrix.AddTestCase( + "JunkClaim-"+name, + nil, + helpers.NewForkMatrix(helpers.Isthmus), + func(gt *testing.T, testCfg *helpers.TestCfg[any]) { + runJovianDerivationTest(gt, testCfg, tt.genesisConfigFn, tt.jovianAtGenesis, tt.minBaseFee) + }, + helpers.ExpectError(claim.ErrClaimNotValid), + helpers.WithL2Claim(common.HexToHash("0xdeadbeef")), + ) + }) + } +} diff --git 
a/op-e2e/actions/proofs/l1_blob_parameter_forks_test.go b/op-e2e/actions/proofs/l1_blob_parameter_forks_test.go new file mode 100644 index 0000000000000..a1b4685c2ee08 --- /dev/null +++ b/op-e2e/actions/proofs/l1_blob_parameter_forks_test.go @@ -0,0 +1,141 @@ +package proofs_test + +import ( + "math/big" + "testing" + + batcherFlags "github.com/ethereum-optimism/optimism/op-batcher/flags" + "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" + actionsHelpers "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" + "github.com/ethereum-optimism/optimism/op-e2e/actions/proofs/helpers" + legacybindings "github.com/ethereum-optimism/optimism/op-e2e/bindings" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/predeploys" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" + "github.com/ethereum/go-ethereum/params" + "github.com/stretchr/testify/require" +) + +// Test_ProgramAction_BlobParameterForks tests the blob base fee calculation for different forks. 
+func Test_ProgramAction_BlobParameterForks(gt *testing.T) { + runBlobParameterForksTest := func(gt *testing.T, testCfg *helpers.TestCfg[any]) { + t := actionsHelpers.NewDefaultTesting(gt) + + // Create test environment with Fusaka activation + env := helpers.NewL2FaultProofEnv(t, testCfg, helpers.NewTestParams(), + helpers.NewBatcherCfg( + func(c *actionsHelpers.BatcherCfg) { + c.DataAvailabilityType = batcherFlags.BlobsType + }, + ), + func(dp *genesis.DeployConfig) { + dp.L1CancunTimeOffset = ptr(hexutil.Uint64(0)) + dp.L1PragueTimeOffset = ptr(hexutil.Uint64(12)) + dp.L1OsakaTimeOffset = ptr(hexutil.Uint64(24)) + dp.L1BPO1TimeOffset = ptr(hexutil.Uint64(36)) + dp.L1BPO2TimeOffset = ptr(hexutil.Uint64(48)) + dp.L1BPO3TimeOffset = ptr(hexutil.Uint64(60)) + dp.L1BPO4TimeOffset = ptr(hexutil.Uint64(72)) + dp.L1BlobScheduleConfig = ¶ms.BlobScheduleConfig{ + Cancun: params.DefaultCancunBlobConfig, + Osaka: params.DefaultOsakaBlobConfig, + Prague: params.DefaultPragueBlobConfig, + BPO1: params.DefaultBPO1BlobConfig, + BPO2: params.DefaultBPO2BlobConfig, + BPO3: params.DefaultBPO3BlobConfig, + BPO4: params.DefaultBPO4BlobConfig, + } + dp.L1GenesisBlockExcessBlobGas = ptr(hexutil.Uint64(1e8)) // Jack up the blob market so we can test the blob fee calculation + }, + ) + + miner, sequencer := env.Miner, env.Sequencer + + // Bind to L1Block contract on L2 + l1BlockContract, err := legacybindings.NewL1Block(predeploys.L1BlockAddr, env.Engine.EthClient()) + require.NoError(t, err) + + atBlockWithHash := func(hash common.Hash) *bind.CallOpts { + return &bind.CallOpts{ + BlockHash: hash, + } + } + + // requireConsistentBlobBaseFeeForFork requires the blob base fee to be consistent between + // the L1 Origin block (computed using the excess blob gas and l1 chain config) + // and the L1 Block contract on L2 (accessed with a contract method call), for a given fork predicate. 
+ requireConsistentBlobBaseFeeForFork := func(t actionsHelpers.StatefulTesting, l2Block eth.L2BlockRef, expectActive bool, label string, isActive func(num *big.Int, time uint64) bool) { + bbfL2, err := l1BlockContract.BlobBaseFee(atBlockWithHash(l2Block.Hash)) + require.NoError(t, err) + + l1Origin := miner.L1Chain().GetHeaderByHash(l2Block.L1Origin.Hash) + if expectActive { + require.True(t, isActive(l1Origin.Number, l1Origin.Time), "%s not active at l1 origin %d, time %d", label, l1Origin.Number, l1Origin.Time) + } else { + require.False(t, isActive(l1Origin.Number, l1Origin.Time), "%s should not be active at l1 origin %d, time %d", label, l1Origin.Number, l1Origin.Time) + } + bbfL1 := eip4844.CalcBlobFee(env.Sd.L1Cfg.Config, l1Origin) + + require.True(t, bbfL2.Cmp(bbfL1) == 0, + "%s: blob base fee does not match, bbfL2=%d, bbfL1=%d, l1BlockNum=%d, l2BlockNum=%d", label, bbfL2, bbfL1, l1Origin.Number, l2Block.Number) + + require.True(t, bbfL2.Cmp(big.NewInt(1)) > 0, + "%s: blob base fee is unrealistically low and doesn't exercise the blob fee calculation", label) + } + + // buildL1ToTime advances L1 with empty blocks until the given fork time. 
+ buildL1ToTime := func(t actionsHelpers.StatefulTesting, forkTime *uint64) { + require.NotNil(t, forkTime, "fork time must be configured") + h := miner.L1Chain().CurrentHeader() + for h.Time < *forkTime { + h = miner.ActEmptyBlock(t).Header() + } + } + + // Iterate through all forks and assert pre/post activation blob fees match expectations + cfg := env.Sd.L1Cfg.Config + forks := []struct { + label string + forkTime *uint64 + isActive func(num *big.Int, time uint64) bool + }{ + {"Prague", cfg.PragueTime, func(num *big.Int, time uint64) bool { return cfg.IsPrague(num, time) }}, + {"Osaka", cfg.OsakaTime, func(num *big.Int, time uint64) bool { return cfg.IsOsaka(num, time) }}, + {"BPO1", cfg.BPO1Time, func(num *big.Int, time uint64) bool { return cfg.IsBPO1(num, time) }}, + {"BPO2", cfg.BPO2Time, func(num *big.Int, time uint64) bool { return cfg.IsBPO2(num, time) }}, + {"BPO3", cfg.BPO3Time, func(num *big.Int, time uint64) bool { return cfg.IsBPO3(num, time) }}, + {"BPO4", cfg.BPO4Time, func(num *big.Int, time uint64) bool { return cfg.IsBPO4(num, time) }}, + } + for _, f := range forks { + // Advance L1 to fork activation + buildL1ToTime(t, f.forkTime) + + // Build an empty L2 block which still has a pre-fork L1 origin, and check blob fee + sequencer.ActL2EmptyBlock(t) + l2Block := sequencer.SyncStatus().UnsafeL2 + requireConsistentBlobBaseFeeForFork(t, l2Block, false, f.label, f.isActive) + + // Advance L2 chain until L1 origin is at/after the fork activation + sequencer.ActL1HeadSignal(t) + sequencer.ActBuildToL1HeadUnsafe(t) + + l2Block = sequencer.L2Unsafe() + require.Greater(t, l2Block.Number, uint64(1)) + requireConsistentBlobBaseFeeForFork(t, l2Block, true, f.label, f.isActive) + } + + // Final sync + env.BatchMineAndSync(t) + + // Run fault proof program + safeL2Head := sequencer.L2Safe() + env.RunFaultProofProgramFromGenesis(t, safeL2Head.Number, testCfg.CheckResult, testCfg.InputParams...) 
+ } + + matrix := helpers.NewMatrix[any]() + matrix.AddDefaultTestCases(nil, helpers.NewForkMatrix(helpers.LatestFork), runBlobParameterForksTest) + matrix.Run(gt) +} diff --git a/op-e2e/actions/proofs/l1_prague_fork_test.go b/op-e2e/actions/proofs/l1_prague_fork_test.go index d9fcaf47aa51e..f200fb3c1b1a0 100644 --- a/op-e2e/actions/proofs/l1_prague_fork_test.go +++ b/op-e2e/actions/proofs/l1_prague_fork_test.go @@ -7,12 +7,8 @@ import ( "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" actionsHelpers "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" "github.com/ethereum-optimism/optimism/op-e2e/actions/proofs/helpers" - legacybindings "github.com/ethereum-optimism/optimism/op-e2e/bindings" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/predeploys" - "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/holiman/uint256" "github.com/stretchr/testify/require" @@ -44,10 +40,7 @@ func Test_ProgramAction_PragueForkAfterGenesis(gt *testing.T) { }, ) - miner, batcher, verifier, sequencer, engine := env.Miner, env.Batcher, env.Sequencer, env.Sequencer, env.Engine - - l1Block, err := legacybindings.NewL1Block(predeploys.L1BlockAddr, engine.EthClient()) - require.NoError(t, err) + miner, batcher, verifier, sequencer, _ := env.Miner, env.Batcher, env.Sequencer, env.Sequencer, env.Engine // utils checkVerifierDerivedToL1Head := func(t actionsHelpers.StatefulTesting) { @@ -89,17 +82,6 @@ func Test_ProgramAction_PragueForkAfterGenesis(gt *testing.T) { checkVerifierDerivedToL1Head(t) } - checkL1BlockBlobBaseFee := func(t actionsHelpers.StatefulTesting, l2Block eth.L2BlockRef) { - l1BlockID := l2Block.L1Origin - l1BlockHeader := miner.L1Chain().GetHeaderByHash(l1BlockID.Hash) - expectedBbf := eth.CalcBlobFeeDefault(l1BlockHeader) - 
upstreamExpectedBbf := eip4844.CalcBlobFee(env.Sd.L1Cfg.Config, l1BlockHeader) - require.Equal(t, expectedBbf.Uint64(), upstreamExpectedBbf.Uint64(), "expected blob base fee should match upstream calculation") - bbf, err := l1Block.BlobBaseFee(&bind.CallOpts{BlockHash: l2Block.Hash}) - require.NoError(t, err, "failed to get blob base fee") - require.Equal(t, expectedBbf.Uint64(), bbf.Uint64(), "l1Block blob base fee does not match expectation, l1BlockNum %d, l2BlockNum %d", l1BlockID.Number, l2Block.Number) - } - requireSafeHeadProgression := func(t actionsHelpers.StatefulTesting, safeL2Before, safeL2After eth.L2BlockRef, batchedWithSetCodeTx bool) { if batchedWithSetCodeTx { require.Equal(t, safeL2Before, safeL2After, "safe head should not have changed (SetCode / type 4 batcher tx ignored)") @@ -144,11 +126,10 @@ func Test_ProgramAction_PragueForkAfterGenesis(gt *testing.T) { // Cache safe head before verifier sync safeL2Initial := verifier.SyncStatus().SafeL2 - // Build an empty L2 block which has a pre-prague L1 origin, and check the blob fee is correct + // Build an empty L2 block which has a pre-prague L1 origin sequencer.ActL2EmptyBlock(t) l1OriginHeader := miner.L1Chain().GetHeaderByHash(verifier.SyncStatus().UnsafeL2.L1Origin.Hash) requirePragueStatusOnL1(false, l1OriginHeader) - checkL1BlockBlobBaseFee(t, verifier.SyncStatus().UnsafeL2) // Build L2 unsafe chain and batch it to L1 using either DynamicFee or // EIP-7702 SetCode txs @@ -165,10 +146,9 @@ func Test_ProgramAction_PragueForkAfterGenesis(gt *testing.T) { sequencer.ActBuildToL1Head(t) // Advance L2 chain until L1 origin has Prague active - // Check that the l1 origin is now a Prague block, and that the blob fee is correct + // Check that the l1 origin is now a Prague block l1Origin := miner.L1Chain().GetHeaderByNumber(verifier.SyncStatus().UnsafeL2.L1Origin.Number) requirePragueStatusOnL1(true, l1Origin) - checkL1BlockBlobBaseFee(t, verifier.SyncStatus().UnsafeL2) // Batch and sync again 
buildUnsafeL2AndSubmit(testCfg.Custom.useSetCodeTx) diff --git a/op-e2e/actions/proofs/pectra_blob_schedule_test.go b/op-e2e/actions/proofs/pectra_blob_schedule_test.go index 87571b050374e..71b9bba7b10f4 100644 --- a/op-e2e/actions/proofs/pectra_blob_schedule_test.go +++ b/op-e2e/actions/proofs/pectra_blob_schedule_test.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" ) type pectraBlobScheduleTestCfg struct { @@ -82,7 +83,7 @@ func testPectraBlobSchedule(gt *testing.T, testCfg *helpers.TestCfg[any]) { sequencer.ActBuildToL1HeadUnsafe(t) cancunBBF1 := eth.CalcBlobFeeCancun(*l1_1.ExcessBlobGas) - pragueBBF1 := eth.CalcBlobFeeDefault(l1_1) + pragueBBF1 := eip4844.CalcBlobFee(env.Sd.L1Cfg.Config, l1_1) // Make sure they differ. require.Less(t, pragueBBF1.Uint64(), cancunBBF1.Uint64()) opts := &bind.CallOpts{} @@ -113,7 +114,7 @@ func testPectraBlobSchedule(gt *testing.T, testCfg *helpers.TestCfg[any]) { sequencer.ActBuildToL1HeadUnsafe(t) cancunBBF2 := eth.CalcBlobFeeCancun(*l1_2.ExcessBlobGas) - pragueBBF2 := eth.CalcBlobFeeDefault(l1_2) + pragueBBF2 := eip4844.CalcBlobFee(env.Sd.L1Cfg.Config, l1_2) require.Less(t, pragueBBF2.Uint64(), cancunBBF2.Uint64()) bbf2, err := l1Block.BlobBaseFee(opts) require.NoError(t, err) diff --git a/op-e2e/actions/sequencer/l2_sequencer_test.go b/op-e2e/actions/sequencer/l2_sequencer_test.go index 7a48d24a5c286..f4f564b3b4018 100644 --- a/op-e2e/actions/sequencer/l2_sequencer_test.go +++ b/op-e2e/actions/sequencer/l2_sequencer_test.go @@ -180,7 +180,7 @@ func TestL2SequencerAPI(gt *testing.T) { l2Cl := seqEng.EngineClient(t, cfg) // Prepare a block - fb := derive.NewFetchingAttributesBuilder(cfg, sd.DependencySet, l1Cl, l2Cl) + fb := derive.NewFetchingAttributesBuilder(cfg, sd.L1Cfg.Config, sd.DependencySet, l1Cl, l2Cl) parent, err := l2Cl.L2BlockRefByLabel(t.Ctx(), eth.Unsafe) require.NoError(t, 
err) l1Origin := parent.L1Origin // repeat the L1 origin diff --git a/op-e2e/actions/sync/sync_test.go b/op-e2e/actions/sync/sync_test.go index 86fb80e939846..6786b878a5fe1 100644 --- a/op-e2e/actions/sync/sync_test.go +++ b/op-e2e/actions/sync/sync_test.go @@ -644,7 +644,9 @@ func PerformELSyncAndCheckPayloads(t actionsHelpers.Testing, miner *actionsHelpe // Insert it on the verifier seqHead, err := seqEngCl.PayloadByLabel(t.Ctx(), eth.Unsafe) require.NoError(t, err) - seqStart, err := seqEngCl.PayloadByNumber(t.Ctx(), from) + // Must check with block which is not genesis + startBlockNum := from + 1 + seqStart, err := seqEngCl.PayloadByNumber(t.Ctx(), startBlockNum) require.NoError(t, err) verifier.ActL2InsertUnsafePayload(seqHead)(t) @@ -657,10 +659,10 @@ func PerformELSyncAndCheckPayloads(t actionsHelpers.Testing, miner *actionsHelpe ) // Expect snap sync to download & execute the entire chain - // Verify this by checking that the verifier has the correct value for block 1 + // Verify this by checking that the verifier has the correct value for block startBlockNum require.Eventually(t, func() bool { - block, err := verifier.Eng.L2BlockRefByNumber(t.Ctx(), from) + block, err := verifier.Eng.L2BlockRefByNumber(t.Ctx(), startBlockNum) if err != nil { return false } @@ -821,7 +823,7 @@ func TestELSyncTransitionsToCLSyncAfterNodeRestart(gt *testing.T) { PrepareELSyncedNode(t, miner, sequencer, seqEng, verifier, verEng, seqEngCl, batcher, dp) // Create a new verifier which is essentially a new op-node with the sync mode of ELSync and default geth engine kind. 
- verifier = actionsHelpers.NewL2Verifier(t, captureLog, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), altda.Disabled, verifier.Eng, sd.RollupCfg, sd.DependencySet, &sync.Config{SyncMode: sync.ELSync}, actionsHelpers.DefaultVerifierCfg().SafeHeadListener) + verifier = actionsHelpers.NewL2Verifier(t, captureLog, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), altda.Disabled, verifier.Eng, sd.RollupCfg, sd.L1Cfg.Config, sd.DependencySet, &sync.Config{SyncMode: sync.ELSync}, actionsHelpers.DefaultVerifierCfg().SafeHeadListener) // Build another 10 L1 blocks on the sequencer for i := 0; i < 10; i++ { @@ -863,7 +865,7 @@ func TestForcedELSyncCLAfterNodeRestart(gt *testing.T) { PrepareELSyncedNode(t, miner, sequencer, seqEng, verifier, verEng, seqEngCl, batcher, dp) // Create a new verifier which is essentially a new op-node with the sync mode of ELSync and erigon engine kind. - verifier2 := actionsHelpers.NewL2Verifier(t, captureLog, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), altda.Disabled, verifier.Eng, sd.RollupCfg, sd.DependencySet, &sync.Config{SyncMode: sync.ELSync, SupportsPostFinalizationELSync: true}, actionsHelpers.DefaultVerifierCfg().SafeHeadListener) + verifier2 := actionsHelpers.NewL2Verifier(t, captureLog, miner.L1Client(t, sd.RollupCfg), miner.BlobStore(), altda.Disabled, verifier.Eng, sd.RollupCfg, sd.L1Cfg.Config, sd.DependencySet, &sync.Config{SyncMode: sync.ELSync, SupportsPostFinalizationELSync: true}, actionsHelpers.DefaultVerifierCfg().SafeHeadListener) // Build another 10 L1 blocks on the sequencer for i := 0; i < 10; i++ { @@ -1052,8 +1054,7 @@ func TestSpanBatchAtomicity_Consolidation(gt *testing.T) { require.Equal(t, verifier.L2Safe().Number, uint64(0)) } else { // Make sure we do the post-processing of what safety updates might happen - // after the pending-safe event, before the next pending-safe event. 
- verifier.ActL2EventsUntil(t, event.Is[engine2.PendingSafeUpdateEvent], 100, true) + verifier.ActL2PipelineFull(t) // Once the span batch is fully processed, the safe head must advance to the end of span batch. require.Equal(t, verifier.L2Safe().Number, targetHeadNumber) require.Equal(t, verifier.L2Safe(), verifier.L2PendingSafe()) diff --git a/op-e2e/actions/upgrades/dencun_fork_test.go b/op-e2e/actions/upgrades/dencun_fork_test.go index 52cd76523844e..13c88fefc77b0 100644 --- a/op-e2e/actions/upgrades/dencun_fork_test.go +++ b/op-e2e/actions/upgrades/dencun_fork_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" + "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/stretchr/testify/require" "github.com/ethereum/go-ethereum/common" @@ -124,12 +125,7 @@ func TestDencunL2ForkAfterGenesis(gt *testing.T) { dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams()) require.Zero(t, *dp.DeployConfig.L1CancunTimeOffset) // This test will fork on the second block - offset := hexutil.Uint64(dp.DeployConfig.L2BlockTime * 2) - dp.DeployConfig.L2GenesisEcotoneTimeOffset = &offset - dp.DeployConfig.L2GenesisFjordTimeOffset = nil - dp.DeployConfig.L2GenesisGraniteTimeOffset = nil - dp.DeployConfig.L2GenesisHoloceneTimeOffset = nil - // New forks have to be added here, after changing the default deploy config! 
+ dp.DeployConfig.ActivateForkAtOffset(rollup.Ecotone, dp.DeployConfig.L2BlockTime*2) sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc) log := testlog.Logger(t, log.LevelDebug) diff --git a/op-e2e/actions/upgrades/ecotone_fork_test.go b/op-e2e/actions/upgrades/ecotone_fork_test.go index 19b36eb91f471..09aa995fc3607 100644 --- a/op-e2e/actions/upgrades/ecotone_fork_test.go +++ b/op-e2e/actions/upgrades/ecotone_fork_test.go @@ -18,6 +18,7 @@ import ( "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" "github.com/ethereum-optimism/optimism/op-e2e/bindings" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" + "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/predeploys" "github.com/ethereum-optimism/optimism/op-service/testlog" @@ -43,17 +44,13 @@ func verifyCodeHashMatches(t helpers.Testing, client *ethclient.Client, address func TestEcotoneNetworkUpgradeTransactions(gt *testing.T) { t := helpers.NewDefaultTesting(gt) dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams()) - ecotoneOffset := hexutil.Uint64(4) + ecotoneOffset := 4 log := testlog.Logger(t, log.LevelDebug) require.Zero(t, *dp.DeployConfig.L1CancunTimeOffset) // Activate all forks at genesis, and schedule Ecotone the block after - dp.DeployConfig.L2GenesisEcotoneTimeOffset = &ecotoneOffset - dp.DeployConfig.L2GenesisFjordTimeOffset = nil - dp.DeployConfig.L2GenesisGraniteTimeOffset = nil - dp.DeployConfig.L2GenesisHoloceneTimeOffset = nil - // New forks have to be added here... 
+ dp.DeployConfig.ActivateForkAtOffset(rollup.Ecotone, uint64(ecotoneOffset)) require.NoError(t, dp.DeployConfig.Check(log), "must have valid config") sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc) diff --git a/op-e2e/actions/upgrades/fjord_fork_test.go b/op-e2e/actions/upgrades/fjord_fork_test.go index bb15e76134c0c..57ac95d091813 100644 --- a/op-e2e/actions/upgrades/fjord_fork_test.go +++ b/op-e2e/actions/upgrades/fjord_fork_test.go @@ -10,7 +10,6 @@ import ( "github.com/ethereum-optimism/optimism/op-service/predeploys" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" @@ -19,6 +18,7 @@ import ( "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" "github.com/ethereum-optimism/optimism/op-e2e/bindings" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" + "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/testlog" ) @@ -32,21 +32,13 @@ var ( func TestFjordNetworkUpgradeTransactions(gt *testing.T) { t := helpers.NewDefaultTesting(gt) dp := e2eutils.MakeDeployParams(t, helpers.DefaultRollupTestParams()) - genesisBlock := hexutil.Uint64(0) - fjordOffset := hexutil.Uint64(2) log := testlog.Logger(t, log.LvlDebug) - dp.DeployConfig.L1CancunTimeOffset = &genesisBlock // can be removed once Cancun on L1 is the default - // Activate all forks at genesis, and schedule Fjord the block after - dp.DeployConfig.L2GenesisRegolithTimeOffset = &genesisBlock - dp.DeployConfig.L2GenesisCanyonTimeOffset = &genesisBlock - dp.DeployConfig.L2GenesisDeltaTimeOffset = &genesisBlock - dp.DeployConfig.L2GenesisEcotoneTimeOffset = &genesisBlock - dp.DeployConfig.L2GenesisFjordTimeOffset = &fjordOffset - dp.DeployConfig.L2GenesisGraniteTimeOffset = 
nil - dp.DeployConfig.L2GenesisHoloceneTimeOffset = nil + fjordOffset := uint64(2) + dp.DeployConfig.ActivateForkAtOffset(rollup.Fjord, fjordOffset) + require.NoError(t, dp.DeployConfig.Check(log), "must have valid config") sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc) diff --git a/op-e2e/bindings/opcontractsmanager.go b/op-e2e/bindings/opcontractsmanager.go index e6a38eac6ef00..26dea347ff642 100644 --- a/op-e2e/bindings/opcontractsmanager.go +++ b/op-e2e/bindings/opcontractsmanager.go @@ -111,6 +111,7 @@ type OPContractsManagerImplementations struct { ProtocolVersionsImpl common.Address L1ERC721BridgeImpl common.Address OptimismPortalImpl common.Address + OptimismPortalInteropImpl common.Address EthLockboxImpl common.Address SystemConfigImpl common.Address OptimismMintableERC20FactoryImpl common.Address @@ -158,6 +159,20 @@ type OPContractsManagerRoles struct { Challenger common.Address } +// OPContractsManagerStandardValidatorValidationInput is an auto generated low-level Go binding around an user-defined struct. +type OPContractsManagerStandardValidatorValidationInput struct { + ProxyAdmin common.Address + SysCfg common.Address + AbsolutePrestate [32]byte + L2ChainID *big.Int +} + +// OPContractsManagerStandardValidatorValidationOverrides is an auto generated low-level Go binding around an user-defined struct. +type OPContractsManagerStandardValidatorValidationOverrides struct { + L1PAOMultisig common.Address + Challenger common.Address +} + // Proposal is an auto generated low-level Go binding around an user-defined struct. type Proposal struct { Root [32]byte @@ -166,13 +181,35 @@ type Proposal struct { // OPContractsManagerMetaData contains all meta data concerning the OPContractsManager contract. 
var OPContractsManagerMetaData = &bind.MetaData{ - ABI: "[{\"inputs\":[{\"internalType\":\"contractOPContractsManagerGameTypeAdder\",\"name\":\"_opcmGameTypeAdder\",\"type\":\"address\"},{\"internalType\":\"contractOPContractsManagerDeployer\",\"name\":\"_opcmDeployer\",\"type\":\"address\"},{\"internalType\":\"contractOPContractsManagerUpgrader\",\"name\":\"_opcmUpgrader\",\"type\":\"address\"},{\"internalType\":\"contractOPContractsManagerInteropMigrator\",\"name\":\"_opcmInteropMigrator\",\"type\":\"address\"},{\"internalType\":\"contractISuperchainConfig\",\"name\":\"_superchainConfig\",\"type\":\"address\"},{\"internalType\":\"contractIProtocolVersions\",\"name\":\"_protocolVersions\",\"type\":\"address\"},{\"internalType\":\"contractIProxyAdmin\",\"name\":\"_superchainProxyAdmin\",\"type\":\"address\"},{\"internalType\":\"string\",\"name\":\"_l1ContractsRelease\",\"type\":\"string\"},{\"internalType\":\"address\",\"name\":\"_upgradeController\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"components\":[{\"internalType\":\"string\",\"name\":\"saltMixer\",\"type\":\"string\"},{\"internalType\":\"contractISystemConfig\",\"name\":\"systemConfig\",\"type\":\"address\"},{\"internalType\":\"contractIProxyAdmin\",\"name\":\"proxyAdmin\",\"type\":\"address\"},{\"internalType\":\"contractIDelayedWETH\",\"name\":\"delayedWETH\",\"type\":\"address\"},{\"internalType\":\"GameType\",\"name\":\"disputeGameType\",\"type\":\"uint32\"},{\"internalType\":\"Claim\",\"name\":\"disputeAbsolutePrestate\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"disputeMaxGameDepth\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"disputeSplitDepth\",\"type\":\"uint256\"},{\"internalType\":\"Duration\",\"name\":\"disputeClockExtension\",\"type\":\"uint64\"},{\"internalType\":\"Duration\",\"name\":\"disputeMaxClockDuration\",\"type\":\"uint64\"},{\"internalType\":\"uint256\",\"name\":\"initialBond\",\"type\":\"
uint256\"},{\"internalType\":\"contractIBigStepper\",\"name\":\"vm\",\"type\":\"address\"},{\"internalType\":\"bool\",\"name\":\"permissioned\",\"type\":\"bool\"}],\"internalType\":\"structOPContractsManager.AddGameInput[]\",\"name\":\"_gameConfigs\",\"type\":\"tuple[]\"}],\"name\":\"addGameType\",\"outputs\":[{\"components\":[{\"internalType\":\"contractIDelayedWETH\",\"name\":\"delayedWETH\",\"type\":\"address\"},{\"internalType\":\"contractIFaultDisputeGame\",\"name\":\"faultDisputeGame\",\"type\":\"address\"}],\"internalType\":\"structOPContractsManager.AddGameOutput[]\",\"name\":\"\",\"type\":\"tuple[]\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"blueprints\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"addressManager\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proxy\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proxyAdmin\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"l1ChugSplashProxy\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"resolvedDelegateProxy\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"permissionedDisputeGame1\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"permissionedDisputeGame2\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"permissionlessDisputeGame1\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"permissionlessDisputeGame2\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"superPermissionedDisputeGame1\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"superPermissionedDisputeGame2\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"superPermissionlessDisputeGame1\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"superPermissionlessDisputeGame2\",\"type\":\"address\"}],\"internalType\":\"structOPContractsManager.Blueprints\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutabilit
y\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l2ChainId\",\"type\":\"uint256\"}],\"name\":\"chainIdToBatchInboxAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"opChainProxyAdminOwner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"systemConfigOwner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"batcher\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"unsafeBlockSigner\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"proposer\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"challenger\",\"type\":\"address\"}],\"internalType\":\"structOPContractsManager.Roles\",\"name\":\"roles\",\"type\":\"tuple\"},{\"internalType\":\"uint32\",\"name\":\"basefeeScalar\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"blobBasefeeScalar\",\"type\":\"uint32\"},{\"internalType\":\"uint256\",\"name\":\"l2ChainId\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"startingAnchorRoot\",\"type\":\"bytes\"},{\"internalType\":\"string\",\"name\":\"saltMixer\",\"type\":\"string\"},{\"internalType\":\"uint64\",\"name\":\"gasLimit\",\"type\":\"uint64\"},{\"internalType\":\"GameType\",\"name\":\"disputeGameType\",\"type\":\"uint32\"},{\"internalType\":\"Claim\",\"name\":\"disputeAbsolutePrestate\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"disputeMaxGameDepth\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"disputeSplitDepth\",\"type\":\"uint256\"},{\"internalType\":\"Duration\",\"name\":\"disputeClockExtension\",\"type\":\"uint64\"},{\"internalType\":\"Duration\",\"name\":\"disputeMaxClockDuration\",\"type\":\"uint64\"}],\"internalType\":\"structOPContractsManager.DeployInput\",\"name\":\"_input\",\"type\":\"tuple\"}],\"name\":\"deplo
y\",\"outputs\":[{\"components\":[{\"internalType\":\"contractIProxyAdmin\",\"name\":\"opChainProxyAdmin\",\"type\":\"address\"},{\"internalType\":\"contractIAddressManager\",\"name\":\"addressManager\",\"type\":\"address\"},{\"internalType\":\"contractIL1ERC721Bridge\",\"name\":\"l1ERC721BridgeProxy\",\"type\":\"address\"},{\"internalType\":\"contractISystemConfig\",\"name\":\"systemConfigProxy\",\"type\":\"address\"},{\"internalType\":\"contractIOptimismMintableERC20Factory\",\"name\":\"optimismMintableERC20FactoryProxy\",\"type\":\"address\"},{\"internalType\":\"contractIL1StandardBridge\",\"name\":\"l1StandardBridgeProxy\",\"type\":\"address\"},{\"internalType\":\"contractIL1CrossDomainMessenger\",\"name\":\"l1CrossDomainMessengerProxy\",\"type\":\"address\"},{\"internalType\":\"contractIETHLockbox\",\"name\":\"ethLockboxProxy\",\"type\":\"address\"},{\"internalType\":\"contractIOptimismPortal2\",\"name\":\"optimismPortalProxy\",\"type\":\"address\"},{\"internalType\":\"contractIDisputeGameFactory\",\"name\":\"disputeGameFactoryProxy\",\"type\":\"address\"},{\"internalType\":\"contractIAnchorStateRegistry\",\"name\":\"anchorStateRegistryProxy\",\"type\":\"address\"},{\"internalType\":\"contractIFaultDisputeGame\",\"name\":\"faultDisputeGame\",\"type\":\"address\"},{\"internalType\":\"contractIPermissionedDisputeGame\",\"name\":\"permissionedDisputeGame\",\"type\":\"address\"},{\"internalType\":\"contractIDelayedWETH\",\"name\":\"delayedWETHPermissionedGameProxy\",\"type\":\"address\"},{\"internalType\":\"contractIDelayedWETH\",\"name\":\"delayedWETHPermissionlessGameProxy\",\"type\":\"address\"}],\"internalType\":\"structOPContractsManager.DeployOutput\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"implementations\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"superchainConfigImpl\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"protocolVersionsImp
l\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"l1ERC721BridgeImpl\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"optimismPortalImpl\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"ethLockboxImpl\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"systemConfigImpl\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"optimismMintableERC20FactoryImpl\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"l1CrossDomainMessengerImpl\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"l1StandardBridgeImpl\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"disputeGameFactoryImpl\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"anchorStateRegistryImpl\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"delayedWETHImpl\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"mipsImpl\",\"type\":\"address\"}],\"internalType\":\"structOPContractsManager.Implementations\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"isRC\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1ContractsRelease\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"bool\",\"name\":\"usePermissionlessGame\",\"type\":\"bool\"},{\"components\":[{\"internalType\":\"Hash\",\"name\":\"root\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"l2SequenceNumber\",\"type\":\"uint256\"}],\"internalType\":\"structProposal\",\"name\":\"startingAnchorRoot\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"proposer\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"challenger\",\"type\":\"address\"},{\"internalType\"
:\"uint256\",\"name\":\"maxGameDepth\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"splitDepth\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"initBond\",\"type\":\"uint256\"},{\"internalType\":\"Duration\",\"name\":\"clockExtension\",\"type\":\"uint64\"},{\"internalType\":\"Duration\",\"name\":\"maxClockDuration\",\"type\":\"uint64\"}],\"internalType\":\"structOPContractsManagerInteropMigrator.GameParameters\",\"name\":\"gameParameters\",\"type\":\"tuple\"},{\"components\":[{\"internalType\":\"contractISystemConfig\",\"name\":\"systemConfigProxy\",\"type\":\"address\"},{\"internalType\":\"contractIProxyAdmin\",\"name\":\"proxyAdmin\",\"type\":\"address\"},{\"internalType\":\"Claim\",\"name\":\"absolutePrestate\",\"type\":\"bytes32\"}],\"internalType\":\"structOPContractsManager.OpChainConfig[]\",\"name\":\"opChainConfigs\",\"type\":\"tuple[]\"}],\"internalType\":\"structOPContractsManagerInteropMigrator.MigrateInput\",\"name\":\"_input\",\"type\":\"tuple\"}],\"name\":\"migrate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"opcmDeployer\",\"outputs\":[{\"internalType\":\"contractOPContractsManagerDeployer\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"opcmGameTypeAdder\",\"outputs\":[{\"internalType\":\"contractOPContractsManagerGameTypeAdder\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"opcmInteropMigrator\",\"outputs\":[{\"internalType\":\"contractOPContractsManagerInteropMigrator\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"opcmUpgrader\",\"outputs\":[{\"internalType\":\"contractOPContractsManagerUpgrader\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"protocolVersions\",\"outputs\":[{\"internalType
\":\"contractIProtocolVersions\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bool\",\"name\":\"_isRC\",\"type\":\"bool\"}],\"name\":\"setRC\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"superchainConfig\",\"outputs\":[{\"internalType\":\"contractISuperchainConfig\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"superchainProxyAdmin\",\"outputs\":[{\"internalType\":\"contractIProxyAdmin\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"contractISystemConfig\",\"name\":\"systemConfigProxy\",\"type\":\"address\"},{\"internalType\":\"contractIProxyAdmin\",\"name\":\"proxyAdmin\",\"type\":\"address\"},{\"internalType\":\"Claim\",\"name\":\"absolutePrestate\",\"type\":\"bytes32\"}],\"internalType\":\"structOPContractsManager.OpChainConfig[]\",\"name\":\"_prestateUpdateInputs\",\"type\":\"tuple[]\"}],\"name\":\"updatePrestate\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"contractISystemConfig\",\"name\":\"systemConfigProxy\",\"type\":\"address\"},{\"internalType\":\"contractIProxyAdmin\",\"name\":\"proxyAdmin\",\"type\":\"address\"},{\"internalType\":\"Claim\",\"name\":\"absolutePrestate\",\"type\":\"bytes32\"}],\"internalType\":\"structOPContractsManager.OpChainConfig[]\",\"name\":\"_opChainConfigs\",\"type\":\"tuple[]\"}],\"name\":\"upgrade\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"upgradeController\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stat
eMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"who\",\"type\":\"address\"}],\"name\":\"AddressHasNoCode\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"who\",\"type\":\"address\"}],\"name\":\"AddressNotFound\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"AlreadyReleased\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidChainId\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidGameConfigs\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"string\",\"name\":\"role\",\"type\":\"string\"}],\"name\":\"InvalidRoleAddress\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidStartingAnchorRoot\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"LatestReleaseNotSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyDelegatecall\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyUpgradeController\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PrestateNotSet\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"PrestateRequired\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"contractISystemConfig\",\"name\":\"systemConfig\",\"type\":\"address\"}],\"name\":\"SuperchainConfigMismatch\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"SuperchainProxyAdminMismatch\",\"type\":\"error\"}]", + ABI: 
"[{\"type\":\"constructor\",\"inputs\":[{\"name\":\"_opcmGameTypeAdder\",\"type\":\"address\",\"internalType\":\"contractOPContractsManagerGameTypeAdder\"},{\"name\":\"_opcmDeployer\",\"type\":\"address\",\"internalType\":\"contractOPContractsManagerDeployer\"},{\"name\":\"_opcmUpgrader\",\"type\":\"address\",\"internalType\":\"contractOPContractsManagerUpgrader\"},{\"name\":\"_opcmInteropMigrator\",\"type\":\"address\",\"internalType\":\"contractOPContractsManagerInteropMigrator\"},{\"name\":\"_opcmStandardValidator\",\"type\":\"address\",\"internalType\":\"contractOPContractsManagerStandardValidator\"},{\"name\":\"_superchainConfig\",\"type\":\"address\",\"internalType\":\"contractISuperchainConfig\"},{\"name\":\"_protocolVersions\",\"type\":\"address\",\"internalType\":\"contractIProtocolVersions\"},{\"name\":\"_superchainProxyAdmin\",\"type\":\"address\",\"internalType\":\"contractIProxyAdmin\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"addGameType\",\"inputs\":[{\"name\":\"_gameConfigs\",\"type\":\"tuple[]\",\"internalType\":\"structOPContractsManager.AddGameInput[]\",\"components\":[{\"name\":\"saltMixer\",\"type\":\"string\",\"internalType\":\"string\"},{\"name\":\"systemConfig\",\"type\":\"address\",\"internalType\":\"contractISystemConfig\"},{\"name\":\"proxyAdmin\",\"type\":\"address\",\"internalType\":\"contractIProxyAdmin\"},{\"name\":\"delayedWETH\",\"type\":\"address\",\"internalType\":\"contractIDelayedWETH\"},{\"name\":\"disputeGameType\",\"type\":\"uint32\",\"internalType\":\"GameType\"},{\"name\":\"disputeAbsolutePrestate\",\"type\":\"bytes32\",\"internalType\":\"Claim\"},{\"name\":\"disputeMaxGameDepth\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"disputeSplitDepth\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"disputeClockExtension\",\"type\":\"uint64\",\"internalType\":\"Duration\"},{\"name\":\"disputeMaxClockDuration\",\"type\":\"uint64\",\"internalType\":\"Duration\"},{\"name\":
\"initialBond\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"vm\",\"type\":\"address\",\"internalType\":\"contractIBigStepper\"},{\"name\":\"permissioned\",\"type\":\"bool\",\"internalType\":\"bool\"}]}],\"outputs\":[{\"name\":\"\",\"type\":\"tuple[]\",\"internalType\":\"structOPContractsManager.AddGameOutput[]\",\"components\":[{\"name\":\"delayedWETH\",\"type\":\"address\",\"internalType\":\"contractIDelayedWETH\"},{\"name\":\"faultDisputeGame\",\"type\":\"address\",\"internalType\":\"contractIFaultDisputeGame\"}]}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"blueprints\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"tuple\",\"internalType\":\"structOPContractsManager.Blueprints\",\"components\":[{\"name\":\"addressManager\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"proxy\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"proxyAdmin\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"l1ChugSplashProxy\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"resolvedDelegateProxy\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"permissionedDisputeGame1\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"permissionedDisputeGame2\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"permissionlessDisputeGame1\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"permissionlessDisputeGame2\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"superPermissionedDisputeGame1\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"superPermissionedDisputeGame2\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"superPermissionlessDisputeGame1\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"superPermissionlessDisputeGame2\",\"type\":\"address\",\"internalType\":\"address\"}]}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"chainIdToBatchInboxAddress\",
\"inputs\":[{\"name\":\"_l2ChainId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"deploy\",\"inputs\":[{\"name\":\"_input\",\"type\":\"tuple\",\"internalType\":\"structOPContractsManager.DeployInput\",\"components\":[{\"name\":\"roles\",\"type\":\"tuple\",\"internalType\":\"structOPContractsManager.Roles\",\"components\":[{\"name\":\"opChainProxyAdminOwner\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"systemConfigOwner\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"batcher\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"unsafeBlockSigner\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"proposer\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"challenger\",\"type\":\"address\",\"internalType\":\"address\"}]},{\"name\":\"basefeeScalar\",\"type\":\"uint32\",\"internalType\":\"uint32\"},{\"name\":\"blobBasefeeScalar\",\"type\":\"uint32\",\"internalType\":\"uint32\"},{\"name\":\"l2ChainId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"startingAnchorRoot\",\"type\":\"bytes\",\"internalType\":\"bytes\"},{\"name\":\"saltMixer\",\"type\":\"string\",\"internalType\":\"string\"},{\"name\":\"gasLimit\",\"type\":\"uint64\",\"internalType\":\"uint64\"},{\"name\":\"disputeGameType\",\"type\":\"uint32\",\"internalType\":\"GameType\"},{\"name\":\"disputeAbsolutePrestate\",\"type\":\"bytes32\",\"internalType\":\"Claim\"},{\"name\":\"disputeMaxGameDepth\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"disputeSplitDepth\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"disputeClockExtension\",\"type\":\"uint64\",\"internalType\":\"Duration\"},{\"name\":\"disputeMaxClockDuration\",\"type\":\"uint64\",\"internalType\":\"Duration\"}]}],\"outputs\":[{\"name\":\"\",\"type\":\"tuple\",\"internalType\":\"structO
PContractsManager.DeployOutput\",\"components\":[{\"name\":\"opChainProxyAdmin\",\"type\":\"address\",\"internalType\":\"contractIProxyAdmin\"},{\"name\":\"addressManager\",\"type\":\"address\",\"internalType\":\"contractIAddressManager\"},{\"name\":\"l1ERC721BridgeProxy\",\"type\":\"address\",\"internalType\":\"contractIL1ERC721Bridge\"},{\"name\":\"systemConfigProxy\",\"type\":\"address\",\"internalType\":\"contractISystemConfig\"},{\"name\":\"optimismMintableERC20FactoryProxy\",\"type\":\"address\",\"internalType\":\"contractIOptimismMintableERC20Factory\"},{\"name\":\"l1StandardBridgeProxy\",\"type\":\"address\",\"internalType\":\"contractIL1StandardBridge\"},{\"name\":\"l1CrossDomainMessengerProxy\",\"type\":\"address\",\"internalType\":\"contractIL1CrossDomainMessenger\"},{\"name\":\"ethLockboxProxy\",\"type\":\"address\",\"internalType\":\"contractIETHLockbox\"},{\"name\":\"optimismPortalProxy\",\"type\":\"address\",\"internalType\":\"contractIOptimismPortal2\"},{\"name\":\"disputeGameFactoryProxy\",\"type\":\"address\",\"internalType\":\"contractIDisputeGameFactory\"},{\"name\":\"anchorStateRegistryProxy\",\"type\":\"address\",\"internalType\":\"contractIAnchorStateRegistry\"},{\"name\":\"faultDisputeGame\",\"type\":\"address\",\"internalType\":\"contractIFaultDisputeGame\"},{\"name\":\"permissionedDisputeGame\",\"type\":\"address\",\"internalType\":\"contractIPermissionedDisputeGame\"},{\"name\":\"delayedWETHPermissionedGameProxy\",\"type\":\"address\",\"internalType\":\"contractIDelayedWETH\"},{\"name\":\"delayedWETHPermissionlessGameProxy\",\"type\":\"address\",\"internalType\":\"contractIDelayedWETH\"}]}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"devFeatureBitmap\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"implementations\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"tuple\",\"internalType\":\"structOPCo
ntractsManager.Implementations\",\"components\":[{\"name\":\"superchainConfigImpl\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"protocolVersionsImpl\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"l1ERC721BridgeImpl\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"optimismPortalImpl\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"optimismPortalInteropImpl\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"ethLockboxImpl\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"systemConfigImpl\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"optimismMintableERC20FactoryImpl\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"l1CrossDomainMessengerImpl\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"l1StandardBridgeImpl\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"disputeGameFactoryImpl\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"anchorStateRegistryImpl\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"delayedWETHImpl\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"mipsImpl\",\"type\":\"address\",\"internalType\":\"address\"}]}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"isDevFeatureEnabled\",\"inputs\":[{\"name\":\"_feature\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"migrate\",\"inputs\":[{\"name\":\"_input\",\"type\":\"tuple\",\"internalType\":\"structOPContractsManagerInteropMigrator.MigrateInput\",\"components\":[{\"name\":\"usePermissionlessGame\",\"type\":\"bool\",\"internalType\":\"bool\"},{\"name\":\"startingAnchorRoot\",\"type\":\"tuple\",\"internalType\":\"structProposal\",\"components\":[{\"name\":\"root\",\"type\":\"bytes32\",\"internalType\":\"Hash\"},{\"name\":\"l2SequenceNumber\",\
"type\":\"uint256\",\"internalType\":\"uint256\"}]},{\"name\":\"gameParameters\",\"type\":\"tuple\",\"internalType\":\"structOPContractsManagerInteropMigrator.GameParameters\",\"components\":[{\"name\":\"proposer\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"challenger\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"maxGameDepth\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"splitDepth\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"initBond\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"clockExtension\",\"type\":\"uint64\",\"internalType\":\"Duration\"},{\"name\":\"maxClockDuration\",\"type\":\"uint64\",\"internalType\":\"Duration\"}]},{\"name\":\"opChainConfigs\",\"type\":\"tuple[]\",\"internalType\":\"structOPContractsManager.OpChainConfig[]\",\"components\":[{\"name\":\"systemConfigProxy\",\"type\":\"address\",\"internalType\":\"contractISystemConfig\"},{\"name\":\"proxyAdmin\",\"type\":\"address\",\"internalType\":\"contractIProxyAdmin\"},{\"name\":\"absolutePrestate\",\"type\":\"bytes32\",\"internalType\":\"Claim\"}]}]}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"opcmDeployer\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"contractOPContractsManagerDeployer\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"opcmGameTypeAdder\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"contractOPContractsManagerGameTypeAdder\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"opcmInteropMigrator\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"contractOPContractsManagerInteropMigrator\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"opcmStandardValidator\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"contractOPContractsManagerStandardValidator\"}],\"sta
teMutability\":\"view\"},{\"type\":\"function\",\"name\":\"opcmUpgrader\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"contractOPContractsManagerUpgrader\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"protocolVersions\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"contractIProtocolVersions\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"superchainConfig\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"contractISuperchainConfig\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"superchainProxyAdmin\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"contractIProxyAdmin\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"updatePrestate\",\"inputs\":[{\"name\":\"_prestateUpdateInputs\",\"type\":\"tuple[]\",\"internalType\":\"structOPContractsManager.OpChainConfig[]\",\"components\":[{\"name\":\"systemConfigProxy\",\"type\":\"address\",\"internalType\":\"contractISystemConfig\"},{\"name\":\"proxyAdmin\",\"type\":\"address\",\"internalType\":\"contractIProxyAdmin\"},{\"name\":\"absolutePrestate\",\"type\":\"bytes32\",\"internalType\":\"Claim\"}]}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"upgrade\",\"inputs\":[{\"name\":\"_opChainConfigs\",\"type\":\"tuple[]\",\"internalType\":\"structOPContractsManager.OpChainConfig[]\",\"components\":[{\"name\":\"systemConfigProxy\",\"type\":\"address\",\"internalType\":\"contractISystemConfig\"},{\"name\":\"proxyAdmin\",\"type\":\"address\",\"internalType\":\"contractIProxyAdmin\"},{\"name\":\"absolutePrestate\",\"type\":\"bytes32\",\"internalType\":\"Claim\"}]}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"validate\",\"inputs\":[{\"name\":\"_input\",\"type\":\"tuple\",\"internalType\":\"structOPContractsManagerStandardValidator.ValidationInp
ut\",\"components\":[{\"name\":\"proxyAdmin\",\"type\":\"address\",\"internalType\":\"contractIProxyAdmin\"},{\"name\":\"sysCfg\",\"type\":\"address\",\"internalType\":\"contractISystemConfig\"},{\"name\":\"absolutePrestate\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"},{\"name\":\"l2ChainID\",\"type\":\"uint256\",\"internalType\":\"uint256\"}]},{\"name\":\"_allowFailure\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"outputs\":[{\"name\":\"\",\"type\":\"string\",\"internalType\":\"string\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"validateWithOverrides\",\"inputs\":[{\"name\":\"_input\",\"type\":\"tuple\",\"internalType\":\"structOPContractsManagerStandardValidator.ValidationInput\",\"components\":[{\"name\":\"proxyAdmin\",\"type\":\"address\",\"internalType\":\"contractIProxyAdmin\"},{\"name\":\"sysCfg\",\"type\":\"address\",\"internalType\":\"contractISystemConfig\"},{\"name\":\"absolutePrestate\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"},{\"name\":\"l2ChainID\",\"type\":\"uint256\",\"internalType\":\"uint256\"}]},{\"name\":\"_allowFailure\",\"type\":\"bool\",\"internalType\":\"bool\"},{\"name\":\"_overrides\",\"type\":\"tuple\",\"internalType\":\"structOPContractsManagerStandardValidator.ValidationOverrides\",\"components\":[{\"name\":\"l1PAOMultisig\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"challenger\",\"type\":\"address\",\"internalType\":\"address\"}]}],\"outputs\":[{\"name\":\"\",\"type\":\"string\",\"internalType\":\"string\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"version\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"string\",\"internalType\":\"string\"}],\"stateMutability\":\"pure\"},{\"type\":\"error\",\"name\":\"AddressHasNoCode\",\"inputs\":[{\"name\":\"who\",\"type\":\"address\",\"internalType\":\"address\"}]},{\"type\":\"error\",\"name\":\"AddressNotFound\",\"inputs\":[{\"name\":\"who\",\"type\":\"address\",\"internalType\":\"address\"}]},{\"type\":\"e
rror\",\"name\":\"AlreadyReleased\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"InvalidChainId\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"InvalidGameConfigs\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"InvalidRoleAddress\",\"inputs\":[{\"name\":\"role\",\"type\":\"string\",\"internalType\":\"string\"}]},{\"type\":\"error\",\"name\":\"InvalidStartingAnchorRoot\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"LatestReleaseNotSet\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"OnlyDelegatecall\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"PrestateNotSet\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"PrestateRequired\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"SuperchainConfigMismatch\",\"inputs\":[{\"name\":\"systemConfig\",\"type\":\"address\",\"internalType\":\"contractISystemConfig\"}]},{\"type\":\"error\",\"name\":\"SuperchainProxyAdminMismatch\",\"inputs\":[]}]", + Bin: "0x6101a06040523480156200001257600080fd5b5060405162002cd838038062002cd883398101604081905262000035916200030c565b60405163b6a4cd2160e01b81526001600160a01b03848116600483015288169063b6a4cd219060240160006040518083038186803b1580156200007757600080fd5b505afa1580156200008c573d6000803e3d6000fd5b505060405163b6a4cd2160e01b81526001600160a01b0385811660048301528a16925063b6a4cd21915060240160006040518083038186803b158015620000d257600080fd5b505afa158015620000e7573d6000803e3d6000fd5b505060405163b6a4cd2160e01b81526001600160a01b038b811660048301528a16925063b6a4cd21915060240160006040518083038186803b1580156200012d57600080fd5b505afa15801562000142573d6000803e3d6000fd5b505060405163b6a4cd2160e01b81526001600160a01b038a1660048201819052925063b6a4cd21915060240160006040518083038186803b1580156200018757600080fd5b505afa1580156200019c573d6000803e3d6000fd5b505060405163b6a4cd2160e01b81526001600160a01b0389811660048301528a16925063b6a4cd21915060240160006040518083038186803b158015620001e257600080fd5b505afa158015620001f7573d6000803e3d6000fd5b505060405163b6a4cd2160e01b81526001600160a01b0388811660048301528a16925063b6a4cd2191506024
0160006040518083038186803b1580156200023d57600080fd5b505afa15801562000252573d6000803e3d6000fd5b505060405163b6a4cd2160e01b81526001600160a01b0387811660048301528a16925063b6a4cd21915060240160006040518083038186803b1580156200029857600080fd5b505afa158015620002ad573d6000803e3d6000fd5b5050506001600160a01b039889166080525095871660a05293861660c05291851660e05284166101005283166101205282166101405216610160523061018052620003cd565b6001600160a01b03811681146200030957600080fd5b50565b600080600080600080600080610100898b0312156200032a57600080fd5b88516200033781620002f3565b60208a01519098506200034a81620002f3565b60408a01519097506200035d81620002f3565b60608a01519096506200037081620002f3565b60808a01519095506200038381620002f3565b60a08a01519094506200039681620002f3565b60c08a0151909350620003a981620002f3565b60e08a0151909250620003bc81620002f3565b809150509295985092959890939650565b60805160a05160c05160e051610100516101205161014051610160516101805161281c620004bc600039600081816104580152818161084f01528181610be501528181610cf10152610ed60152600061022d0152600061035801526000818161029c0152610a170152600081816103f0015281816106230152610ab80152600081816101d0015261091901526000818161018c01528181610cbc0152610fa00152600081816103310152818161056b01528181610719015281816107cc015281816109e001528181610b680152610e4b015260008181610417015281816105240152610dbb015261281c6000f3fe608060405234801561001057600080fd5b50600436106101825760003560e01c8063613e827b116100d8578063b0b807eb1161008c578063ba7903db11610066578063ba7903db146103eb578063becbdf4a14610412578063ff2dd5a11461043957600080fd5b8063b0b807eb146103b0578063b23cc044146103c3578063b51f9c2b146103d657600080fd5b80636624856a116100bd5780636624856a1461035357806367cda69c1461037a57806378ecabce1461038d57600080fd5b8063613e827b1461030c578063622d56f11461032c57600080fd5b806330d148881161013a57806335e80ab31161011457806335e80ab3146102975780633fe13f3f146102be57806354fd4d50146102d357600080fd5b806330d148881461024f57806330e9012c1461026f578063318b1b801461028457600080fd5b80631661a2e91161016b5780631661a2e9146101f2
5780631d8a4e92146102125780632b96b8391461022857600080fd5b806303dbe68c146101875780631481a724146101cb575b600080fd5b6101ae7f000000000000000000000000000000000000000000000000000000000000000081565b6040516001600160a01b0390911681526020015b60405180910390f35b6101ae7f000000000000000000000000000000000000000000000000000000000000000081565b610205610200366004611260565b61044c565b6040516101c29190611408565b61021a610567565b6040519081526020016101c2565b6101ae7f000000000000000000000000000000000000000000000000000000000000000081565b61026261025d3660046114dc565b6105f0565b6040516101c2919061156c565b6102776106a6565b6040516101c2919061157f565b6101ae6102923660046116b3565b61079a565b6101ae7f000000000000000000000000000000000000000000000000000000000000000081565b6102d16102cc3660046116cc565b610845565b005b60408051808201909152600581527f342e302e300000000000000000000000000000000000000000000000000000006020820152610262565b61031f61031a366004611708565b610943565b6040516101c29190611744565b6101ae7f000000000000000000000000000000000000000000000000000000000000000081565b6101ae7f000000000000000000000000000000000000000000000000000000000000000081565b610262610388366004611881565b610a85565b6103a061039b3660046116b3565b610b36565b60405190151581526020016101c2565b6102d16103be36600461191b565b610bdb565b6102d16103d1366004611949565b610ce7565b6103de610de0565b6040516101c29190611a10565b6101ae7f000000000000000000000000000000000000000000000000000000000000000081565b6101ae7f000000000000000000000000000000000000000000000000000000000000000081565b6102d1610447366004611b33565b610ecc565b60606001600160a01b037f00000000000000000000000000000000000000000000000000000000000000001630036104b0576040517f0a57d61d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000826040516024016104c39190611bf7565b60408051601f198184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f1661a2e900000000000000000000000000000000000000000000000000000000179052905060006105497f0000000000000000000000000000000000000000
00000000000000000000000083610fc1565b90508080602001905181019061055f9190611d44565b949350505050565b60007f00000000000000000000000000000000000000000000000000000000000000006001600160a01b0316631d8a4e926040518163ffffffff1660e01b8152600401602060405180830381865afa1580156105c7573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906105eb9190611e0d565b905090565b6040517f30d148880000000000000000000000000000000000000000000000000000000081526060906001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016906330d148889061065a9086908690600401611e26565b600060405180830381865afa158015610677573d6000803e3d6000fd5b505050506040513d6000823e601f3d908101601f1916820160405261069f9190810190611e71565b9392505050565b604080516101c081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e08101829052610100810182905261012081018290526101408101829052610160810182905261018081018290526101a08101919091527f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03166330e9012c6040518163ffffffff1660e01b81526004016101c060405180830381865afa158015610776573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906105eb9190611ee8565b6040517f318b1b80000000000000000000000000000000000000000000000000000000008152600481018290526000907f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03169063318b1b8090602401602060405180830381865afa15801561081b573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061083f9190611ff8565b92915050565b6001600160a01b037f00000000000000000000000000000000000000000000000000000000000000001630036108a7576040517f0a57d61d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6000816040516024016108ba91906120ed565b60408051601f198184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f3fe13f3f00000000000000000000000000000000000000000000000000000000179052905061093e7f00000000000000
0000000000000000000000000000000000000000000000000082610fc1565b505050565b604080516101e081018252600080825260208201819052818301819052606082018190526080820181905260a0820181905260c0820181905260e08201819052610100820181905261012082018190526101408201819052610160820181905261018082018190526101a082018190526101c082015290517fb2e48a3f0000000000000000000000000000000000000000000000000000000081527f00000000000000000000000000000000000000000000000000000000000000006001600160a01b03169063b2e48a3f90610a419085907f0000000000000000000000000000000000000000000000000000000000000000903390600401612300565b6101e0604051808303816000875af1158015610a61573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061083f91906124b5565b6040517f67cda69c0000000000000000000000000000000000000000000000000000000081526060906001600160a01b037f000000000000000000000000000000000000000000000000000000000000000016906367cda69c90610af1908790879087906004016125cc565b600060405180830381865afa158015610b0e573d6000803e3d6000fd5b505050506040513d6000823e601f3d908101601f1916820160405261055f9190810190611e71565b6040517f78ecabce000000000000000000000000000000000000000000000000000000008152600481018290526000907f00000000000000000000000000000000000000000000000000000000000000006001600160a01b0316906378ecabce90602401602060405180830381865afa158015610bb7573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061083f9190612636565b6001600160a01b037f0000000000000000000000000000000000000000000000000000000000000000163003610c3d576040517f0a57d61d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040516001600160a01b0380841660248301528216604482015260009060640160408051601f198184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fb0b807eb000000000000000000000000000000000000000000000000000000001790529050610ce17f000000000000000000000000000000000000000000000000000000000000000082610fc1565b50505050565b6001600160a01b037f0000000000000000000000000000000000000000000000
000000000000000000163003610d49576040517f0a57d61d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600081604051602401610d5c9190612653565b60408051601f198184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fb23cc04400000000000000000000000000000000000000000000000000000000179052905061093e7f000000000000000000000000000000000000000000000000000000000000000082610fc1565b604080516101a081018252600080825260208201819052918101829052606081018290526080810182905260a0810182905260c0810182905260e0810182905261010081018290526101208101829052610140810182905261016081018290526101808101919091527f00000000000000000000000000000000000000000000000000000000000000006001600160a01b031663b51f9c2b6040518163ffffffff1660e01b81526004016101a060405180830381865afa158015610ea8573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906105eb91906126a8565b6001600160a01b037f0000000000000000000000000000000000000000000000000000000000000000163003610f2e576040517f0a57d61d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600081604051602401610f41919061279b565b60408051601f198184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fff2dd5a100000000000000000000000000000000000000000000000000000000179052905061093e7f0000000000000000000000000000000000000000000000000000000000000000825b6060600080846001600160a01b031684604051610fde91906127f3565b600060405180830381855af49150503d8060008114611019576040519150601f19603f3d011682016040523d82523d6000602084013e61101e565b606091505b50915091508161055f57805160208201fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6040516101a0810167ffffffffffffffff8111828210171561108357611083611030565b60405290565b6040805190810167ffffffffffffffff8111828210171561108357611083611030565b6040516060810167ffffffffffffffff8111828210171561108357611083611030565b6040516101c0810167ffffffffffffffff811182821017156110835761108361
1030565b6040516101e0810167ffffffffffffffff8111828210171561108357611083611030565b604051601f8201601f1916810167ffffffffffffffff8111828210171561114057611140611030565b604052919050565b600067ffffffffffffffff82111561116257611162611030565b5060051b60200190565b600067ffffffffffffffff82111561118657611186611030565b50601f01601f191660200190565b600082601f8301126111a557600080fd5b81356111b86111b38261116c565b611117565b8181528460208386010111156111cd57600080fd5b816020850160208301376000918101602001919091529392505050565b6001600160a01b03811681146111ff57600080fd5b50565b803561120d816111ea565b919050565b803563ffffffff8116811461120d57600080fd5b67ffffffffffffffff811681146111ff57600080fd5b803561120d81611226565b80151581146111ff57600080fd5b803561120d81611247565b6000602080838503121561127357600080fd5b823567ffffffffffffffff8082111561128b57600080fd5b818501915085601f83011261129f57600080fd5b81356112ad6111b382611148565b81815260059190911b830184019084810190888311156112cc57600080fd5b8585015b838110156113fb578035858111156112e85760008081fd5b86016101a0818c03601f19018113156113015760008081fd5b61130961105f565b898301358881111561131b5760008081fd5b6113298e8c83870101611194565b8252506040611339818501611202565b8b830152606061134a818601611202565b828401526080915061135d828601611202565b9083015260a061136e858201611212565b8284015260c0915081850135818401525060e0808501358284015261010091508185013581840152506101206113a581860161123c565b8284015261014091506113b982860161123c565b8184015250610160808501358284015261018091506113d9828601611202565b908301526113e8848401611255565b90820152855250509186019186016112d0565b5098975050505050505050565b602080825282518282018190526000919060409081850190868401855b8281101561145757815180516001600160a01b0390811686529087015116868501529284019290850190600101611425565b5091979650505050505050565b60006080828403121561147657600080fd5b6040516080810181811067ffffffffffffffff8211171561149957611499611030565b60405290508082356114aa816111ea565b815260208301356114ba816111ea565b8060208301525060408301356040820152606083013560608201525092
915050565b60008060a083850312156114ef57600080fd5b6114f98484611464565b9150608083013561150981611247565b809150509250929050565b60005b8381101561152f578181015183820152602001611517565b83811115610ce15750506000910152565b60008151808452611558816020860160208601611514565b601f01601f19169290920160200192915050565b60208152600061069f6020830184611540565b81516001600160a01b031681526101c0810160208301516115ab60208401826001600160a01b03169052565b5060408301516115c660408401826001600160a01b03169052565b5060608301516115e160608401826001600160a01b03169052565b5060808301516115fc60808401826001600160a01b03169052565b5060a083015161161760a08401826001600160a01b03169052565b5060c083015161163260c08401826001600160a01b03169052565b5060e083015161164d60e08401826001600160a01b03169052565b50610100838101516001600160a01b0390811691840191909152610120808501518216908401526101408085015182169084015261016080850151821690840152610180808501518216908401526101a08085015191821681850152905b505092915050565b6000602082840312156116c557600080fd5b5035919050565b6000602082840312156116de57600080fd5b813567ffffffffffffffff8111156116f557600080fd5b8201610160818503121561069f57600080fd5b60006020828403121561171a57600080fd5b813567ffffffffffffffff81111561173157600080fd5b8201610240818503121561069f57600080fd5b81516001600160a01b031681526101e08101602083015161177060208401826001600160a01b03169052565b50604083015161178b60408401826001600160a01b03169052565b5060608301516117a660608401826001600160a01b03169052565b5060808301516117c160808401826001600160a01b03169052565b5060a08301516117dc60a08401826001600160a01b03169052565b5060c08301516117f760c08401826001600160a01b03169052565b5060e083015161181260e08401826001600160a01b03169052565b50610100838101516001600160a01b0390811691840191909152610120808501518216908401526101408085015182169084015261016080850151821690840152610180808501518216908401526101a0808501518216908401526101c08085015191821681850152906116ab565b600080600083850360e081121561189757600080fd5b6118a18686611464565b935060808501356118b181611247565b925060407fffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffff60820112156118e357600080fd5b506118ec611089565b60a08501356118fa816111ea565b815260c085013561190a816111ea565b602082015292959194509192509050565b6000806040838503121561192e57600080fd5b8235611939816111ea565b91506020830135611509816111ea565b6000602080838503121561195c57600080fd5b823567ffffffffffffffff81111561197357600080fd5b8301601f8101851361198457600080fd5b80356119926111b382611148565b818152606091820283018401918482019190888411156119b157600080fd5b938501935b83851015611a045780858a0312156119ce5760008081fd5b6119d66110ac565b85356119e1816111ea565b8152858701358782015260408087013590820152835293840193918501916119b6565b50979650505050505050565b81516001600160a01b031681526101a081016020830151611a3c60208401826001600160a01b03169052565b506040830151611a5760408401826001600160a01b03169052565b506060830151611a7260608401826001600160a01b03169052565b506080830151611a8d60808401826001600160a01b03169052565b5060a0830151611aa860a08401826001600160a01b03169052565b5060c0830151611ac360c08401826001600160a01b03169052565b5060e0830151611ade60e08401826001600160a01b03169052565b50610100838101516001600160a01b03908116918401919091526101208085015182169084015261014080850151821690840152610160808501518216908401526101808085015191821681850152906116ab565b60006020808385031215611b4657600080fd5b823567ffffffffffffffff811115611b5d57600080fd5b8301601f81018513611b6e57600080fd5b8035611b7c6111b382611148565b81815260609182028301840191848201919088841115611b9b57600080fd5b938501935b83851015611a045780858a031215611bb85760008081fd5b611bc06110ac565b8535611bcb816111ea565b815285870135611bda816111ea565b818801526040868101359082015283529384019391850191611ba0565b60006020808301818452808551808352604092508286019150828160051b87010184880160005b83811015611d2b577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc089840301855281516101a08151818652611c6382870182611540565b91505088820151611c7e8a8701826001600160a01b03169052565b50878201516001600160a01b03908116868a015260608084015182169087015260808084015163ffff
ffff169087015260a0808401519087015260c0808401519087015260e080840151908701526101008084015167ffffffffffffffff9081169188019190915261012080850151909116908701526101408084015190870152610160808401519091169086015261018091820151151591909401529386019390860190600101611c1e565b509098975050505050505050565b805161120d816111ea565b60006020808385031215611d5757600080fd5b825167ffffffffffffffff811115611d6e57600080fd5b8301601f81018513611d7f57600080fd5b8051611d8d6111b382611148565b81815260069190911b82018301908381019087831115611dac57600080fd5b928401925b82841015611e025760408489031215611dca5760008081fd5b611dd2611089565b8451611ddd816111ea565b815284860151611dec816111ea565b8187015282526040939093019290840190611db1565b979650505050505050565b600060208284031215611e1f57600080fd5b5051919050565b60a08101611e6282856001600160a01b038082511683528060208301511660208401525060408101516040830152606081015160608301525050565b82151560808301529392505050565b600060208284031215611e8357600080fd5b815167ffffffffffffffff811115611e9a57600080fd5b8201601f81018413611eab57600080fd5b8051611eb96111b38261116c565b818152856020838501011115611ece57600080fd5b611edf826020830160208601611514565b95945050505050565b60006101c08284031215611efb57600080fd5b611f036110cf565b611f0c83611d39565b8152611f1a60208401611d39565b6020820152611f2b60408401611d39565b6040820152611f3c60608401611d39565b6060820152611f4d60808401611d39565b6080820152611f5e60a08401611d39565b60a0820152611f6f60c08401611d39565b60c0820152611f8060e08401611d39565b60e0820152610100611f93818501611d39565b90820152610120611fa5848201611d39565b90820152610140611fb7848201611d39565b90820152610160611fc9848201611d39565b90820152610180611fdb848201611d39565b908201526101a0611fed848201611d39565b908201529392505050565b60006020828403121561200a57600080fd5b815161069f816111ea565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe184360301811261204a57600080fd5b830160208101925035905067ffffffffffffffff81111561206a57600080fd5b60608102360382131561207c57600080fd5b9250929050565b818352600060208085019450
8260005b858110156120e25781356120a6816111ea565b6001600160a01b03908116885282840135906120c1826111ea565b16878401526040828101359088015260609687019690910190600101612093565b509495945050505050565b60208152600082356120fe81611247565b80151560208401525060208301356040830152604083013560608301526060830135612129816111ea565b6001600160a01b03808216608085015260808501359150612149826111ea565b80821660a0850152505060a083013560c083015260c083013560e083015261010060e08401358184015280840135905061218281611226565b61012067ffffffffffffffff82168185015261219f81860161123c565b9150506101406121ba8185018367ffffffffffffffff169052565b6121c681860186612015565b6101608681015292509050611edf61018085018383612083565b80356121eb816111ea565b6001600160a01b039081168352602082013590612207826111ea565b908116602084015260408201359061221e826111ea565b9081166040840152606082013590612235826111ea565b908116606084015260808201359061224c826111ea565b908116608084015260a082013590612263826111ea565b80821660a085015250505050565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18436030181126122a657600080fd5b830160208101925035905067ffffffffffffffff8111156122c657600080fd5b80360382131561207c57600080fd5b818352818160208501375060006020828401015260006020601f19601f840116840101905092915050565b6060815261231160608201856121e0565b600061231f60c08601611212565b6101206123338185018363ffffffff169052565b61233f60e08801611212565b91506101406123558186018463ffffffff169052565b61016092506101008801358386015261237082890189612271565b9250610240610180818189015261238c6102a0890186856122d5565b945061239a848c018c612271565b945092506101a07fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa089870301818a01526123d58686866122d5565b95506123e2878d0161123c565b96506101c094506123fe858a018867ffffffffffffffff169052565b612409828d01611212565b96506101e09350612421848a018863ffffffff169052565b6102009650808c0135878a01525050610220838b013581890152828b01358289015261244e868c0161123c565b67ffffffffffffffff81166102608a0152955061246c818c0161123c565b9550505050506124896102
8085018367ffffffffffffffff169052565b6001600160a01b038616602085015291506124a19050565b6001600160a01b038316604083015261055f565b60006101e082840312156124c857600080fd5b6124d06110f3565b6124d983611d39565b81526124e760208401611d39565b60208201526124f860408401611d39565b604082015261250960608401611d39565b606082015261251a60808401611d39565b608082015261252b60a08401611d39565b60a082015261253c60c08401611d39565b60c082015261254d60e08401611d39565b60e0820152610100612560818501611d39565b90820152610120612572848201611d39565b90820152610140612584848201611d39565b90820152610160612596848201611d39565b908201526101806125a8848201611d39565b908201526101a06125ba848201611d39565b908201526101c0611fed848201611d39565b60e0810161260882866001600160a01b038082511683528060208301511660208401525060408101516040830152606081015160608301525050565b83151560808301526001600160a01b038084511660a08401528060208501511660c084015250949350505050565b60006020828403121561264857600080fd5b815161069f81611247565b602080825282518282018190526000919060409081850190868401855b8281101561145757815180516001600160a01b0316855286810151878601528501518585015260609093019290850190600101612670565b60006101a082840312156126bb57600080fd5b6126c361105f565b6126cc83611d39565b81526126da60208401611d39565b60208201526126eb60408401611d39565b60408201526126fc60608401611d39565b606082015261270d60808401611d39565b608082015261271e60a08401611d39565b60a082015261272f60c08401611d39565b60c082015261274060e08401611d39565b60e0820152610100612753818501611d39565b90820152610120612765848201611d39565b90820152610140612777848201611d39565b90820152610160612789848201611d39565b90820152610180611fed848201611d39565b602080825282518282018190526000919060409081850190868401855b8281101561145757815180516001600160a01b03908116865287820151168786015285015185850152606090930192908501906001016127b8565b60008251612805818460208701611514565b919091019291505056fea164736f6c634300080f000a", } // OPContractsManagerABI is the input ABI used to generate the binding from. 
// Deprecated: Use OPContractsManagerMetaData.ABI instead. var OPContractsManagerABI = OPContractsManagerMetaData.ABI +// OPContractsManagerBin is the compiled bytecode used for deploying new contracts. +// Deprecated: Use OPContractsManagerMetaData.Bin instead. +var OPContractsManagerBin = OPContractsManagerMetaData.Bin + +// DeployOPContractsManager deploys a new Ethereum contract, binding an instance of OPContractsManager to it. +func DeployOPContractsManager(auth *bind.TransactOpts, backend bind.ContractBackend, _opcmGameTypeAdder common.Address, _opcmDeployer common.Address, _opcmUpgrader common.Address, _opcmInteropMigrator common.Address, _opcmStandardValidator common.Address, _superchainConfig common.Address, _protocolVersions common.Address, _superchainProxyAdmin common.Address, _l1PAO common.Address) (common.Address, *types.Transaction, *OPContractsManager, error) { + parsed, err := OPContractsManagerMetaData.GetAbi() + if err != nil { + return common.Address{}, nil, nil, err + } + if parsed == nil { + return common.Address{}, nil, nil, errors.New("GetABI returned nil") + } + + address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(OPContractsManagerBin), backend, _opcmGameTypeAdder, _opcmDeployer, _opcmUpgrader, _opcmInteropMigrator, _opcmStandardValidator, _superchainConfig, _protocolVersions, _superchainProxyAdmin, _l1PAO) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &OPContractsManager{OPContractsManagerCaller: OPContractsManagerCaller{contract: contract}, OPContractsManagerTransactor: OPContractsManagerTransactor{contract: contract}, OPContractsManagerFilterer: OPContractsManagerFilterer{contract: contract}}, nil +} + // OPContractsManager is an auto generated Go binding around an Ethereum contract. 
type OPContractsManager struct { OPContractsManagerCaller // Read-only binding to the contract @@ -377,97 +414,97 @@ func (_OPContractsManager *OPContractsManagerCallerSession) ChainIdToBatchInboxA return _OPContractsManager.Contract.ChainIdToBatchInboxAddress(&_OPContractsManager.CallOpts, _l2ChainId) } -// Implementations is a free data retrieval call binding the contract method 0x30e9012c. +// DevFeatureBitmap is a free data retrieval call binding the contract method 0x1d8a4e92. // -// Solidity: function implementations() view returns((address,address,address,address,address,address,address,address,address,address,address,address,address)) -func (_OPContractsManager *OPContractsManagerCaller) Implementations(opts *bind.CallOpts) (OPContractsManagerImplementations, error) { +// Solidity: function devFeatureBitmap() view returns(bytes32) +func (_OPContractsManager *OPContractsManagerCaller) DevFeatureBitmap(opts *bind.CallOpts) ([32]byte, error) { var out []interface{} - err := _OPContractsManager.contract.Call(opts, &out, "implementations") + err := _OPContractsManager.contract.Call(opts, &out, "devFeatureBitmap") if err != nil { - return *new(OPContractsManagerImplementations), err + return *new([32]byte), err } - out0 := *abi.ConvertType(out[0], new(OPContractsManagerImplementations)).(*OPContractsManagerImplementations) + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) return out0, err } -// Implementations is a free data retrieval call binding the contract method 0x30e9012c. +// DevFeatureBitmap is a free data retrieval call binding the contract method 0x1d8a4e92. 
// -// Solidity: function implementations() view returns((address,address,address,address,address,address,address,address,address,address,address,address,address)) -func (_OPContractsManager *OPContractsManagerSession) Implementations() (OPContractsManagerImplementations, error) { - return _OPContractsManager.Contract.Implementations(&_OPContractsManager.CallOpts) +// Solidity: function devFeatureBitmap() view returns(bytes32) +func (_OPContractsManager *OPContractsManagerSession) DevFeatureBitmap() ([32]byte, error) { + return _OPContractsManager.Contract.DevFeatureBitmap(&_OPContractsManager.CallOpts) } -// Implementations is a free data retrieval call binding the contract method 0x30e9012c. +// DevFeatureBitmap is a free data retrieval call binding the contract method 0x1d8a4e92. // -// Solidity: function implementations() view returns((address,address,address,address,address,address,address,address,address,address,address,address,address)) -func (_OPContractsManager *OPContractsManagerCallerSession) Implementations() (OPContractsManagerImplementations, error) { - return _OPContractsManager.Contract.Implementations(&_OPContractsManager.CallOpts) +// Solidity: function devFeatureBitmap() view returns(bytes32) +func (_OPContractsManager *OPContractsManagerCallerSession) DevFeatureBitmap() ([32]byte, error) { + return _OPContractsManager.Contract.DevFeatureBitmap(&_OPContractsManager.CallOpts) } -// IsRC is a free data retrieval call binding the contract method 0xf179c48d. +// Implementations is a free data retrieval call binding the contract method 0x30e9012c. 
// -// Solidity: function isRC() view returns(bool) -func (_OPContractsManager *OPContractsManagerCaller) IsRC(opts *bind.CallOpts) (bool, error) { +// Solidity: function implementations() view returns((address,address,address,address,address,address,address,address,address,address,address,address,address,address)) +func (_OPContractsManager *OPContractsManagerCaller) Implementations(opts *bind.CallOpts) (OPContractsManagerImplementations, error) { var out []interface{} - err := _OPContractsManager.contract.Call(opts, &out, "isRC") + err := _OPContractsManager.contract.Call(opts, &out, "implementations") if err != nil { - return *new(bool), err + return *new(OPContractsManagerImplementations), err } - out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + out0 := *abi.ConvertType(out[0], new(OPContractsManagerImplementations)).(*OPContractsManagerImplementations) return out0, err } -// IsRC is a free data retrieval call binding the contract method 0xf179c48d. +// Implementations is a free data retrieval call binding the contract method 0x30e9012c. // -// Solidity: function isRC() view returns(bool) -func (_OPContractsManager *OPContractsManagerSession) IsRC() (bool, error) { - return _OPContractsManager.Contract.IsRC(&_OPContractsManager.CallOpts) +// Solidity: function implementations() view returns((address,address,address,address,address,address,address,address,address,address,address,address,address,address)) +func (_OPContractsManager *OPContractsManagerSession) Implementations() (OPContractsManagerImplementations, error) { + return _OPContractsManager.Contract.Implementations(&_OPContractsManager.CallOpts) } -// IsRC is a free data retrieval call binding the contract method 0xf179c48d. +// Implementations is a free data retrieval call binding the contract method 0x30e9012c. 
// -// Solidity: function isRC() view returns(bool) -func (_OPContractsManager *OPContractsManagerCallerSession) IsRC() (bool, error) { - return _OPContractsManager.Contract.IsRC(&_OPContractsManager.CallOpts) +// Solidity: function implementations() view returns((address,address,address,address,address,address,address,address,address,address,address,address,address,address)) +func (_OPContractsManager *OPContractsManagerCallerSession) Implementations() (OPContractsManagerImplementations, error) { + return _OPContractsManager.Contract.Implementations(&_OPContractsManager.CallOpts) } -// L1ContractsRelease is a free data retrieval call binding the contract method 0x35cb2e9b. +// IsDevFeatureEnabled is a free data retrieval call binding the contract method 0x78ecabce. // -// Solidity: function l1ContractsRelease() view returns(string) -func (_OPContractsManager *OPContractsManagerCaller) L1ContractsRelease(opts *bind.CallOpts) (string, error) { +// Solidity: function isDevFeatureEnabled(bytes32 _feature) view returns(bool) +func (_OPContractsManager *OPContractsManagerCaller) IsDevFeatureEnabled(opts *bind.CallOpts, _feature [32]byte) (bool, error) { var out []interface{} - err := _OPContractsManager.contract.Call(opts, &out, "l1ContractsRelease") + err := _OPContractsManager.contract.Call(opts, &out, "isDevFeatureEnabled", _feature) if err != nil { - return *new(string), err + return *new(bool), err } - out0 := *abi.ConvertType(out[0], new(string)).(*string) + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) return out0, err } -// L1ContractsRelease is a free data retrieval call binding the contract method 0x35cb2e9b. +// IsDevFeatureEnabled is a free data retrieval call binding the contract method 0x78ecabce. 
// -// Solidity: function l1ContractsRelease() view returns(string) -func (_OPContractsManager *OPContractsManagerSession) L1ContractsRelease() (string, error) { - return _OPContractsManager.Contract.L1ContractsRelease(&_OPContractsManager.CallOpts) +// Solidity: function isDevFeatureEnabled(bytes32 _feature) view returns(bool) +func (_OPContractsManager *OPContractsManagerSession) IsDevFeatureEnabled(_feature [32]byte) (bool, error) { + return _OPContractsManager.Contract.IsDevFeatureEnabled(&_OPContractsManager.CallOpts, _feature) } -// L1ContractsRelease is a free data retrieval call binding the contract method 0x35cb2e9b. +// IsDevFeatureEnabled is a free data retrieval call binding the contract method 0x78ecabce. // -// Solidity: function l1ContractsRelease() view returns(string) -func (_OPContractsManager *OPContractsManagerCallerSession) L1ContractsRelease() (string, error) { - return _OPContractsManager.Contract.L1ContractsRelease(&_OPContractsManager.CallOpts) +// Solidity: function isDevFeatureEnabled(bytes32 _feature) view returns(bool) +func (_OPContractsManager *OPContractsManagerCallerSession) IsDevFeatureEnabled(_feature [32]byte) (bool, error) { + return _OPContractsManager.Contract.IsDevFeatureEnabled(&_OPContractsManager.CallOpts, _feature) } // OpcmDeployer is a free data retrieval call binding the contract method 0x622d56f1. @@ -563,6 +600,37 @@ func (_OPContractsManager *OPContractsManagerCallerSession) OpcmInteropMigrator( return _OPContractsManager.Contract.OpcmInteropMigrator(&_OPContractsManager.CallOpts) } +// OpcmStandardValidator is a free data retrieval call binding the contract method 0xba7903db. 
+// +// Solidity: function opcmStandardValidator() view returns(address) +func (_OPContractsManager *OPContractsManagerCaller) OpcmStandardValidator(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _OPContractsManager.contract.Call(opts, &out, "opcmStandardValidator") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +// OpcmStandardValidator is a free data retrieval call binding the contract method 0xba7903db. +// +// Solidity: function opcmStandardValidator() view returns(address) +func (_OPContractsManager *OPContractsManagerSession) OpcmStandardValidator() (common.Address, error) { + return _OPContractsManager.Contract.OpcmStandardValidator(&_OPContractsManager.CallOpts) +} + +// OpcmStandardValidator is a free data retrieval call binding the contract method 0xba7903db. +// +// Solidity: function opcmStandardValidator() view returns(address) +func (_OPContractsManager *OPContractsManagerCallerSession) OpcmStandardValidator() (common.Address, error) { + return _OPContractsManager.Contract.OpcmStandardValidator(&_OPContractsManager.CallOpts) +} + // OpcmUpgrader is a free data retrieval call binding the contract method 0x03dbe68c. // // Solidity: function opcmUpgrader() view returns(address) @@ -687,35 +755,66 @@ func (_OPContractsManager *OPContractsManagerCallerSession) SuperchainProxyAdmin return _OPContractsManager.Contract.SuperchainProxyAdmin(&_OPContractsManager.CallOpts) } -// UpgradeController is a free data retrieval call binding the contract method 0x87543ef6. +// Validate is a free data retrieval call binding the contract method 0x30d14888. 
// -// Solidity: function upgradeController() view returns(address) -func (_OPContractsManager *OPContractsManagerCaller) UpgradeController(opts *bind.CallOpts) (common.Address, error) { +// Solidity: function validate((address,address,bytes32,uint256) _input, bool _allowFailure) view returns(string) +func (_OPContractsManager *OPContractsManagerCaller) Validate(opts *bind.CallOpts, _input OPContractsManagerStandardValidatorValidationInput, _allowFailure bool) (string, error) { var out []interface{} - err := _OPContractsManager.contract.Call(opts, &out, "upgradeController") + err := _OPContractsManager.contract.Call(opts, &out, "validate", _input, _allowFailure) if err != nil { - return *new(common.Address), err + return *new(string), err } - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + out0 := *abi.ConvertType(out[0], new(string)).(*string) + + return out0, err + +} + +// Validate is a free data retrieval call binding the contract method 0x30d14888. +// +// Solidity: function validate((address,address,bytes32,uint256) _input, bool _allowFailure) view returns(string) +func (_OPContractsManager *OPContractsManagerSession) Validate(_input OPContractsManagerStandardValidatorValidationInput, _allowFailure bool) (string, error) { + return _OPContractsManager.Contract.Validate(&_OPContractsManager.CallOpts, _input, _allowFailure) +} + +// Validate is a free data retrieval call binding the contract method 0x30d14888. +// +// Solidity: function validate((address,address,bytes32,uint256) _input, bool _allowFailure) view returns(string) +func (_OPContractsManager *OPContractsManagerCallerSession) Validate(_input OPContractsManagerStandardValidatorValidationInput, _allowFailure bool) (string, error) { + return _OPContractsManager.Contract.Validate(&_OPContractsManager.CallOpts, _input, _allowFailure) +} + +// ValidateWithOverrides is a free data retrieval call binding the contract method 0x67cda69c. 
+// +// Solidity: function validateWithOverrides((address,address,bytes32,uint256) _input, bool _allowFailure, (address,address) _overrides) view returns(string) +func (_OPContractsManager *OPContractsManagerCaller) ValidateWithOverrides(opts *bind.CallOpts, _input OPContractsManagerStandardValidatorValidationInput, _allowFailure bool, _overrides OPContractsManagerStandardValidatorValidationOverrides) (string, error) { + var out []interface{} + err := _OPContractsManager.contract.Call(opts, &out, "validateWithOverrides", _input, _allowFailure, _overrides) + + if err != nil { + return *new(string), err + } + + out0 := *abi.ConvertType(out[0], new(string)).(*string) return out0, err } -// UpgradeController is a free data retrieval call binding the contract method 0x87543ef6. +// ValidateWithOverrides is a free data retrieval call binding the contract method 0x67cda69c. // -// Solidity: function upgradeController() view returns(address) -func (_OPContractsManager *OPContractsManagerSession) UpgradeController() (common.Address, error) { - return _OPContractsManager.Contract.UpgradeController(&_OPContractsManager.CallOpts) +// Solidity: function validateWithOverrides((address,address,bytes32,uint256) _input, bool _allowFailure, (address,address) _overrides) view returns(string) +func (_OPContractsManager *OPContractsManagerSession) ValidateWithOverrides(_input OPContractsManagerStandardValidatorValidationInput, _allowFailure bool, _overrides OPContractsManagerStandardValidatorValidationOverrides) (string, error) { + return _OPContractsManager.Contract.ValidateWithOverrides(&_OPContractsManager.CallOpts, _input, _allowFailure, _overrides) } -// UpgradeController is a free data retrieval call binding the contract method 0x87543ef6. +// ValidateWithOverrides is a free data retrieval call binding the contract method 0x67cda69c. 
// -// Solidity: function upgradeController() view returns(address) -func (_OPContractsManager *OPContractsManagerCallerSession) UpgradeController() (common.Address, error) { - return _OPContractsManager.Contract.UpgradeController(&_OPContractsManager.CallOpts) +// Solidity: function validateWithOverrides((address,address,bytes32,uint256) _input, bool _allowFailure, (address,address) _overrides) view returns(string) +func (_OPContractsManager *OPContractsManagerCallerSession) ValidateWithOverrides(_input OPContractsManagerStandardValidatorValidationInput, _allowFailure bool, _overrides OPContractsManagerStandardValidatorValidationOverrides) (string, error) { + return _OPContractsManager.Contract.ValidateWithOverrides(&_OPContractsManager.CallOpts, _input, _allowFailure, _overrides) } // Version is a free data retrieval call binding the contract method 0x54fd4d50. @@ -812,27 +911,6 @@ func (_OPContractsManager *OPContractsManagerTransactorSession) Migrate(_input O return _OPContractsManager.Contract.Migrate(&_OPContractsManager.TransactOpts, _input) } -// SetRC is a paid mutator transaction binding the contract method 0x6ccdfe11. -// -// Solidity: function setRC(bool _isRC) returns() -func (_OPContractsManager *OPContractsManagerTransactor) SetRC(opts *bind.TransactOpts, _isRC bool) (*types.Transaction, error) { - return _OPContractsManager.contract.Transact(opts, "setRC", _isRC) -} - -// SetRC is a paid mutator transaction binding the contract method 0x6ccdfe11. -// -// Solidity: function setRC(bool _isRC) returns() -func (_OPContractsManager *OPContractsManagerSession) SetRC(_isRC bool) (*types.Transaction, error) { - return _OPContractsManager.Contract.SetRC(&_OPContractsManager.TransactOpts, _isRC) -} - -// SetRC is a paid mutator transaction binding the contract method 0x6ccdfe11. 
-// -// Solidity: function setRC(bool _isRC) returns() -func (_OPContractsManager *OPContractsManagerTransactorSession) SetRC(_isRC bool) (*types.Transaction, error) { - return _OPContractsManager.Contract.SetRC(&_OPContractsManager.TransactOpts, _isRC) -} - // UpdatePrestate is a paid mutator transaction binding the contract method 0x9a72745b. // // Solidity: function updatePrestate((address,address,bytes32)[] _prestateUpdateInputs) returns() diff --git a/op-e2e/bindings/systemconfig.go b/op-e2e/bindings/systemconfig.go index 05d74beadbeb5..49be8b70df402 100644 --- a/op-e2e/bindings/systemconfig.go +++ b/op-e2e/bindings/systemconfig.go @@ -26,6 +26,7 @@ var ( _ = common.Big1 _ = types.BloomLookup _ = event.NewSubscription + _ = abi.ConvertType ) // IResourceMeteringResourceConfig is an auto generated low-level Go binding around an user-defined struct. @@ -43,14 +44,13 @@ type SystemConfigAddresses struct { L1CrossDomainMessenger common.Address L1ERC721Bridge common.Address L1StandardBridge common.Address - DisputeGameFactory common.Address OptimismPortal common.Address OptimismMintableERC20Factory common.Address } // SystemConfigMetaData contains all meta data concerning the SystemConfig contract. 
var SystemConfigMetaData = &bind.MetaData{ - ABI: "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"BATCH_INBOX_SLOT\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"DISPUTE_GAME_FACTORY_SLOT\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"L1_CROSS_DOMAIN_MESSENGER_SLOT\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"L1_ERC_721_BRIDGE_SLOT\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"L1_STANDARD_BRIDGE_SLOT\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"OPTIMISM_MINTABLE_ERC20_FACTORY_SLOT\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"OPTIMISM_PORTAL_SLOT\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"START_BLOCK_SLOT\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"UNSAFE_BLOCK_SIGNER_SLOT\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"VERSION\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"basefeeScalar\",\
"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"batchInbox\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"addr_\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"batcherHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"blobbasefeeScalar\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"disputeGameFactory\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"addr_\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"eip1559Denominator\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"eip1559Elasticity\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"gasLimit\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getAddresses\",\"outputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"l1CrossDomainMessenger\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"l1ERC721Bridge\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"l1StandardBridge\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"disputeGameFactory\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"optimismPortal\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"optimismMintableERC20Factory\",\"type\":\"address\"}],\"internalType\":\"structSystemConfig.Addresses\",\"name\":\"\"
,\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_owner\",\"type\":\"address\"},{\"internalType\":\"uint32\",\"name\":\"_basefeeScalar\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_blobbasefeeScalar\",\"type\":\"uint32\"},{\"internalType\":\"bytes32\",\"name\":\"_batcherHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint64\",\"name\":\"_gasLimit\",\"type\":\"uint64\"},{\"internalType\":\"address\",\"name\":\"_unsafeBlockSigner\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"uint32\",\"name\":\"maxResourceLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint8\",\"name\":\"elasticityMultiplier\",\"type\":\"uint8\"},{\"internalType\":\"uint8\",\"name\":\"baseFeeMaxChangeDenominator\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"minimumBaseFee\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"systemTxMaxGas\",\"type\":\"uint32\"},{\"internalType\":\"uint128\",\"name\":\"maximumBaseFee\",\"type\":\"uint128\"}],\"internalType\":\"structIResourceMetering.ResourceConfig\",\"name\":\"_config\",\"type\":\"tuple\"},{\"internalType\":\"address\",\"name\":\"_batchInbox\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"address\",\"name\":\"l1CrossDomainMessenger\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"l1ERC721Bridge\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"l1StandardBridge\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"disputeGameFactory\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"optimismPortal\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"optimismMintableERC20Factory\",\"type\":\"address\"}],\"internalType\":\"structSystemConfig.Addresses\",\"name\":\"_addresses\",\"type\":\"tuple\"}],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1CrossDomainMessen
ger\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"addr_\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1ERC721Bridge\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"addr_\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1StandardBridge\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"addr_\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"maximumGasLimit\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"minimumGasLimit\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"operatorFeeConstant\",\"outputs\":[{\"internalType\":\"uint64\",\"name\":\"\",\"type\":\"uint64\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"operatorFeeScalar\",\"outputs\":[{\"internalType\":\"uint32\",\"name\":\"\",\"type\":\"uint32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"optimismMintableERC20Factory\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"addr_\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"optimismPortal\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"addr_\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"n
onpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"resourceConfig\",\"outputs\":[{\"components\":[{\"internalType\":\"uint32\",\"name\":\"maxResourceLimit\",\"type\":\"uint32\"},{\"internalType\":\"uint8\",\"name\":\"elasticityMultiplier\",\"type\":\"uint8\"},{\"internalType\":\"uint8\",\"name\":\"baseFeeMaxChangeDenominator\",\"type\":\"uint8\"},{\"internalType\":\"uint32\",\"name\":\"minimumBaseFee\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"systemTxMaxGas\",\"type\":\"uint32\"},{\"internalType\":\"uint128\",\"name\":\"maximumBaseFee\",\"type\":\"uint128\"}],\"internalType\":\"structIResourceMetering.ResourceConfig\",\"name\":\"\",\"type\":\"tuple\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"_batcherHash\",\"type\":\"bytes32\"}],\"name\":\"setBatcherHash\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_denominator\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_elasticity\",\"type\":\"uint32\"}],\"name\":\"setEIP1559Params\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_overhead\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_scalar\",\"type\":\"uint256\"}],\"name\":\"setGasConfig\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_basefeeScalar\",\"type\":\"uint32\"},{\"internalType\":\"uint32\",\"name\":\"_blobbasefeeScalar\",\"type\":\"uint32\"}],\"name\":\"setGasConfigEcotone\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint64\",\"name\":\"_gasLimit\",\"type\":\"uint64\"}],\
"name\":\"setGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint32\",\"name\":\"_operatorFeeScalar\",\"type\":\"uint32\"},{\"internalType\":\"uint64\",\"name\":\"_operatorFeeConstant\",\"type\":\"uint64\"}],\"name\":\"setOperatorFeeScalars\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_unsafeBlockSigner\",\"type\":\"address\"}],\"name\":\"setUnsafeBlockSigner\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"startBlock\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"startBlock_\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"unsafeBlockSigner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"addr_\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"version\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"enumSystemConfig.UpdateType\",\"name\":\"updateType\",\"type\":\"uint8\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"ConfigUpdate\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint8\",\"name\":\"version\",\"type\":\"uint8\"}],\"name\":\"Initialized\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"interna
lType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"}]", + ABI: "[{\"type\":\"constructor\",\"inputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"BATCH_INBOX_SLOT\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"L1_CROSS_DOMAIN_MESSENGER_SLOT\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"L1_ERC_721_BRIDGE_SLOT\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"L1_STANDARD_BRIDGE_SLOT\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"OPTIMISM_MINTABLE_ERC20_FACTORY_SLOT\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"OPTIMISM_PORTAL_SLOT\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"START_BLOCK_SLOT\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"UNSAFE_BLOCK_SIGNER_SLOT\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"VERSION\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"basefeeScalar\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint32\",\"internalType\":\"uint32\"}],\"stateM
utability\":\"view\"},{\"type\":\"function\",\"name\":\"batchInbox\",\"inputs\":[],\"outputs\":[{\"name\":\"addr_\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"batcherHash\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"blobbasefeeScalar\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint32\",\"internalType\":\"uint32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"disputeGameFactory\",\"inputs\":[],\"outputs\":[{\"name\":\"addr_\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"eip1559Denominator\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint32\",\"internalType\":\"uint32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"eip1559Elasticity\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint32\",\"internalType\":\"uint32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"gasLimit\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint64\",\"internalType\":\"uint64\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getAddresses\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"tuple\",\"internalType\":\"structSystemConfig.Addresses\",\"components\":[{\"name\":\"l1CrossDomainMessenger\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"l1ERC721Bridge\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"l1StandardBridge\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"optimismPortal\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"optimismMintableERC20Factory\",\"type\":\"address\",\"internalType\":\"address\"}]}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"guardian\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"interna
lType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"initVersion\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint8\",\"internalType\":\"uint8\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"initialize\",\"inputs\":[{\"name\":\"_owner\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_basefeeScalar\",\"type\":\"uint32\",\"internalType\":\"uint32\"},{\"name\":\"_blobbasefeeScalar\",\"type\":\"uint32\",\"internalType\":\"uint32\"},{\"name\":\"_batcherHash\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"},{\"name\":\"_gasLimit\",\"type\":\"uint64\",\"internalType\":\"uint64\"},{\"name\":\"_unsafeBlockSigner\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_config\",\"type\":\"tuple\",\"internalType\":\"structIResourceMetering.ResourceConfig\",\"components\":[{\"name\":\"maxResourceLimit\",\"type\":\"uint32\",\"internalType\":\"uint32\"},{\"name\":\"elasticityMultiplier\",\"type\":\"uint8\",\"internalType\":\"uint8\"},{\"name\":\"baseFeeMaxChangeDenominator\",\"type\":\"uint8\",\"internalType\":\"uint8\"},{\"name\":\"minimumBaseFee\",\"type\":\"uint32\",\"internalType\":\"uint32\"},{\"name\":\"systemTxMaxGas\",\"type\":\"uint32\",\"internalType\":\"uint32\"},{\"name\":\"maximumBaseFee\",\"type\":\"uint128\",\"internalType\":\"uint128\"}]},{\"name\":\"_batchInbox\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_addresses\",\"type\":\"tuple\",\"internalType\":\"structSystemConfig.Addresses\",\"components\":[{\"name\":\"l1CrossDomainMessenger\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"l1ERC721Bridge\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"l1StandardBridge\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"optimismPortal\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"optimismMintableERC20Factory\",\"type\":\"address\",\"internalType\":\"address\"}]},{\"name\":\"_l2ChainId\",\"type\":\
"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_superchainConfig\",\"type\":\"address\",\"internalType\":\"contractISuperchainConfig\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"isFeatureEnabled\",\"inputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"l1CrossDomainMessenger\",\"inputs\":[],\"outputs\":[{\"name\":\"addr_\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"l1ERC721Bridge\",\"inputs\":[],\"outputs\":[{\"name\":\"addr_\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"l1StandardBridge\",\"inputs\":[],\"outputs\":[{\"name\":\"addr_\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"l2ChainId\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"maximumGasLimit\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint64\",\"internalType\":\"uint64\"}],\"stateMutability\":\"pure\"},{\"type\":\"function\",\"name\":\"minBaseFee\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint64\",\"internalType\":\"uint64\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"minimumGasLimit\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint64\",\"internalType\":\"uint64\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"operatorFeeConstant\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint64\",\"internalType\":\"uint64\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"operatorFeeScalar\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint32\",\"internalType\":\"uint32\"}],\"stateMu
tability\":\"view\"},{\"type\":\"function\",\"name\":\"optimismMintableERC20Factory\",\"inputs\":[],\"outputs\":[{\"name\":\"addr_\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"optimismPortal\",\"inputs\":[],\"outputs\":[{\"name\":\"addr_\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"overhead\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"owner\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"paused\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"proxyAdmin\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"contractIProxyAdmin\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"proxyAdminOwner\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"renounceOwnership\",\"inputs\":[],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"resourceConfig\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"tuple\",\"internalType\":\"structIResourceMetering.ResourceConfig\",\"components\":[{\"name\":\"maxResourceLimit\",\"type\":\"uint32\",\"internalType\":\"uint32\"},{\"name\":\"elasticityMultiplier\",\"type\":\"uint8\",\"internalType\":\"uint8\"},{\"name\":\"baseFeeMaxChangeDenominator\",\"type\":\"uint8\",\"internalType\":\"uint8\"},{\"name\":\"minimumBaseFee\",\"type\":\"uint32\",\"internalType\":\"uint32\"},{\"name\":\"systemTxMaxGas\",\"type\":\"uint32\",\"internalType\":\"uint32\"},{\"name\":\"maximumBaseFee\",\"type\
":\"uint128\",\"internalType\":\"uint128\"}]}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"scalar\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"setBatcherHash\",\"inputs\":[{\"name\":\"_batcherHash\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"setEIP1559Params\",\"inputs\":[{\"name\":\"_denominator\",\"type\":\"uint32\",\"internalType\":\"uint32\"},{\"name\":\"_elasticity\",\"type\":\"uint32\",\"internalType\":\"uint32\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"setFeature\",\"inputs\":[{\"name\":\"_feature\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"},{\"name\":\"_enabled\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"setGasConfig\",\"inputs\":[{\"name\":\"_overhead\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_scalar\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"setGasConfigEcotone\",\"inputs\":[{\"name\":\"_basefeeScalar\",\"type\":\"uint32\",\"internalType\":\"uint32\"},{\"name\":\"_blobbasefeeScalar\",\"type\":\"uint32\",\"internalType\":\"uint32\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"setGasLimit\",\"inputs\":[{\"name\":\"_gasLimit\",\"type\":\"uint64\",\"internalType\":\"uint64\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"setMinBaseFee\",\"inputs\":[{\"name\":\"_minBaseFee\",\"type\":\"uint64\",\"internalType\":\"uint64\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"setOperatorFeeScalars\",\"inputs\":[{\"name\":\"_operatorFeeScalar\",\"type\":\"uint32\",\"in
ternalType\":\"uint32\"},{\"name\":\"_operatorFeeConstant\",\"type\":\"uint64\",\"internalType\":\"uint64\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"setUnsafeBlockSigner\",\"inputs\":[{\"name\":\"_unsafeBlockSigner\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"startBlock\",\"inputs\":[],\"outputs\":[{\"name\":\"startBlock_\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"superchainConfig\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"contractISuperchainConfig\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"transferOwnership\",\"inputs\":[{\"name\":\"newOwner\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"unsafeBlockSigner\",\"inputs\":[],\"outputs\":[{\"name\":\"addr_\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"upgrade\",\"inputs\":[{\"name\":\"_l2ChainId\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_superchainConfig\",\"type\":\"address\",\"internalType\":\"contractISuperchainConfig\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"version\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"string\",\"internalType\":\"string\"}],\"stateMutability\":\"pure\"},{\"type\":\"event\",\"name\":\"ConfigUpdate\",\"inputs\":[{\"name\":\"version\",\"type\":\"uint256\",\"indexed\":true,\"internalType\":\"uint256\"},{\"name\":\"updateType\",\"type\":\"uint8\",\"indexed\":true,\"internalType\":\"enumSystemConfig.UpdateType\"},{\"name\":\"data\",\"type\":\"bytes\",\"indexed\":false,\"internalType\":\"bytes\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"FeatureSet\",\"inputs\":[{\"name\":
\"feature\",\"type\":\"bytes32\",\"indexed\":true,\"internalType\":\"bytes32\"},{\"name\":\"enabled\",\"type\":\"bool\",\"indexed\":true,\"internalType\":\"bool\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Initialized\",\"inputs\":[{\"name\":\"version\",\"type\":\"uint8\",\"indexed\":false,\"internalType\":\"uint8\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"OwnershipTransferred\",\"inputs\":[{\"name\":\"previousOwner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"newOwner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"error\",\"name\":\"ProxyAdminOwnedBase_NotProxyAdmin\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"ProxyAdminOwnedBase_NotProxyAdminOrProxyAdminOwner\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"ProxyAdminOwnedBase_NotProxyAdminOwner\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"ProxyAdminOwnedBase_NotResolvedDelegateProxy\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"ProxyAdminOwnedBase_NotSharedProxyAdminOwner\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"ProxyAdminOwnedBase_ProxyAdminNotFound\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"ReinitializableBase_ZeroInitVersion\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"SystemConfig_InvalidFeatureState\",\"inputs\":[]}]", } // SystemConfigABI is the input ABI used to generate the binding from. @@ -154,11 +154,11 @@ func NewSystemConfigFilterer(address common.Address, filterer bind.ContractFilte // bindSystemConfig binds a generic wrapper to an already deployed contract. 
func bindSystemConfig(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := abi.JSON(strings.NewReader(SystemConfigABI)) + parsed, err := SystemConfigMetaData.GetAbi() if err != nil { return nil, err } - return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil } // Call invokes the (constant) contract method with params as input values and @@ -230,37 +230,6 @@ func (_SystemConfig *SystemConfigCallerSession) BATCHINBOXSLOT() ([32]byte, erro return _SystemConfig.Contract.BATCHINBOXSLOT(&_SystemConfig.CallOpts) } -// DISPUTEGAMEFACTORYSLOT is a free data retrieval call binding the contract method 0xe2a3285c. -// -// Solidity: function DISPUTE_GAME_FACTORY_SLOT() view returns(bytes32) -func (_SystemConfig *SystemConfigCaller) DISPUTEGAMEFACTORYSLOT(opts *bind.CallOpts) ([32]byte, error) { - var out []interface{} - err := _SystemConfig.contract.Call(opts, &out, "DISPUTE_GAME_FACTORY_SLOT") - - if err != nil { - return *new([32]byte), err - } - - out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) - - return out0, err - -} - -// DISPUTEGAMEFACTORYSLOT is a free data retrieval call binding the contract method 0xe2a3285c. -// -// Solidity: function DISPUTE_GAME_FACTORY_SLOT() view returns(bytes32) -func (_SystemConfig *SystemConfigSession) DISPUTEGAMEFACTORYSLOT() ([32]byte, error) { - return _SystemConfig.Contract.DISPUTEGAMEFACTORYSLOT(&_SystemConfig.CallOpts) -} - -// DISPUTEGAMEFACTORYSLOT is a free data retrieval call binding the contract method 0xe2a3285c. 
-// -// Solidity: function DISPUTE_GAME_FACTORY_SLOT() view returns(bytes32) -func (_SystemConfig *SystemConfigCallerSession) DISPUTEGAMEFACTORYSLOT() ([32]byte, error) { - return _SystemConfig.Contract.DISPUTEGAMEFACTORYSLOT(&_SystemConfig.CallOpts) -} - // L1CROSSDOMAINMESSENGERSLOT is a free data retrieval call binding the contract method 0x5d73369c. // // Solidity: function L1_CROSS_DOMAIN_MESSENGER_SLOT() view returns(bytes32) @@ -759,7 +728,7 @@ func (_SystemConfig *SystemConfigCallerSession) GasLimit() (uint64, error) { // GetAddresses is a free data retrieval call binding the contract method 0xa39fac12. // -// Solidity: function getAddresses() view returns((address,address,address,address,address,address)) +// Solidity: function getAddresses() view returns((address,address,address,address,address)) func (_SystemConfig *SystemConfigCaller) GetAddresses(opts *bind.CallOpts) (SystemConfigAddresses, error) { var out []interface{} err := _SystemConfig.contract.Call(opts, &out, "getAddresses") @@ -776,14 +745,14 @@ func (_SystemConfig *SystemConfigCaller) GetAddresses(opts *bind.CallOpts) (Syst // GetAddresses is a free data retrieval call binding the contract method 0xa39fac12. // -// Solidity: function getAddresses() view returns((address,address,address,address,address,address)) +// Solidity: function getAddresses() view returns((address,address,address,address,address)) func (_SystemConfig *SystemConfigSession) GetAddresses() (SystemConfigAddresses, error) { return _SystemConfig.Contract.GetAddresses(&_SystemConfig.CallOpts) } // GetAddresses is a free data retrieval call binding the contract method 0xa39fac12. 
// -// Solidity: function getAddresses() view returns((address,address,address,address,address,address)) +// Solidity: function getAddresses() view returns((address,address,address,address,address)) func (_SystemConfig *SystemConfigCallerSession) GetAddresses() (SystemConfigAddresses, error) { return _SystemConfig.Contract.GetAddresses(&_SystemConfig.CallOpts) } @@ -912,6 +881,37 @@ func (_SystemConfig *SystemConfigCallerSession) MaximumGasLimit() (uint64, error return _SystemConfig.Contract.MaximumGasLimit(&_SystemConfig.CallOpts) } +// MinBaseFee is a free data retrieval call binding the contract method 0xa62611a2. +// +// Solidity: function minBaseFee() view returns(uint64) +func (_SystemConfig *SystemConfigCaller) MinBaseFee(opts *bind.CallOpts) (uint64, error) { + var out []interface{} + err := _SystemConfig.contract.Call(opts, &out, "minBaseFee") + + if err != nil { + return *new(uint64), err + } + + out0 := *abi.ConvertType(out[0], new(uint64)).(*uint64) + + return out0, err + +} + +// MinBaseFee is a free data retrieval call binding the contract method 0xa62611a2. +// +// Solidity: function minBaseFee() view returns(uint64) +func (_SystemConfig *SystemConfigSession) MinBaseFee() (uint64, error) { + return _SystemConfig.Contract.MinBaseFee(&_SystemConfig.CallOpts) +} + +// MinBaseFee is a free data retrieval call binding the contract method 0xa62611a2. +// +// Solidity: function minBaseFee() view returns(uint64) +func (_SystemConfig *SystemConfigCallerSession) MinBaseFee() (uint64, error) { + return _SystemConfig.Contract.MinBaseFee(&_SystemConfig.CallOpts) +} + // MinimumGasLimit is a free data retrieval call binding the contract method 0x4add321d. // // Solidity: function minimumGasLimit() view returns(uint64) @@ -1284,25 +1284,25 @@ func (_SystemConfig *SystemConfigCallerSession) Version() (string, error) { return _SystemConfig.Contract.Version(&_SystemConfig.CallOpts) } -// Initialize is a paid mutator transaction binding the contract method 0xca407f0c. 
+// Initialize is a paid mutator transaction binding the contract method 0x01045829. // -// Solidity: function initialize(address _owner, uint32 _basefeeScalar, uint32 _blobbasefeeScalar, bytes32 _batcherHash, uint64 _gasLimit, address _unsafeBlockSigner, (uint32,uint8,uint8,uint32,uint32,uint128) _config, address _batchInbox, (address,address,address,address,address,address) _addresses) returns() -func (_SystemConfig *SystemConfigTransactor) Initialize(opts *bind.TransactOpts, _owner common.Address, _basefeeScalar uint32, _blobbasefeeScalar uint32, _batcherHash [32]byte, _gasLimit uint64, _unsafeBlockSigner common.Address, _config IResourceMeteringResourceConfig, _batchInbox common.Address, _addresses SystemConfigAddresses) (*types.Transaction, error) { - return _SystemConfig.contract.Transact(opts, "initialize", _owner, _basefeeScalar, _blobbasefeeScalar, _batcherHash, _gasLimit, _unsafeBlockSigner, _config, _batchInbox, _addresses) +// Solidity: function initialize(address _owner, uint32 _basefeeScalar, uint32 _blobbasefeeScalar, bytes32 _batcherHash, uint64 _gasLimit, address _unsafeBlockSigner, (uint32,uint8,uint8,uint32,uint32,uint128) _config, address _batchInbox, (address,address,address,address,address) _addresses, uint256 _l2ChainId, address _superchainConfig) returns() +func (_SystemConfig *SystemConfigTransactor) Initialize(opts *bind.TransactOpts, _owner common.Address, _basefeeScalar uint32, _blobbasefeeScalar uint32, _batcherHash [32]byte, _gasLimit uint64, _unsafeBlockSigner common.Address, _config IResourceMeteringResourceConfig, _batchInbox common.Address, _addresses SystemConfigAddresses, _l2ChainId *big.Int, _superchainConfig common.Address) (*types.Transaction, error) { + return _SystemConfig.contract.Transact(opts, "initialize", _owner, _basefeeScalar, _blobbasefeeScalar, _batcherHash, _gasLimit, _unsafeBlockSigner, _config, _batchInbox, _addresses, _l2ChainId, _superchainConfig) } -// Initialize is a paid mutator transaction binding the 
contract method 0xca407f0c. +// Initialize is a paid mutator transaction binding the contract method 0x01045829. // -// Solidity: function initialize(address _owner, uint32 _basefeeScalar, uint32 _blobbasefeeScalar, bytes32 _batcherHash, uint64 _gasLimit, address _unsafeBlockSigner, (uint32,uint8,uint8,uint32,uint32,uint128) _config, address _batchInbox, (address,address,address,address,address,address) _addresses) returns() -func (_SystemConfig *SystemConfigSession) Initialize(_owner common.Address, _basefeeScalar uint32, _blobbasefeeScalar uint32, _batcherHash [32]byte, _gasLimit uint64, _unsafeBlockSigner common.Address, _config IResourceMeteringResourceConfig, _batchInbox common.Address, _addresses SystemConfigAddresses) (*types.Transaction, error) { - return _SystemConfig.Contract.Initialize(&_SystemConfig.TransactOpts, _owner, _basefeeScalar, _blobbasefeeScalar, _batcherHash, _gasLimit, _unsafeBlockSigner, _config, _batchInbox, _addresses) +// Solidity: function initialize(address _owner, uint32 _basefeeScalar, uint32 _blobbasefeeScalar, bytes32 _batcherHash, uint64 _gasLimit, address _unsafeBlockSigner, (uint32,uint8,uint8,uint32,uint32,uint128) _config, address _batchInbox, (address,address,address,address,address) _addresses, uint256 _l2ChainId, address _superchainConfig) returns() +func (_SystemConfig *SystemConfigSession) Initialize(_owner common.Address, _basefeeScalar uint32, _blobbasefeeScalar uint32, _batcherHash [32]byte, _gasLimit uint64, _unsafeBlockSigner common.Address, _config IResourceMeteringResourceConfig, _batchInbox common.Address, _addresses SystemConfigAddresses, _l2ChainId *big.Int, _superchainConfig common.Address) (*types.Transaction, error) { + return _SystemConfig.Contract.Initialize(&_SystemConfig.TransactOpts, _owner, _basefeeScalar, _blobbasefeeScalar, _batcherHash, _gasLimit, _unsafeBlockSigner, _config, _batchInbox, _addresses, _l2ChainId, _superchainConfig) } -// Initialize is a paid mutator transaction binding the contract 
method 0xca407f0c. +// Initialize is a paid mutator transaction binding the contract method 0x01045829. // -// Solidity: function initialize(address _owner, uint32 _basefeeScalar, uint32 _blobbasefeeScalar, bytes32 _batcherHash, uint64 _gasLimit, address _unsafeBlockSigner, (uint32,uint8,uint8,uint32,uint32,uint128) _config, address _batchInbox, (address,address,address,address,address,address) _addresses) returns() -func (_SystemConfig *SystemConfigTransactorSession) Initialize(_owner common.Address, _basefeeScalar uint32, _blobbasefeeScalar uint32, _batcherHash [32]byte, _gasLimit uint64, _unsafeBlockSigner common.Address, _config IResourceMeteringResourceConfig, _batchInbox common.Address, _addresses SystemConfigAddresses) (*types.Transaction, error) { - return _SystemConfig.Contract.Initialize(&_SystemConfig.TransactOpts, _owner, _basefeeScalar, _blobbasefeeScalar, _batcherHash, _gasLimit, _unsafeBlockSigner, _config, _batchInbox, _addresses) +// Solidity: function initialize(address _owner, uint32 _basefeeScalar, uint32 _blobbasefeeScalar, bytes32 _batcherHash, uint64 _gasLimit, address _unsafeBlockSigner, (uint32,uint8,uint8,uint32,uint32,uint128) _config, address _batchInbox, (address,address,address,address,address) _addresses, uint256 _l2ChainId, address _superchainConfig) returns() +func (_SystemConfig *SystemConfigTransactorSession) Initialize(_owner common.Address, _basefeeScalar uint32, _blobbasefeeScalar uint32, _batcherHash [32]byte, _gasLimit uint64, _unsafeBlockSigner common.Address, _config IResourceMeteringResourceConfig, _batchInbox common.Address, _addresses SystemConfigAddresses, _l2ChainId *big.Int, _superchainConfig common.Address) (*types.Transaction, error) { + return _SystemConfig.Contract.Initialize(&_SystemConfig.TransactOpts, _owner, _basefeeScalar, _blobbasefeeScalar, _batcherHash, _gasLimit, _unsafeBlockSigner, _config, _batchInbox, _addresses, _l2ChainId, _superchainConfig) } // RenounceOwnership is a paid mutator transaction 
binding the contract method 0x715018a6. @@ -1431,6 +1431,27 @@ func (_SystemConfig *SystemConfigTransactorSession) SetGasLimit(_gasLimit uint64 return _SystemConfig.Contract.SetGasLimit(&_SystemConfig.TransactOpts, _gasLimit) } +// SetMinBaseFee is a paid mutator transaction binding the contract method 0x7616f0e8. +// +// Solidity: function setMinBaseFee(uint64 _minBaseFee) returns() +func (_SystemConfig *SystemConfigTransactor) SetMinBaseFee(opts *bind.TransactOpts, _minBaseFee uint64) (*types.Transaction, error) { + return _SystemConfig.contract.Transact(opts, "setMinBaseFee", _minBaseFee) +} + +// SetMinBaseFee is a paid mutator transaction binding the contract method 0x7616f0e8. +// +// Solidity: function setMinBaseFee(uint64 _minBaseFee) returns() +func (_SystemConfig *SystemConfigSession) SetMinBaseFee(_minBaseFee uint64) (*types.Transaction, error) { + return _SystemConfig.Contract.SetMinBaseFee(&_SystemConfig.TransactOpts, _minBaseFee) +} + +// SetMinBaseFee is a paid mutator transaction binding the contract method 0x7616f0e8. +// +// Solidity: function setMinBaseFee(uint64 _minBaseFee) returns() +func (_SystemConfig *SystemConfigTransactorSession) SetMinBaseFee(_minBaseFee uint64) (*types.Transaction, error) { + return _SystemConfig.Contract.SetMinBaseFee(&_SystemConfig.TransactOpts, _minBaseFee) +} + // SetOperatorFeeScalars is a paid mutator transaction binding the contract method 0x155b6c6f. 
// // Solidity: function setOperatorFeeScalars(uint32 _operatorFeeScalar, uint64 _operatorFeeConstant) returns() diff --git a/op-e2e/config/addresses.json.gz b/op-e2e/config/addresses.json.gz deleted file mode 100644 index 19891ac2cd9fb..0000000000000 Binary files a/op-e2e/config/addresses.json.gz and /dev/null differ diff --git a/op-e2e/config/allocs-l1.json.gz b/op-e2e/config/allocs-l1.json.gz deleted file mode 100644 index 716554482f110..0000000000000 Binary files a/op-e2e/config/allocs-l1.json.gz and /dev/null differ diff --git a/op-e2e/config/allocs-l2-delta.json.gz b/op-e2e/config/allocs-l2-delta.json.gz deleted file mode 100644 index eefc554028d75..0000000000000 Binary files a/op-e2e/config/allocs-l2-delta.json.gz and /dev/null differ diff --git a/op-e2e/config/allocs-l2-ecotone.json.gz b/op-e2e/config/allocs-l2-ecotone.json.gz deleted file mode 100644 index 84a43f48f5118..0000000000000 Binary files a/op-e2e/config/allocs-l2-ecotone.json.gz and /dev/null differ diff --git a/op-e2e/config/allocs-l2-fjord.json.gz b/op-e2e/config/allocs-l2-fjord.json.gz deleted file mode 100644 index 70937974ed234..0000000000000 Binary files a/op-e2e/config/allocs-l2-fjord.json.gz and /dev/null differ diff --git a/op-e2e/config/allocs-l2-granite.json.gz b/op-e2e/config/allocs-l2-granite.json.gz deleted file mode 100644 index 8290920480eb1..0000000000000 Binary files a/op-e2e/config/allocs-l2-granite.json.gz and /dev/null differ diff --git a/op-e2e/config/allocs-l2-holocene.json.gz b/op-e2e/config/allocs-l2-holocene.json.gz deleted file mode 100644 index 86aa8ead4725e..0000000000000 Binary files a/op-e2e/config/allocs-l2-holocene.json.gz and /dev/null differ diff --git a/op-e2e/config/allocs-l2-isthmus.json.gz b/op-e2e/config/allocs-l2-isthmus.json.gz deleted file mode 100644 index 86aa8ead4725e..0000000000000 Binary files a/op-e2e/config/allocs-l2-isthmus.json.gz and /dev/null differ diff --git a/op-e2e/config/init.go b/op-e2e/config/init.go index 60fd3a3c2922b..c60c345278a64 
100644 --- a/op-e2e/config/init.go +++ b/op-e2e/config/init.go @@ -1,7 +1,6 @@ package config import ( - "compress/gzip" "context" "encoding/json" "fmt" @@ -17,6 +16,7 @@ import ( "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/inspect" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/pipeline" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/state" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum/go-ethereum/common" @@ -51,7 +51,6 @@ type AllocType string const ( AllocTypeAltDA AllocType = "alt-da" - AllocTypeL2OO AllocType = "l2oo" AllocTypeMTCannon AllocType = "mt-cannon" AllocTypeMTCannonNext AllocType = "mt-cannon-next" @@ -74,7 +73,7 @@ func (a AllocType) UsesProofs() bool { } } -var allocTypes = []AllocType{AllocTypeAltDA, AllocTypeL2OO, AllocTypeMTCannon, AllocTypeMTCannonNext} +var allocTypes = []AllocType{AllocTypeAltDA, AllocTypeMTCannon, AllocTypeMTCannonNext} var ( // All of the following variables are set in the init function @@ -144,6 +143,10 @@ func DeployConfig(allocType AllocType) *genesis.DeployConfig { } func init() { + // Used by the rust team, to skip legacy op-e2e init. Not used by devstack acceptance tests. 
+ if os.Getenv("DISABLE_OP_E2E_LEGACY") == "true" { + return + } cwd, err := os.Getwd() if err != nil { panic(err) @@ -185,50 +188,9 @@ func init() { oplog.SetGlobalLogHandler(errHandler) for _, allocType := range allocTypes { - if allocType == AllocTypeL2OO { - continue - } - initAllocType(root, allocType) } - configPath := path.Join(root, "op-e2e", "config") - forks := []genesis.L2AllocsMode{ - genesis.L2AllocsIsthmus, - genesis.L2AllocsHolocene, - genesis.L2AllocsGranite, - genesis.L2AllocsFjord, - genesis.L2AllocsEcotone, - genesis.L2AllocsDelta, - } - - var l2OOAllocsL1 foundry.ForgeAllocs - decompressGzipJSON(path.Join(configPath, "allocs-l1.json.gz"), &l2OOAllocsL1) - l1AllocsByType[AllocTypeL2OO] = &l2OOAllocsL1 - - var l2OOAddresses genesis.L1Deployments - decompressGzipJSON(path.Join(configPath, "addresses.json.gz"), &l2OOAddresses) - l1DeploymentsByType[AllocTypeL2OO] = &l2OOAddresses - - l2OODC := DeployConfig(DefaultAllocType) - l2OODC.SetDeployments(&l2OOAddresses) - deployConfigsByType[AllocTypeL2OO] = l2OODC - - l2AllocsByType[AllocTypeL2OO] = genesis.L2AllocsModeMap{} - var wg sync.WaitGroup - for _, fork := range forks { - wg.Add(1) - go func(fork genesis.L2AllocsMode) { - defer wg.Done() - var l2OOAllocsL2 foundry.ForgeAllocs - decompressGzipJSON(path.Join(configPath, fmt.Sprintf("allocs-l2-%s.json.gz", fork)), &l2OOAllocsL2) - mtx.Lock() - l2AllocsByType[AllocTypeL2OO][fork] = &l2OOAllocsL2 - mtx.Unlock() - }(fork) - } - wg.Wait() - // Use regular level going forward. 
oplog.SetGlobalLogHandler(handler) } @@ -248,6 +210,7 @@ func initAllocType(root string, allocType AllocType) { allocModes := []genesis.L2AllocsMode{ genesis.L2AllocsInterop, + genesis.L2AllocsJovian, genesis.L2AllocsIsthmus, genesis.L2AllocsHolocene, genesis.L2AllocsGranite, @@ -289,6 +252,7 @@ func initAllocType(root string, allocType AllocType) { "l2GenesisGraniteTimeOffset": nil, "l2GenesisHoloceneTimeOffset": nil, "l2GenesisIsthmusTimeOffset": nil, + "l2GenesisJovianTimeOffset": nil, // SWC changes "deploySoulGasToken": true, "isSoulBackedByNative": true, @@ -405,7 +369,7 @@ func defaultIntent(root string, loc *artifacts.Locator, deployer common.Address, "gasPriceOracleOverhead": 2100, "gasPriceOracleScalar": 1000000, "gasPriceOracleBaseFeeScalar": 1368, - "gasPriceOracleBlobBaseFeeScalar": 810949, + "gasPriceOracleBlobBaseFeeScalar": 801949, "gasPriceOracleOperatorFeeScalar": 0, "gasPriceOracleOperatorFeeConstant": 0, "l1CancunTimeOffset": "0x0", @@ -432,6 +396,7 @@ func defaultIntent(root string, loc *artifacts.Locator, deployer common.Address, Eip1559Denominator: 250, Eip1559DenominatorCanyon: 250, Eip1559Elasticity: 6, + GasLimit: standard.GasLimit, Roles: state.ChainRoles{ // Use deployer as L1PAO to deploy additional dispute impls L1ProxyAdminOwner: deployer, @@ -499,23 +464,6 @@ func ensureDir(dirPath string) error { return nil } -func decompressGzipJSON(p string, thing any) { - f, err := os.Open(p) - if err != nil { - panic(fmt.Errorf("failed to open file: %w", err)) - } - defer f.Close() - - gzr, err := gzip.NewReader(f) - if err != nil { - panic(fmt.Errorf("failed to create gzip reader: %w", err)) - } - defer gzr.Close() - if err := json.NewDecoder(gzr).Decode(thing); err != nil { - panic(fmt.Errorf("failed to read gzip data: %w", err)) - } -} - func cannonVMType(allocType AllocType) state.VMType { if allocType == AllocTypeMTCannonNext { return state.VMTypeCannonNext diff --git a/op-e2e/e2eutils/blobs.go b/op-e2e/e2eutils/blobstore/blobs.go 
similarity index 77% rename from op-e2e/e2eutils/blobs.go rename to op-e2e/e2eutils/blobstore/blobs.go index 791130470eda5..45fcc5dfdddcd 100644 --- a/op-e2e/e2eutils/blobs.go +++ b/op-e2e/e2eutils/blobstore/blobs.go @@ -1,4 +1,4 @@ -package e2eutils +package blobstore import ( "context" @@ -11,17 +11,17 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" ) -// BlobsStore is a simple in-memory store of blobs, for testing purposes -type BlobsStore struct { +// Store is a simple in-memory store of blobs, for testing purposes +type Store struct { // block timestamp -> blob versioned hash -> blob blobs map[uint64]map[eth.IndexedBlobHash]*eth.Blob } -func NewBlobStore() *BlobsStore { - return &BlobsStore{blobs: make(map[uint64]map[eth.IndexedBlobHash]*eth.Blob)} +func New() *Store { + return &Store{blobs: make(map[uint64]map[eth.IndexedBlobHash]*eth.Blob)} } -func (store *BlobsStore) StoreBlob(blockTime uint64, indexedHash eth.IndexedBlobHash, blob *eth.Blob) { +func (store *Store) StoreBlob(blockTime uint64, indexedHash eth.IndexedBlobHash, blob *eth.Blob) { m, ok := store.blobs[blockTime] if !ok { m = make(map[eth.IndexedBlobHash]*eth.Blob) @@ -30,7 +30,7 @@ func (store *BlobsStore) StoreBlob(blockTime uint64, indexedHash eth.IndexedBlob m[indexedHash] = blob } -func (store *BlobsStore) GetBlobs(ctx context.Context, ref eth.L1BlockRef, hashes []eth.IndexedBlobHash) ([]*eth.Blob, error) { +func (store *Store) GetBlobs(ctx context.Context, ref eth.L1BlockRef, hashes []eth.IndexedBlobHash) ([]*eth.Blob, error) { out := make([]*eth.Blob, 0, len(hashes)) m, ok := store.blobs[ref.Time] if !ok { @@ -46,7 +46,7 @@ func (store *BlobsStore) GetBlobs(ctx context.Context, ref eth.L1BlockRef, hashe return out, nil } -func (store *BlobsStore) GetBlobSidecars(ctx context.Context, ref eth.L1BlockRef, hashes []eth.IndexedBlobHash) ([]*eth.BlobSidecar, error) { +func (store *Store) GetBlobSidecars(ctx context.Context, ref eth.L1BlockRef, hashes []eth.IndexedBlobHash) 
([]*eth.BlobSidecar, error) { out := make([]*eth.BlobSidecar, 0, len(hashes)) m, ok := store.blobs[ref.Time] if !ok { @@ -79,7 +79,7 @@ func (store *BlobsStore) GetBlobSidecars(ctx context.Context, ref eth.L1BlockRef return out, nil } -func (store *BlobsStore) GetAllSidecars(ctx context.Context, l1Timestamp uint64) ([]*eth.BlobSidecar, error) { +func (store *Store) GetAllSidecars(ctx context.Context, l1Timestamp uint64) ([]*eth.BlobSidecar, error) { m, ok := store.blobs[l1Timestamp] if !ok { return nil, fmt.Errorf("no blobs known with given time: %w", ethereum.NotFound) @@ -108,4 +108,4 @@ func (store *BlobsStore) GetAllSidecars(ctx context.Context, l1Timestamp uint64) return out, nil } -var _ derive.L1BlobsFetcher = (*BlobsStore)(nil) +var _ derive.L1BlobsFetcher = (*Store)(nil) diff --git a/op-e2e/e2eutils/challenger/helper.go b/op-e2e/e2eutils/challenger/helper.go index fbe61f691a55b..2b4816bd53e26 100644 --- a/op-e2e/e2eutils/challenger/helper.go +++ b/op-e2e/e2eutils/challenger/helper.go @@ -43,6 +43,7 @@ type EndpointProvider interface { type System interface { RollupCfgs() []*rollup.Config + L1Genesis() *core.Genesis L2Geneses() []*core.Genesis PrestateVariant() shared.PrestateVariant } @@ -92,6 +93,18 @@ func WithPollInterval(pollInterval time.Duration) Option { } } +func WithResponseDelay(responseDelay time.Duration) Option { + return func(c *config.Config) { + c.ResponseDelay = responseDelay + } +} + +func WithResponseDelayAfter(responseDelayAfter uint64) Option { + return func(c *config.Config) { + c.ResponseDelayAfter = responseDelayAfter + } +} + func WithValidPrestateRequired() Option { return func(c *config.Config) { c.AllowInvalidPrestate = false @@ -121,21 +134,21 @@ func handleOptError(t *testing.T, opt shared.Option) Option { } func WithCannon(t *testing.T, system System) Option { return func(c *config.Config) { - handleOptError(t, shared.WithCannonConfig(system.RollupCfgs(), system.L2Geneses(), system.PrestateVariant()))(c) + handleOptError(t, 
shared.WithCannonConfig(system.RollupCfgs(), system.L1Genesis(), system.L2Geneses(), system.PrestateVariant()))(c) handleOptError(t, shared.WithCannonTraceType())(c) } } func WithPermissioned(t *testing.T, system System) Option { return func(c *config.Config) { - handleOptError(t, shared.WithCannonConfig(system.RollupCfgs(), system.L2Geneses(), system.PrestateVariant()))(c) + handleOptError(t, shared.WithCannonConfig(system.RollupCfgs(), system.L1Genesis(), system.L2Geneses(), system.PrestateVariant()))(c) handleOptError(t, shared.WithPermissionedTraceType())(c) } } func WithSuperCannon(t *testing.T, system System) Option { return func(c *config.Config) { - handleOptError(t, shared.WithCannonConfig(system.RollupCfgs(), system.L2Geneses(), system.PrestateVariant()))(c) + handleOptError(t, shared.WithCannonConfig(system.RollupCfgs(), system.L1Genesis(), system.L2Geneses(), system.PrestateVariant()))(c) handleOptError(t, shared.WithSuperCannonTraceType())(c) } } diff --git a/op-e2e/e2eutils/contracts/src/emit.sol b/op-e2e/e2eutils/contracts/src/emit.sol index 5464bb7bbe6e6..0b2b76fef2ca7 100644 --- a/op-e2e/e2eutils/contracts/src/emit.sol +++ b/op-e2e/e2eutils/contracts/src/emit.sol @@ -3,7 +3,7 @@ pragma solidity ^0.8.15; contract EmitEvent { // Define an event that logs the emitted data - event DataEmitted(bytes indexed _data); + event DataEmitted(bytes indexed data); // Function that takes calldata and emits the data as an event function emitData(bytes calldata _data) external { diff --git a/op-e2e/e2eutils/disputegame/helper.go b/op-e2e/e2eutils/disputegame/helper.go index d3b061f1f9857..b98a200b639b7 100644 --- a/op-e2e/e2eutils/disputegame/helper.go +++ b/op-e2e/e2eutils/disputegame/helper.go @@ -89,6 +89,7 @@ type DisputeSystem interface { DisputeGameFactoryAddr() common.Address RollupCfgs() []*rollup.Config DependencySet() *depset.StaticConfigDependencySet + L1Genesis() *core.Genesis L2Geneses() []*core.Genesis PrestateVariant() shared.PrestateVariant diff 
--git a/op-e2e/e2eutils/disputegame/super_cannon_helper.go b/op-e2e/e2eutils/disputegame/super_cannon_helper.go index 3d14883eb4a65..0a0967dfac92c 100644 --- a/op-e2e/e2eutils/disputegame/super_cannon_helper.go +++ b/op-e2e/e2eutils/disputegame/super_cannon_helper.go @@ -194,3 +194,29 @@ func (g *SuperCannonGameHelper) createSuperTraceProvider(ctx context.Context) *s require.NoError(g.T, err, "failed to create rollup configs") return super.NewSuperTraceProvider(logger, rollupCfgs, prestateProvider, rootProvider, l1Head, splitDepth, prestateTimestamp, poststateTimestamp) } + +// InitFirstDerivationGame builds a top-level game whose deepest node (at splitDepth) asserts the first +// output-root derivation that follows the prestate (timestamp=1, step<=1). +// Returns the claim positioned at splitDepth, which is the parent of the constructed execution subgame root. +func (g *SuperCannonGameHelper) InitFirstDerivationGame(ctx context.Context, correctTrace *OutputHonestHelper) *ClaimHelper { + splitDepth := g.SplitDepth(ctx) + g.Require.EqualValues(splitDepth, 30, "this operation assumes a specific split depth") + claim := g.RootClaim(ctx) + + // We identify the one required right bisection that ensures that an execution game is positioned to derive the first output root + // This occurs at splitDepth-log(StepsPerTimestamp). 
+ for { + if claim.Depth() == splitDepth-8 { + claim = correctTrace.AttackClaim(ctx, claim) // invalid attack to ensure that the honest actor bisects right + claim = correctTrace.DefendClaim(ctx, claim) + } else { + claim = claim.Attack(ctx, common.Hash{0x01}) + claim = correctTrace.AttackClaim(ctx, claim) + } + g.LogGameData(ctx) + if claim.Depth() == splitDepth { + break + } + } + return claim +} diff --git a/op-e2e/e2eutils/disputegame/super_dispute_system.go b/op-e2e/e2eutils/disputegame/super_dispute_system.go index f13ade2158a7a..f3093066cb661 100644 --- a/op-e2e/e2eutils/disputegame/super_dispute_system.go +++ b/op-e2e/e2eutils/disputegame/super_dispute_system.go @@ -99,6 +99,10 @@ func (s *SuperDisputeSystem) DependencySet() *depset.StaticConfigDependencySet { return s.sys.DependencySet() } +func (s *SuperDisputeSystem) L1Genesis() *core.Genesis { + return s.sys.L1Genesis() +} + func (s *SuperDisputeSystem) L2Geneses() []*core.Genesis { networks := s.sys.L2IDs() cfgs := make([]*core.Genesis, len(networks)) diff --git a/op-e2e/e2eutils/fakebeacon/blobs.go b/op-e2e/e2eutils/fakebeacon/blobs.go index 59dc43416e7fb..f6214d3c54710 100644 --- a/op-e2e/e2eutils/fakebeacon/blobs.go +++ b/op-e2e/e2eutils/fakebeacon/blobs.go @@ -13,7 +13,7 @@ import ( "sync" "time" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/blobstore" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/beacon/engine" @@ -29,7 +29,7 @@ type FakeBeacon struct { log log.Logger // in-memory blob store - blobStore *e2eutils.BlobsStore + blobStore *blobstore.Store blobsLock sync.Mutex beaconSrv *http.Server @@ -39,7 +39,7 @@ type FakeBeacon struct { blockTime uint64 } -func NewBeacon(log log.Logger, blobStore *e2eutils.BlobsStore, genesisTime uint64, blockTime uint64) *FakeBeacon { +func NewBeacon(log log.Logger, blobStore *blobstore.Store, genesisTime uint64, 
blockTime uint64) *FakeBeacon { return &FakeBeacon{ log: log, blobStore: blobStore, diff --git a/op-e2e/e2eutils/geth/fakepos.go b/op-e2e/e2eutils/geth/fakepos.go index 2c75c47c6a6b5..b84c9216a7337 100644 --- a/op-e2e/e2eutils/geth/fakepos.go +++ b/op-e2e/e2eutils/geth/fakepos.go @@ -1,8 +1,10 @@ package geth import ( + "context" "encoding/binary" "errors" + "fmt" "math/big" "math/rand" "time" @@ -13,10 +15,10 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth" - "github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum-optimism/optimism/op-service/clock" opeth "github.com/ethereum-optimism/optimism/op-service/eth" @@ -31,7 +33,7 @@ type Beacon interface { // to build a fake proof-of-stake L1 chain with fixed block time and basic lagging safe/finalized blocks. type FakePoS struct { clock clock.Clock - eth *eth.Ethereum + eth Backend log log.Logger blockTime uint64 @@ -40,10 +42,45 @@ type FakePoS struct { finalizedDistance uint64 safeDistance uint64 - engineAPI *catalyst.ConsensusAPI + engineAPI EngineAPI sub ethereum.Subscription beacon Beacon + + config *params.ChainConfig +} + +type Backend interface { + // HeaderByNumber is assumed to behave the same as go-ethereum/ethclient.Client.HeaderByNumber. 
+ HeaderByNumber(context.Context, *big.Int) (*types.Header, error) +} + +type EngineAPI interface { + ForkchoiceUpdatedV3(engine.ForkchoiceStateV1, *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) + ForkchoiceUpdatedV2(engine.ForkchoiceStateV1, *engine.PayloadAttributes) (engine.ForkChoiceResponse, error) + + GetPayloadV5(engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) + GetPayloadV4(engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) + GetPayloadV3(engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) + GetPayloadV2(engine.PayloadID) (*engine.ExecutionPayloadEnvelope, error) + + NewPayloadV4(engine.ExecutableData, []common.Hash, *common.Hash, []hexutil.Bytes) (engine.PayloadStatusV1, error) + NewPayloadV3(engine.ExecutableData, []common.Hash, *common.Hash) (engine.PayloadStatusV1, error) + NewPayloadV2(engine.ExecutableData) (engine.PayloadStatusV1, error) +} + +func NewFakePoS(backend Backend, engineAPI EngineAPI, c clock.Clock, logger log.Logger, blockTime uint64, finalizedDistance uint64, beacon Beacon, config *params.ChainConfig) *FakePoS { + return &FakePoS{ + clock: c, + eth: backend, + log: logger, + blockTime: blockTime, + finalizedDistance: finalizedDistance, + safeDistance: 10, + engineAPI: engineAPI, + beacon: beacon, + config: config, + } } func (f *FakePoS) FakeBeaconBlockRoot(time uint64) common.Hash { @@ -57,27 +94,42 @@ func (f *FakePoS) Start() error { advancing.Start() } withdrawalsRNG := rand.New(rand.NewSource(450368975843)) // avoid generating the same address as any test + genesisHeader, err := f.eth.HeaderByNumber(context.Background(), new(big.Int)) + if err != nil { + return fmt.Errorf("get genesis header: %w", err) + } f.sub = event.NewSubscription(func(quit <-chan struct{}) error { // poll every half a second: enough to catch up with any block time when ticks are missed t := f.clock.NewTicker(time.Second / 2) for { select { case now := <-t.Ch(): - chain := f.eth.BlockChain() - head := 
chain.CurrentBlock() - finalized := chain.CurrentFinalBlock() - if finalized == nil { // fallback to genesis if nothing is finalized - finalized = chain.Genesis().Header() - } - safe := chain.CurrentSafeBlock() - if safe == nil { // fallback to finalized if nothing is safe + head, err := f.eth.HeaderByNumber(context.Background(), nil) + if err != nil { + f.log.Warn("Failed to obtain latest header", "err", err) + continue + } + finalized, err := f.eth.HeaderByNumber(context.Background(), big.NewInt(int64(rpc.FinalizedBlockNumber))) + if err != nil { + finalized = genesisHeader // fallback to genesis if nothing is finalized + } + safe, err := f.eth.HeaderByNumber(context.Background(), big.NewInt(int64(rpc.SafeBlockNumber))) + if err != nil { // fallback to finalized if nothing is safe safe = finalized } if head.Number.Uint64() > f.finalizedDistance { // progress finalized block, if we can - finalized = f.eth.BlockChain().GetHeaderByNumber(head.Number.Uint64() - f.finalizedDistance) + finalized, err = f.eth.HeaderByNumber(context.Background(), new(big.Int).SetUint64(head.Number.Uint64()-f.finalizedDistance)) + if err != nil { + f.log.Warn("Failed to finalized header", "err", err) + continue + } } if head.Number.Uint64() > f.safeDistance { // progress safe block, if we can - safe = f.eth.BlockChain().GetHeaderByNumber(head.Number.Uint64() - f.safeDistance) + safe, err = f.eth.HeaderByNumber(context.Background(), new(big.Int).SetUint64(head.Number.Uint64()-f.safeDistance)) + if err != nil { + f.log.Warn("Failed to safe header", "err", err) + continue + } } // start building the block as soon as we are past the current head time if head.Time >= uint64(now.Unix()) { @@ -106,8 +158,10 @@ func (f *FakePoS) Start() error { Withdrawals: withdrawals, } parentBeaconBlockRoot := f.FakeBeaconBlockRoot(head.Time) // parent beacon block root - isCancun := f.eth.BlockChain().Config().IsCancun(new(big.Int).SetUint64(head.Number.Uint64()+1), newBlockTime) - isPrague := 
f.eth.BlockChain().Config().IsPrague(new(big.Int).SetUint64(head.Number.Uint64()+1), newBlockTime) + nextHeight := new(big.Int).SetUint64(head.Number.Uint64() + 1) + isCancun := f.config.IsCancun(nextHeight, newBlockTime) + isPrague := f.config.IsPrague(nextHeight, newBlockTime) + isOsaka := f.config.IsOsaka(nextHeight, newBlockTime) if isCancun { attrs.BeaconRoot = &parentBeaconBlockRoot } @@ -116,7 +170,6 @@ func (f *FakePoS) Start() error { SafeBlockHash: safe.Hash(), FinalizedBlockHash: finalized.Hash(), } - var err error var res engine.ForkChoiceResponse if isCancun { res, err = f.engineAPI.ForkchoiceUpdatedV3(fcState, attrs) @@ -142,7 +195,9 @@ func (f *FakePoS) Start() error { return nil } var envelope *engine.ExecutionPayloadEnvelope - if isPrague { + if isOsaka { + envelope, err = f.engineAPI.GetPayloadV5(*res.PayloadID) + } else if isPrague { envelope, err = f.engineAPI.GetPayloadV4(*res.PayloadID) } else if isCancun { envelope, err = f.engineAPI.GetPayloadV3(*res.PayloadID) @@ -182,7 +237,7 @@ func (f *FakePoS) Start() error { } if envelope.BlobsBundle != nil { - slot := (envelope.ExecutionPayload.Timestamp - f.eth.BlockChain().Genesis().Time()) / f.blockTime + slot := (envelope.ExecutionPayload.Timestamp - genesisHeader.Time) / f.blockTime if f.beacon == nil { f.log.Error("no blobs storage available") continue diff --git a/op-e2e/e2eutils/geth/geth.go b/op-e2e/e2eutils/geth/geth.go index e995c5902028d..f11e3b1a05263 100644 --- a/op-e2e/e2eutils/geth/geth.go +++ b/op-e2e/e2eutils/geth/geth.go @@ -1,15 +1,19 @@ package geth import ( + "context" "fmt" + "math/big" "time" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/txpool/blobpool" "github.com/ethereum/go-ethereum/core/txpool/legacypool" + "github.com/ethereum/go-ethereum/core/types" 
"github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/eth/ethconfig" @@ -17,6 +21,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/rpc" // Force-load the tracer engines to trigger registration _ "github.com/ethereum/go-ethereum/eth/tracers/js" @@ -59,16 +64,9 @@ func InitL1(blockTime uint64, finalizedDistance uint64, genesis *core.Genesis, c return nil, nil, err } - fakepos := &FakePoS{ - clock: c, - eth: gethInstance.Backend, - log: log.Root(), // geth logger is global anyway. Would be nice to replace with a local logger though. - blockTime: blockTime, - finalizedDistance: finalizedDistance, - safeDistance: 10, - engineAPI: catalyst.NewConsensusAPI(gethInstance.Backend), - beacon: beaconSrv, - } + fakepos := NewFakePoS(&gethBackend{ + chain: gethInstance.Backend.BlockChain(), + }, catalyst.NewConsensusAPI(gethInstance.Backend), c, log.Root(), blockTime, finalizedDistance, beaconSrv, gethInstance.Backend.BlockChain().Config()) // Instead of running a whole beacon node, we run this fake-proof-of-stake sidecar that sequences L1 blocks using the Engine API. 
gethInstance.Node.RegisterLifecycle(fakepos) @@ -76,6 +74,42 @@ func InitL1(blockTime uint64, finalizedDistance uint64, genesis *core.Genesis, c return gethInstance, fakepos, nil } +func WithAuth(jwtPath string) GethOption { + return func(_ *ethconfig.Config, nodeCfg *node.Config) error { + nodeCfg.AuthAddr = "127.0.0.1" + nodeCfg.AuthPort = 0 + nodeCfg.JWTSecret = jwtPath + return nil + } +} + +type gethBackend struct { + chain *core.BlockChain +} + +func (b *gethBackend) HeaderByNumber(_ context.Context, num *big.Int) (*types.Header, error) { + if num == nil { + return b.chain.CurrentBlock(), nil + } + var h *types.Header + if num.IsInt64() && num.Int64() < 0 { + switch num.Int64() { + case int64(rpc.LatestBlockNumber): + h = b.chain.CurrentBlock() + case int64(rpc.SafeBlockNumber): + h = b.chain.CurrentSafeBlock() + case int64(rpc.FinalizedBlockNumber): + h = b.chain.CurrentFinalBlock() + } + } else { + h = b.chain.GetHeaderByNumber(num.Uint64()) + } + if h == nil { + return nil, ethereum.NotFound + } + return h, nil +} + func defaultNodeConfig(name string, jwtPath string) *node.Config { return &node.Config{ Name: name, @@ -117,7 +151,8 @@ func InitL2(name string, genesis *core.Genesis, jwtPath string, opts ...GethOpti // createGethNode creates an in-memory geth node based on the configuration. // The private keys are added to the keystore and are unlocked. -// If the node is l2, catalyst is enabled. +// Catalyst is always enabled. If the node is an L1, the catalyst API can be used by alternative +// sequencers (e.g., op-test-sequencer) if the default FakePoS is stopped. // The node should be started and then closed when done. 
func createGethNode(l2 bool, nodeCfg *node.Config, ethCfg *ethconfig.Config, opts ...GethOption) (*GethInstance, error) { for i, opt := range opts { @@ -146,12 +181,9 @@ func createGethNode(l2 bool, nodeCfg *node.Config, ethCfg *ethconfig.Config, opt n.RegisterAPIs(tracers.APIs(backend.APIBackend)) - // Enable catalyst if l2 - if l2 { - if err := catalyst.Register(n, backend); err != nil { - n.Close() - return nil, err - } + if err := catalyst.Register(n, backend); err != nil { + n.Close() + return nil, err } return &GethInstance{ Backend: backend, diff --git a/op-e2e/e2eutils/intentbuilder/builder.go b/op-e2e/e2eutils/intentbuilder/builder.go index 4c6837e94fcfc..be8d4d8c8eae2 100644 --- a/op-e2e/e2eutils/intentbuilder/builder.go +++ b/op-e2e/e2eutils/intentbuilder/builder.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-chain-ops/addresses" "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" @@ -28,6 +29,9 @@ type L1Configurator interface { WithGasLimit(v uint64) L1Configurator WithExcessBlobGas(v uint64) L1Configurator WithPragueOffset(v uint64) L1Configurator + WithOsakaOffset(v uint64) L1Configurator + WithBPO1Offset(v uint64) L1Configurator + WithL1BlobSchedule(schedule *params.BlobScheduleConfig) L1Configurator WithPrefundedAccount(addr common.Address, amount uint256.Int) L1Configurator } @@ -156,7 +160,6 @@ func RoleToAddrProvider(t require.TestingT, dk devkeys.Keys, chainID eth.ChainID } type intentBuilder struct { - t require.TestingT l1StartBlockHash *common.Hash intent *state.Intent } @@ -195,6 +198,7 @@ func (b *intentBuilder) WithL2(l2ChainID eth.ChainID) (Builder, L2Configurator) Eip1559DenominatorCanyon: standard.Eip1559DenominatorCanyon, Eip1559Denominator: standard.Eip1559Denominator, Eip1559Elasticity: standard.Eip1559Elasticity, + GasLimit: standard.GasLimit, DeployOverrides: 
make(map[string]any), } b.intent.Chains = append(b.intent.Chains, chainIntent) @@ -219,7 +223,9 @@ func (b *intentBuilder) WithGlobalOverride(key string, value any) Builder { } func (b *intentBuilder) Build() (*state.Intent, error) { - require.NoError(b.t, b.intent.Check(), "invalid intent") + if err := b.intent.Check(); err != nil { + return nil, fmt.Errorf("check intent: %w", err) + } return b.intent, nil } @@ -301,6 +307,24 @@ func (c *l1Configurator) WithPragueOffset(v uint64) L1Configurator { return c } +func (c *l1Configurator) WithOsakaOffset(v uint64) L1Configurator { + c.initL1DevGenesisParams() + c.builder.intent.L1DevGenesisParams.OsakaTimeOffset = &v + return c +} + +func (c *l1Configurator) WithBPO1Offset(v uint64) L1Configurator { + c.initL1DevGenesisParams() + c.builder.intent.L1DevGenesisParams.BPO1TimeOffset = &v + return c +} + +func (c *l1Configurator) WithL1BlobSchedule(schedule *params.BlobScheduleConfig) L1Configurator { + c.initL1DevGenesisParams() + c.builder.intent.L1DevGenesisParams.BlobSchedule = schedule + return c +} + func (c *l1Configurator) WithPrefundedAccount(addr common.Address, amount uint256.Int) L1Configurator { c.initL1DevGenesisParams() c.builder.intent.L1DevGenesisParams.Prefund[addr] = (*hexutil.U256)(&amount) diff --git a/op-e2e/e2eutils/intentbuilder/builder_test.go b/op-e2e/e2eutils/intentbuilder/builder_test.go index eeffb15a14d9e..d98eec5ce387f 100644 --- a/op-e2e/e2eutils/intentbuilder/builder_test.go +++ b/op-e2e/e2eutils/intentbuilder/builder_test.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum-optimism/optimism/op-chain-ops/addresses" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/state" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -35,6 +36,8 @@ func TestBuilder(t *testing.T) { 
// Configure L1 pragueOffset := uint64(100) + osakaOffset := uint64(200) + bpo1Offset := uint64(300) alice := common.HexToAddress("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") aliceFunds := uint256.NewInt(10000) l1Params := state.L1DevGenesisParams{ @@ -44,6 +47,8 @@ func TestBuilder(t *testing.T) { ExcessBlobGas: 123, }, PragueTimeOffset: &pragueOffset, + OsakaTimeOffset: &osakaOffset, + BPO1TimeOffset: &bpo1Offset, Prefund: map[common.Address]*hexutil.U256{ alice: (*hexutil.U256)(aliceFunds), }, @@ -54,6 +59,8 @@ func TestBuilder(t *testing.T) { l1Config.WithGasLimit(l1Params.BlockParams.GasLimit) l1Config.WithExcessBlobGas(l1Params.BlockParams.ExcessBlobGas) l1Config.WithPragueOffset(*l1Params.PragueTimeOffset) + l1Config.WithOsakaOffset(*l1Params.OsakaTimeOffset) + l1Config.WithBPO1Offset(*l1Params.BPO1TimeOffset) l1Config.WithPrefundedAccount(alice, *aliceFunds) // Configure L2 @@ -157,6 +164,7 @@ func TestBuilder(t *testing.T) { Eip1559DenominatorCanyon: 250, Eip1559Denominator: 50, Eip1559Elasticity: 10, + GasLimit: standard.GasLimit, OperatorFeeScalar: 100, OperatorFeeConstant: 200, DeployOverrides: map[string]any{ diff --git a/op-e2e/e2eutils/opnode/opnode.go b/op-e2e/e2eutils/opnode/opnode.go index 675b82f4c11b1..3a8cf3b6963c4 100644 --- a/op-e2e/e2eutils/opnode/opnode.go +++ b/op-e2e/e2eutils/opnode/opnode.go @@ -55,6 +55,9 @@ func (o *Opnode) P2P() p2p.Node { var _ services.RollupNode = (*Opnode)(nil) func NewOpnode(l log.Logger, c *config.Config, errFn func(error)) (*Opnode, error) { + if err := c.Check(); err != nil { + return nil, err + } var cycle cliapp.Lifecycle c.Cancel = func(errCause error) { l.Warn("node requested early shutdown!", "err", errCause) diff --git a/op-e2e/faultproofs/response_delay_test.go b/op-e2e/faultproofs/response_delay_test.go new file mode 100644 index 0000000000000..2e809345aca33 --- /dev/null +++ b/op-e2e/faultproofs/response_delay_test.go @@ -0,0 +1,130 @@ +package faultproofs + +import ( + "context" + "testing" + "time" 
+ + op_e2e "github.com/ethereum-optimism/optimism/op-e2e" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/challenger" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/disputegame" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +// TestChallengerResponseDelay tests that the challenger respects the configured response delay +// This is a sanity check integration test that verifies minimum delay timing is honored +func TestChallengerResponseDelay(t *testing.T) { + op_e2e.InitParallel(t) + + // Test with different delay configurations + testCases := []struct { + name string + delay time.Duration + minTime time.Duration // Minimum expected time for challenger response + }{ + { + name: "NoDelay", + delay: 0, + minTime: 0, // No minimum delay expected + }, + { + name: "ShortDelay", + delay: 2 * time.Second, + minTime: 2 * time.Second, // Must take at least the configured delay + }, + { + name: "MediumDelay", + delay: 5 * time.Second, + minTime: 5 * time.Second, // Must take at least the configured delay + }, + } + + for _, tc := range testCases { + tc := tc // capture loop variable + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + sys, _ := StartFaultDisputeSystem(t) + t.Cleanup(sys.Close) + + // Create a dispute game with incorrect root to trigger challenger response + disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) + game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, common.Hash{0xaa, 0xbb, 0xcc}) + + // Make an invalid claim that the honest challenger should counter + invalidClaim := game.RootClaim(ctx) + + // Record time before starting challenger + startTime := time.Now() + + // Start challenger with response delay + game.StartChallenger(ctx, "sequencer", "DelayedChallenger", + challenger.WithAlphabet(), + challenger.WithPrivKey(sys.Cfg.Secrets.Alice), + challenger.WithResponseDelay(tc.delay), + challenger.WithPollInterval(100*time.Millisecond), // Fast 
polling to ensure delay isn't from polling + ) + + // Wait for challenger to respond to the invalid root claim + counterClaim := invalidClaim.WaitForCounterClaim(ctx) + responseTime := time.Since(startTime) + + // Sanity check: verify minimum delay is respected (includes polling time and system overhead) + require.GreaterOrEqualf(t, responseTime, tc.minTime, + "Challenger responded too quickly (expected >= %v, got %v)", tc.minTime, responseTime) + + // Verify the counter claim is valid (challenger actually responded correctly) + require.NotNil(t, counterClaim, "Challenger should have posted a counter claim") + counterClaim.RequireCorrectOutputRoot(ctx) + }) + } +} + +// TestChallengerResponseDelayWithMultipleActions tests that delay applies to each individual action +func TestChallengerResponseDelayWithMultipleActions(t *testing.T) { + op_e2e.InitParallel(t) + + if testing.Short() { + t.Skip("Skipping multi-action test during short run") + } + + ctx := context.Background() + sys, _ := StartFaultDisputeSystem(t) + t.Cleanup(sys.Close) + + responseDelay := 2 * time.Second + + disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) + game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, common.Hash{0xaa, 0xbb, 0xcc}) + + // Start challenger with response delay + game.StartChallenger(ctx, "sequencer", "DelayedChallenger", + challenger.WithAlphabet(), + challenger.WithPrivKey(sys.Cfg.Secrets.Alice), + challenger.WithResponseDelay(responseDelay), + challenger.WithPollInterval(100*time.Millisecond), + ) + + // Track multiple challenger responses and their timing + var responseTimes []time.Duration + + // First response to root claim + claim := game.RootClaim(ctx) + startTime := time.Now() + claim = claim.WaitForCounterClaim(ctx) + responseTimes = append(responseTimes, time.Since(startTime)) + + // Second response - attack the challenger's claim to trigger another response + startTime = time.Now() + claim = claim.Attack(ctx, common.Hash{0x01}) + 
claim.WaitForCounterClaim(ctx) + responseTimes = append(responseTimes, time.Since(startTime)) + + // Sanity check: verify each response took at least the minimum delay + for i, responseTime := range responseTimes { + require.GreaterOrEqualf(t, responseTime, responseDelay, + "Response %d was too fast (expected >= %v, got %v)", i+1, responseDelay, responseTime) + } + + require.Len(t, responseTimes, 2, "Should have measured 2 response times") +} diff --git a/op-e2e/faultproofs/super_test.go b/op-e2e/faultproofs/super_test.go index 11355e1e130cb..0944f0d97dced 100644 --- a/op-e2e/faultproofs/super_test.go +++ b/op-e2e/faultproofs/super_test.go @@ -238,9 +238,6 @@ func TestSuperCannonStepWithPreimage_nonExistingPreimage(t *testing.T) { } RunTestsAcrossVmTypes(t, preimageConditions, func(t *testing.T, allocType config.AllocType, preimageType string) { - if preimageType == "blob" || preimageType == "sha256" { - t.Skip("TODO(#15311): Add blob preimage test case. sha256 is also used for blobs") - } testSuperPreimageStep(t, utils.FirstPreimageLoadOfType(preimageType), false, allocType) }, WithNextVMOnly[string](), WithTestName(testName)) } @@ -254,15 +251,17 @@ func TestSuperCannonStepWithPreimage_existingPreimage(t *testing.T) { func testSuperPreimageStep(t *testing.T, preimageType utils.PreimageOpt, preloadPreimage bool, allocType config.AllocType) { ctx := context.Background() - sys, disputeGameFactory, _ := StartInteropFaultDisputeSystem(t, WithAllocType(allocType)) + sys, disputeGameFactory, _ := StartInteropFaultDisputeSystem(t, WithBlobBatches(), WithAllocType(allocType)) status, err := sys.SupervisorClient().SyncStatus(ctx) require.NoError(t, err) - l2Timestamp := status.SafeTimestamp + l2Timestamp := status.SafeTimestamp + 40 game := disputeGameFactory.StartSuperCannonGameWithCorrectRootAtTimestamp(ctx, l2Timestamp) - topGameLeaf := game.DisputeLastBlock(ctx) - game.LogGameData(ctx) + correctTrace := game.CreateHonestActor(ctx, 
disputegame.WithPrivKey(malloryKey(t)), func(c *disputegame.HonestActorConfig) { + c.ChallengerOpts = append(c.ChallengerOpts, challenger.WithDepset(t, sys.DependencySet())) + }) + topGameLeaf := game.InitFirstDerivationGame(ctx, correctTrace) game.StartChallenger(ctx, "Challenger", challenger.WithPrivKey(aliceKey(t)), challenger.WithDepset(t, sys.DependencySet())) @@ -270,6 +269,7 @@ func testSuperPreimageStep(t *testing.T, preimageType utils.PreimageOpt, preload // This presents an opportunity for the challenger to step on our dishonest claim at the bottom. // This assumes the execution game depth is even. But if it is odd, then this test should be set up more like the FDG counter part. topGameLeaf = topGameLeaf.Attack(ctx, common.Hash{0x01}) + game.LogGameData(ctx) // Now the honest challenger is positioned as the defender of the execution game. We then move to challenge it to induce a preimage load preimageLoadCheck := game.CreateStepPreimageLoadCheck(ctx) diff --git a/op-e2e/faultproofs/util.go b/op-e2e/faultproofs/util.go index 2654ef5b05025..31e76cb3b0e88 100644 --- a/op-e2e/faultproofs/util.go +++ b/op-e2e/faultproofs/util.go @@ -3,6 +3,9 @@ package faultproofs import ( "crypto/ecdsa" "fmt" + "os" + "strconv" + "sync" "testing" op_e2e "github.com/ethereum-optimism/optimism/op-e2e" @@ -60,6 +63,8 @@ func WithLatestFork() faultDisputeConfigOpts { cfg.DeployConfig.L2GenesisGraniteTimeOffset = &genesisActivation cfg.DeployConfig.L2GenesisHoloceneTimeOffset = &genesisActivation cfg.DeployConfig.L2GenesisIsthmusTimeOffset = &genesisActivation + // TODO(#17348): Jovian is not supported in op-e2e tests yet + //cfg.DeployConfig.L2GenesisJovianTimeOffset = &genesisActivation }) } } @@ -190,8 +195,49 @@ func RunTestsAcrossVmTypes[T any](t *testing.T, testCases []T, test VMTestCase[T testName := options.testNameModifier(string(allocType), testCase) t.Run(testName, func(t *testing.T) { op_e2e.InitParallel(t, op_e2e.UsesCannon) - test(t, allocType, testCase) + func() { + 
limiter.Acquire() + defer limiter.Release() + test(t, allocType, testCase) + }() }) } } } + +var executorLimitEnv = os.Getenv("OP_E2E_EXECUTOR_LIMIT") + +type executorLimiter struct { + ch chan struct{} +} + +func (l *executorLimiter) Acquire() { + // TODO: sample memory usage over time to admit more tests and reduce total runtime. + initExecutorLimiter() + l.ch <- struct{}{} +} + +func (l *executorLimiter) Release() { + <-l.ch +} + +var limiter executorLimiter +var limiterOnce sync.Once + +func initExecutorLimiter() { + limiterOnce.Do(func() { + var executorLimit uint64 + if executorLimitEnv != "" { + var err error + executorLimit, err = strconv.ParseUint(executorLimitEnv, 10, 0) + if err != nil { + panic(fmt.Sprintf("Could not parse OP_E2E_EXECUTOR_LIMIT env var %v: %v", executorLimitEnv, err)) + } + } else { + // faultproof tests may use 1 GiB of memory. So let's be very conservative and aggressively limit the number of test executions + // considering other processes running on the same machine. 
+ executorLimit = 16 + } + limiter = executorLimiter{ch: make(chan struct{}, executorLimit)} + }) +} diff --git a/op-e2e/interop/interop_test.go b/op-e2e/interop/interop_test.go index 5d61a9321118d..ba014c5f1c2f7 100644 --- a/op-e2e/interop/interop_test.go +++ b/op-e2e/interop/interop_test.go @@ -425,7 +425,7 @@ func TestProposals(t *testing.T) { require.NotNil(t, proposer.DisputeGameFactoryAddr) gameFactoryAddr := *proposer.DisputeGameFactoryAddr - rpcClient, err := dial.DialRPCClientWithTimeout(context.Background(), time.Minute, logger, s2.L1().UserRPC().RPC()) + rpcClient, err := dial.DialRPCClientWithTimeout(context.Background(), logger, s2.L1().UserRPC().RPC()) require.NoError(t, err) caller := batching.NewMultiCaller(rpcClient, batching.DefaultBatchSize) factory := contracts.NewDisputeGameFactoryContract(metrics.NoopContractMetrics, gameFactoryAddr, caller) diff --git a/op-e2e/interop/supersystem.go b/op-e2e/interop/supersystem.go index b8ec4c2836b7c..9c859172067a4 100644 --- a/op-e2e/interop/supersystem.go +++ b/op-e2e/interop/supersystem.go @@ -29,14 +29,13 @@ import ( "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" "github.com/ethereum-optimism/optimism/op-chain-ops/interopgen" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/blobstore" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/contracts/bindings/emit" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/contracts/bindings/inbox" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/fakebeacon" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" "github.com/ethereum-optimism/optimism/op-e2e/system/helpers" l2os "github.com/ethereum-optimism/optimism/op-proposer/proposer" - "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/clock" "github.com/ethereum-optimism/optimism/op-service/dial" 
"github.com/ethereum-optimism/optimism/op-service/endpoint" @@ -88,6 +87,7 @@ type SuperSystem interface { // L2 level ChainID(network string) *big.Int RollupConfig(network string) *rollup.Config + L1Genesis() *core.Genesis L2Genesis(network string) *core.Genesis UserKey(network, username string) ecdsa.PrivateKey L2OperatorKey(network string, role devkeys.ChainOperatorRole) ecdsa.PrivateKey @@ -176,6 +176,7 @@ func (s *interopE2ESystem) prepareWorld(w WorldResourcePaths) (*interopgen.World for _, l2Cfg := range worldCfg.L2s { require.NotNil(s.t, l2Cfg.L2GenesisIsthmusTimeOffset, "expecting isthmus fork to be enabled for interop deployments") + require.NotNil(s.t, l2Cfg.L2GenesisIsthmusTimeOffset, "expecting jovian fork to be enabled for interop deployments") } // create a logger for the world configuration @@ -200,7 +201,7 @@ func (s *interopE2ESystem) prepareL1() (*fakebeacon.FakeBeacon, *geth.GethInstan blockTimeL1 := uint64(6) blobPath := s.t.TempDir() bcn := fakebeacon.NewBeacon(s.logger.New("role", "l1_cl"), - e2eutils.NewBlobStore(), genesisTimestampL1, blockTimeL1) + blobstore.New(), genesisTimestampL1, blockTimeL1) s.t.Cleanup(func() { _ = bcn.Close() }) @@ -325,9 +326,8 @@ func (s *interopE2ESystem) SupervisorClient() *sources.SupervisorClient { if s.superClient != nil { return s.superClient } - cl, err := client.NewRPC(context.Background(), s.logger, s.supervisor.RPC()) + superClient, err := dial.DialSupervisorClientWithTimeout(context.Background(), s.logger, s.supervisor.RPC()) require.NoError(s.t, err, "failed to dial supervisor RPC") - superClient := sources.NewSupervisorClient(cl) s.superClient = superClient return superClient } @@ -432,7 +432,7 @@ func (s *interopE2ESystem) L1GethClient() *ethclient.Client { rpcEndpoint, func(v string) *rpc.Client { logger := testlog.Logger(s.t, log.LevelInfo) - cl, err := dial.DialRPCClientWithTimeout(context.Background(), 30*time.Second, logger, v) + cl, err := dial.DialRPCClientWithTimeout(context.Background(), 
logger, v) require.NoError(s.t, err, "failed to dial L1 eth node instance") return cl }) @@ -449,6 +449,14 @@ func (s *interopE2ESystem) L2OperatorKey(id string, role devkeys.ChainOperatorRo return s.l2s[id].operatorKeys[role] } +func (s *interopE2ESystem) L1ID() string { + return s.worldOutput.L1.Genesis.Config.ChainID.String() +} + +func (s *interopE2ESystem) L1Genesis() *core.Genesis { + return s.worldOutput.L1.Genesis +} + // L2IDs returns the list of L2 IDs, which are the keys of the L2s map func (s *interopE2ESystem) L2IDs() []string { ids := make([]string, 0, len(s.l2s)) @@ -589,7 +597,7 @@ func (s *interopE2ESystem) DependencySet() *depset.StaticConfigDependencySet { func mustDial(t *testing.T, logger log.Logger) func(v string) *rpc.Client { return func(v string) *rpc.Client { - cl, err := dial.DialRPCClientWithTimeout(context.Background(), 30*time.Second, logger, v) + cl, err := dial.DialRPCClientWithTimeout(context.Background(), logger, v) require.NoError(t, err, "failed to dial") return cl } diff --git a/op-e2e/interop/supersystem_l2.go b/op-e2e/interop/supersystem_l2.go index dcd7847682acb..fc3fb282a5877 100644 --- a/op-e2e/interop/supersystem_l2.go +++ b/op-e2e/interop/supersystem_l2.go @@ -34,6 +34,7 @@ import ( "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" gn "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" "github.com/stretchr/testify/require" ) @@ -76,7 +77,7 @@ func (s *interopE2ESystem) L2GethClient(id string, name string) *ethclient.Clien rpcEndpoint, func(v string) *rpc.Client { logger := testlog.Logger(s.t, log.LevelInfo).New("node", id) - cl, err := dial.DialRPCClientWithTimeout(context.Background(), 30*time.Second, logger, v) + cl, err := dial.DialRPCClientWithTimeout(context.Background(), logger, v) require.NoError(s.t, err, "failed to dial eth node instance %s", id) return cl }) @@ -98,9 +99,9 @@ func (s *interopE2ESystem) 
L2RollupClient(id string, name string) *sources.Rollu } rollupClA, err := dial.DialRollupClientWithTimeout( context.Background(), - time.Second*15, s.logger, - node.opNode.UserRPC().RPC()) + node.opNode.UserRPC().RPC(), + ) require.NoError(s.t, err, "failed to dial rollup client") node.rollupClient = rollupClA return node.rollupClient @@ -112,7 +113,7 @@ func (s *interopE2ESystem) L2RollupClient(id string, name string) *sources.Rollu func (s *interopE2ESystem) newL2(id string, l2Out *interopgen.L2Output, depSet depset.DependencySet) l2Net { operatorKeys := s.newOperatorKeysForL2(l2Out) l2Geth := s.newGethForL2(id, "sequencer", l2Out) - opNode := s.newNodeForL2(id, "sequencer", l2Out, depSet, operatorKeys, l2Geth, true) + opNode := s.newNodeForL2(id, "sequencer", l2Out, depSet, operatorKeys, l2Geth, true, s.l1.Backend.BlockChain().Config()) proposer := s.newProposerForL2(id, operatorKeys) batcher := s.newBatcherForL2(id, operatorKeys, l2Geth, opNode) @@ -131,7 +132,7 @@ func (s *interopE2ESystem) newL2(id string, l2Out *interopgen.L2Output, depSet d func (s *interopE2ESystem) AddNode(id string, name string) { l2 := s.l2s[id] l2Geth := s.newGethForL2(id, name, l2.l2Out) - opNode := s.newNodeForL2(id, name, l2.l2Out, s.DependencySet(), l2.operatorKeys, l2Geth, false) + opNode := s.newNodeForL2(id, name, l2.l2Out, s.DependencySet(), l2.operatorKeys, l2Geth, false, s.l1.Backend.BlockChain().Config()) l2.nodes[name] = &l2Node{name: name, opNode: opNode, l2Geth: l2Geth} endpoint, secret := l2.nodes[name].opNode.InteropRPC() @@ -148,6 +149,7 @@ func (s *interopE2ESystem) newNodeForL2( operatorKeys map[devkeys.ChainOperatorRole]ecdsa.PrivateKey, l2Geth *geth.GethInstance, isSequencer bool, + l1ChainConfig *params.ChainConfig, ) *opnode.Opnode { logger := s.logger.New("role", "op-node-"+id+"-"+name) p2pKey := operatorKeys[devkeys.SequencerP2PRole] @@ -158,6 +160,7 @@ func (s *interopE2ESystem) newNodeForL2( TrustRPC: false, RPCProviderKind: sources.RPCKindDebugGeth, }, + 
L1ChainConfig: l1ChainConfig, L2: &config.L2EndpointConfig{ L2EngineAddr: l2Geth.AuthRPC().RPC(), L2EngineJWTSecret: testingJWTSecret, diff --git a/op-e2e/opgeth/op_geth.go b/op-e2e/opgeth/op_geth.go index a8b57ff5a093e..2a79606dd2562 100644 --- a/op-e2e/opgeth/op_geth.go +++ b/op-e2e/opgeth/op_geth.go @@ -202,7 +202,7 @@ func (d *OpGeth) StartBlockBuilding(ctx context.Context, attrs *eth.PayloadAttri // CreatePayloadAttributes creates a valid PayloadAttributes containing a L1Info deposit transaction followed by the supplied transactions. func (d *OpGeth) CreatePayloadAttributes(txs ...*types.Transaction) (*eth.PayloadAttributes, error) { timestamp := d.L2Head.Timestamp + 2 - l1Info, err := derive.L1InfoDepositBytes(d.l2Engine.RollupConfig(), d.SystemConfig, d.sequenceNum, d.L1Head, uint64(timestamp)) + l1Info, err := derive.L1InfoDepositBytes(d.l2Engine.RollupConfig(), d.L1ChainConfig, d.SystemConfig, d.sequenceNum, d.L1Head, uint64(timestamp)) if err != nil { return nil, err } @@ -239,6 +239,10 @@ func (d *OpGeth) CreatePayloadAttributes(txs ...*types.Transaction) (*eth.Payloa Withdrawals: withdrawals, ParentBeaconBlockRoot: parentBeaconBlockRoot, } + if d.L2ChainConfig.IsJovian(uint64(timestamp)) { + attrs.MinBaseFee = new(uint64) + *attrs.MinBaseFee = d.SystemConfig.MinBaseFee + } if d.L2ChainConfig.IsHolocene(uint64(timestamp)) { attrs.EIP1559Params = new(eth.Bytes8) *attrs.EIP1559Params = d.SystemConfig.EIP1559Params diff --git a/op-e2e/opgeth/op_geth_test.go b/op-e2e/opgeth/op_geth_test.go index 36ea6f457faa7..d1feaf40f8122 100644 --- a/op-e2e/opgeth/op_geth_test.go +++ b/op-e2e/opgeth/op_geth_test.go @@ -405,7 +405,7 @@ func TestPreregolith(t *testing.T) { defer opGeth.Close() rollupCfg := rollup.Config{} - systemTx, err := derive.L1InfoDeposit(&rollupCfg, opGeth.SystemConfig, 1, opGeth.L1Head, 0) + systemTx, err := derive.L1InfoDeposit(&rollupCfg, opGeth.L1ChainConfig, opGeth.SystemConfig, 1, opGeth.L1Head, 0) systemTx.IsSystemTransaction = true 
require.NoError(t, err) @@ -597,7 +597,7 @@ func TestRegolith(t *testing.T) { test.activateRegolith(ctx, t, opGeth) rollupCfg := rollup.Config{} - systemTx, err := derive.L1InfoDeposit(&rollupCfg, opGeth.SystemConfig, 1, opGeth.L1Head, 0) + systemTx, err := derive.L1InfoDeposit(&rollupCfg, opGeth.L1ChainConfig, opGeth.SystemConfig, 1, opGeth.L1Head, 0) systemTx.IsSystemTransaction = true require.NoError(t, err) diff --git a/op-e2e/scripts/gen-binding.sh b/op-e2e/scripts/gen-binding.sh new file mode 100755 index 0000000000000..2d16ec2b9854f --- /dev/null +++ b/op-e2e/scripts/gen-binding.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +set -euo pipefail + +REPO_ROOT="$(git rev-parse --show-toplevel)" +cd "${REPO_ROOT}" + +if [[ $# -lt 1 ]]; then + echo "usage: $0 CONTRACT_NAME" >&2 + exit 1 +fi + +CONTRACT="$1" +ARTIFACT_DIR="packages/contracts-bedrock/forge-artifacts/${CONTRACT}.sol" +ARTIFACT_PATH="${ARTIFACT_DIR}/${CONTRACT}.json" + +if [[ ! -f "${ARTIFACT_PATH}" ]]; then + echo "error: artifact not found at ${ARTIFACT_PATH}. Run the contracts build first." 
>&2 + exit 1 +fi + +OUTPUT_BASENAME="$(echo "${CONTRACT}" | tr '[:upper:]' '[:lower:]')" +OUTPUT_PATH="op-e2e/bindings/${OUTPUT_BASENAME}.go" + +TMPDIR="$(mktemp -d)" +trap 'rm -rf "${TMPDIR}"' EXIT + +ABI_PATH="${TMPDIR}/${CONTRACT}.abi.json" +BIN_PATH="${TMPDIR}/${CONTRACT}.bin" + +jq '.abi' "${ARTIFACT_PATH}" > "${ABI_PATH}" +jq -r '.bytecode.object' "${ARTIFACT_PATH}" > "${BIN_PATH}" + +abigen --pkg bindings --type "${CONTRACT}" --abi "${ABI_PATH}" --bin "${BIN_PATH}" --out "${OUTPUT_PATH}" +gofmt -w "${OUTPUT_PATH}" diff --git a/op-e2e/system/conductor/system_adminrpc_test.go b/op-e2e/system/conductor/system_adminrpc_test.go index 25bfd6e4f59a5..eebcdd6b54d82 100644 --- a/op-e2e/system/conductor/system_adminrpc_test.go +++ b/op-e2e/system/conductor/system_adminrpc_test.go @@ -182,7 +182,8 @@ func TestLoadSequencerStateOnStarted_Started(t *testing.T) { func TestPostUnsafePayload(t *testing.T) { op_e2e.InitParallel(t) - ctx := context.Background() + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() cfg := e2esys.DefaultSystemConfig(t) cfg.Nodes["verifier"].RPC.EnableAdmin = true diff --git a/op-e2e/system/da/da_throttling_test.go b/op-e2e/system/da/da_throttling_test.go index b7b2726ff73a3..1689e673a80c7 100644 --- a/op-e2e/system/da/da_throttling_test.go +++ b/op-e2e/system/da/da_throttling_test.go @@ -73,7 +73,8 @@ func TestDATxThrottling(t *testing.T) { require.Nil(t, bigReceipt, "large tx did not get throttled") // disable throttling to let big tx through - batcher.Config.ThrottleParams.TxSize = math.MaxUint64 + batcher.Config.ThrottleParams.TxSizeUpperLimit = math.MaxUint64 + batcher.Config.ThrottleParams.TxSizeLowerLimit = math.MaxUint64 - 1 err = batcher.SetThrottleController(config.StepControllerType, nil) // We need to set the controller again to propagate the change require.NoError(t, err) diff --git a/op-e2e/system/da/l1_beacon_client_test.go b/op-e2e/system/da/l1_beacon_client_test.go index 
68698a5e3187a..6b7b186b0345b 100644 --- a/op-e2e/system/da/l1_beacon_client_test.go +++ b/op-e2e/system/da/l1_beacon_client_test.go @@ -6,7 +6,7 @@ import ( op_e2e "github.com/ethereum-optimism/optimism/op-e2e" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/blobstore" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/fakebeacon" "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -22,7 +22,7 @@ func TestGetVersion(t *testing.T) { l := testlog.Logger(t, log.LevelInfo) - blobStore := e2eutils.NewBlobStore() + blobStore := blobstore.New() beaconApi := fakebeacon.NewBeacon(l, blobStore, uint64(0), uint64(0)) t.Cleanup(func() { _ = beaconApi.Close() @@ -42,7 +42,7 @@ func Test404NotFound(t *testing.T) { l := testlog.Logger(t, log.LevelInfo) - blobStore := e2eutils.NewBlobStore() + blobStore := blobstore.New() beaconApi := fakebeacon.NewBeacon(l, blobStore, uint64(0), uint64(12)) t.Cleanup(func() { _ = beaconApi.Close() diff --git a/op-e2e/system/e2esys/setup.go b/op-e2e/system/e2esys/setup.go index 6362ccdbed85d..f643a1f84e603 100644 --- a/op-e2e/system/e2esys/setup.go +++ b/op-e2e/system/e2esys/setup.go @@ -50,6 +50,7 @@ import ( "github.com/ethereum-optimism/optimism/op-e2e/config/secrets" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/batcher" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/blobstore" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/fakebeacon" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/opnode" @@ -359,7 +360,7 @@ type System struct { Cfg SystemConfig RollupConfig *rollup.Config - + L1GenesisCfg *core.Genesis L2GenesisCfg *core.Genesis // Connections to running nodes @@ -474,6 +475,10 @@ func (sys *System) RollupCfgs() []*rollup.Config { return 
[]*rollup.Config{sys.RollupConfig} } +func (sys *System) L1Genesis() *core.Genesis { + return sys.L1GenesisCfg +} + func (sys *System) L2Genesis() *core.Genesis { return sys.L2GenesisCfg } @@ -576,10 +581,11 @@ func WithBatcherCompressionAlgo(ca derive.CompressionAlgo) StartOption { func WithBatcherThrottling(interval time.Duration, threshold, txSize, blockSize uint64) StartOption { return StartOption{ BatcherMod: func(cfg *bss.CLIConfig) { - cfg.ThrottleThreshold = threshold - cfg.ThrottleControllerType = batcherCfg.StepControllerType - cfg.ThrottleTxSize = txSize - cfg.ThrottleBlockSize = blockSize + cfg.ThrottleConfig.LowerThreshold = threshold + cfg.ThrottleConfig.ControllerType = batcherCfg.StepControllerType + cfg.ThrottleConfig.TxSizeLowerLimit = txSize + cfg.ThrottleConfig.BlockSizeLowerLimit = blockSize + cfg.ThrottleConfig.BlockSizeUpperLimit = blockSize * 100 }, } } @@ -628,6 +634,8 @@ func (cfg SystemConfig) Start(t *testing.T, startOpts ...StartOption) (*System, return nil, err } + sys.L1GenesisCfg = l1Genesis + for addr, amount := range cfg.Premine { if existing, ok := l1Genesis.Alloc[addr]; ok { l1Genesis.Alloc[addr] = types.Account{ @@ -742,7 +750,7 @@ func (cfg SystemConfig) Start(t *testing.T, startOpts ...StartOption) (*System, // Create a fake Beacon node to hold on to blobs created by the L1 miner, and to serve them to L2 bcn := fakebeacon.NewBeacon(testlog.Logger(t, log.LevelInfo).New("role", "l1_cl"), - e2eutils.NewBlobStore(), l1Genesis.Timestamp, cfg.DeployConfig.L1BlockTime) + blobstore.New(), l1Genesis.Timestamp, cfg.DeployConfig.L1BlockTime) t.Cleanup(func() { _ = bcn.Close() }) @@ -885,6 +893,7 @@ func (cfg SystemConfig) Start(t *testing.T, startOpts ...StartOption) (*System, if err := c.LoadPersisted(cfg.Loggers[name]); err != nil { return nil, err } + c.L1ChainConfig = l1Genesis.Config if p, ok := p2pNodes[name]; ok { c.P2P = p @@ -1149,7 +1158,7 @@ func (sys *System) RollupClient(name string) *sources.RollupClient { } rpcClient := 
endpoint.DialRPC(endpoint.PreferAnyRPC, sys.RollupEndpoint(name), func(v string) *rpc.Client { logger := testlog.Logger(sys.t, log.LevelInfo).New("rollupClient", name) - cl, err := dial.DialRPCClientWithTimeout(context.Background(), 30*time.Second, logger, v) + cl, err := dial.DialRPCClientWithTimeout(context.Background(), logger, v) require.NoError(sys.t, err, "failed to dial rollup instance %s", name) return cl }) @@ -1168,7 +1177,7 @@ func (sys *System) NodeClient(name string) *ethclient.Client { } rpcCl := endpoint.DialRPC(endpoint.PreferAnyRPC, sys.NodeEndpoint(name), func(v string) *rpc.Client { logger := testlog.Logger(sys.t, log.LevelInfo).New("node", name) - cl, err := dial.DialRPCClientWithTimeout(context.Background(), 30*time.Second, logger, v) + cl, err := dial.DialRPCClientWithTimeout(context.Background(), logger, v) require.NoError(sys.t, err, "failed to dial eth node instance %s", name) return cl }) diff --git a/op-e2e/system/fees/l1info_test.go b/op-e2e/system/fees/l1info_test.go index 7ae066d993d5c..6edf3cf79b0fe 100644 --- a/op-e2e/system/fees/l1info_test.go +++ b/op-e2e/system/fees/l1info_test.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/stretchr/testify/require" @@ -171,7 +172,7 @@ func TestL1InfoContract(t *testing.T) { l1blocks[h].BlobBaseFeeScalar = scalars.BlobBaseFeeScalar l1blocks[h].BaseFeeScalar = scalars.BaseFeeScalar if excess := b.ExcessBlobGas(); excess != nil { - l1blocks[h].BlobBaseFee = eth.CalcBlobFeeDefault(b.Header()) + l1blocks[h].BlobBaseFee = eip4844.CalcBlobFee(sys.L1Genesis().Config, b.Header()) } else { l1blocks[h].BlobBaseFee = big.NewInt(1) } diff --git a/op-e2e/system/p2p/gossip_test.go b/op-e2e/system/p2p/gossip_test.go index 
a553bbea21c46..9747ca1c7939d 100644 --- a/op-e2e/system/p2p/gossip_test.go +++ b/op-e2e/system/p2p/gossip_test.go @@ -29,6 +29,7 @@ import ( // TestSystemMockP2P sets up a L1 Geth node, a rollup node, and a L2 geth node and then confirms that // the nodes can sync L2 blocks before they are confirmed on L1. func TestSystemMockP2P(t *testing.T) { + t.Skipf("skipping due to high flakiness") op_e2e.InitParallel(t) cfg := e2esys.DefaultSystemConfig(t) diff --git a/op-e2e/system/proofs/system_fpp_test.go b/op-e2e/system/proofs/system_fpp_test.go index e21fbcff4a722..2000d61baa5ce 100644 --- a/op-e2e/system/proofs/system_fpp_test.go +++ b/op-e2e/system/proofs/system_fpp_test.go @@ -79,6 +79,8 @@ func applySpanBatchActivation(active bool, dp *genesis.DeployConfig) { dp.L2GenesisFjordTimeOffset = nil dp.L2GenesisGraniteTimeOffset = nil dp.L2GenesisHoloceneTimeOffset = nil + dp.L2GenesisIsthmusTimeOffset = nil + dp.L2GenesisJovianTimeOffset = nil } } @@ -285,7 +287,7 @@ type FaultProofProgramTestScenario struct { // testFaultProofProgramScenario runs the fault proof program in several contexts, given a test scenario. 
func testFaultProofProgramScenario(t *testing.T, ctx context.Context, sys *e2esys.System, s *FaultProofProgramTestScenario) { preimageDir := t.TempDir() - fppConfig := oppconf.NewSingleChainConfig(sys.RollupConfig, sys.L2GenesisCfg.Config, s.L1Head, s.L2Head, s.L2OutputRoot, common.Hash(s.L2Claim), s.L2ClaimBlockNumber) + fppConfig := oppconf.NewSingleChainConfig(sys.RollupConfig, sys.L2GenesisCfg.Config, sys.L1GenesisCfg.Config, s.L1Head, s.L2Head, s.L2OutputRoot, common.Hash(s.L2Claim), s.L2ClaimBlockNumber) fppConfig.L1URL = sys.NodeEndpoint("l1").RPC() fppConfig.L2URLs = []string{sys.NodeEndpoint("sequencer").RPC()} fppConfig.L1BeaconURL = sys.L1BeaconEndpoint().RestHTTP() diff --git a/op-e2e/system/runcfg/protocol_versions_test.go b/op-e2e/system/runcfg/protocol_versions_test.go index d9e0ff1996a66..87a234711b7e0 100644 --- a/op-e2e/system/runcfg/protocol_versions_test.go +++ b/op-e2e/system/runcfg/protocol_versions_test.go @@ -128,16 +128,14 @@ func TestRequiredProtocolVersionChangeAndHalt(t *testing.T) { // Checking if the engine is down is not trivial in op-e2e. // In op-geth we have halting tests covering the Engine API, in op-e2e we instead check if the API stops. _, err = retry.Do(context.Background(), 10, retry.Fixed(time.Second*10), func() (struct{}, error) { - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() url := sys.NodeEndpoint("verifier").(endpoint.HttpRPC).HttpRPC() // The URL of the verifier op-geth becomes empty after it is stopped. // In this case, IsURLAvailable() returns an incorrect value when port 80 happens to be occupied by another process. 
if url == "http://" { return struct{}{}, nil } - available := client.IsURLAvailable(ctx, url) - if !available && ctx.Err() == nil { // waiting for client to stop responding to RPC requests (slow dials with timeout don't count) + available := client.IsURLAvailable(context.Background(), sys.NodeEndpoint("verifier").(endpoint.HttpRPC).HttpRPC(), 5*time.Second) + if !available { // waiting for client to stop responding to RPC requests (slow dials with timeout don't count) return struct{}{}, nil } return struct{}{}, errors.New("verifier EL node is not closed yet") diff --git a/op-node/README.md b/op-node/README.md index d18e85217754f..4729ef3aeaa66 100644 --- a/op-node/README.md +++ b/op-node/README.md @@ -51,8 +51,8 @@ just op-node --l1=ws://localhost:8546 \ --l1.beacon=http://localhost:4000 \ --l2=ws://localhost:9001 \ - --p2p.listen.tcp=9222 - --p2p.listen.udp=9222 + --p2p.listen.tcp=9222 \ + --p2p.listen.udp=9222 \ --rpc.port=7000 \ --syncmode=execution-layer diff --git a/op-node/benchmarks/batchbuilding_test.go b/op-node/benchmarks/batchbuilding_test.go index 52d529f0b4b1b..6fc94ff24d986 100644 --- a/op-node/benchmarks/batchbuilding_test.go +++ b/op-node/benchmarks/batchbuilding_test.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testutils" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" "github.com/stretchr/testify/require" ) @@ -114,7 +115,7 @@ func randomBlock(cfg *rollup.Config, rng *rand.Rand, txCount int, timestamp uint // should only be used for testing purposes, as the batch input doesn't contain the necessary information // to build the full block (only non-deposit transactions and a subset of header fields are populated). 
func singularBatchToBlock(rollupCfg *rollup.Config, batch *derive.SingularBatch) (*types.Block, error) { - l1InfoTx, err := derive.L1InfoDeposit(rollupCfg, eth.SystemConfig{}, 0, &testutils.MockBlockInfo{ + l1InfoTx, err := derive.L1InfoDeposit(rollupCfg, params.MergedTestChainConfig, eth.SystemConfig{}, 0, &testutils.MockBlockInfo{ InfoNum: uint64(batch.EpochNum), InfoHash: batch.EpochHash, }, batch.Timestamp) diff --git a/op-node/config/config.go b/op-node/config/config.go index 5e5f2e61fb954..54b84432306fd 100644 --- a/op-node/config/config.go +++ b/op-node/config/config.go @@ -24,6 +24,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/oppprof" oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" + "github.com/ethereum/go-ethereum/params" "github.com/ethstorage/da-server/pkg/da/client" "github.com/urfave/cli/v2" ) @@ -40,6 +41,8 @@ type Config struct { Rollup rollup.Config + L1ChainConfig *params.ChainConfig + DependencySet depset.DependencySet // P2PSigner will be used for signing off on published content @@ -149,6 +152,9 @@ func (cfg *Config) Check() error { if err := cfg.L2.Check(); err != nil { return fmt.Errorf("l2 endpoint config error: %w", err) } + if cfg.L1ChainConfig == nil { + return fmt.Errorf("missing L1ChainConfig") + } if cfg.Rollup.EcotoneTime != nil { if cfg.Beacon == nil { return fmt.Errorf("the Ecotone upgrade is scheduled (timestamp = %d) but no L1 Beacon API endpoint is configured", *cfg.Rollup.EcotoneTime) diff --git a/op-node/flags/flags.go b/op-node/flags/flags.go index f43a4115a39b4..c860605c386b7 100644 --- a/op-node/flags/flags.go +++ b/op-node/flags/flags.go @@ -171,7 +171,7 @@ var ( L1CacheSize = &cli.UintFlag{ Name: "l1.cache-size", Usage: "Cache size for blocks, receipts and transactions. " + - "If this flag is set to 0, 2/3 of the sequencing window size is used (usually 2400). 
" + + "If this flag is set to 0, 3/2 of the sequencing window size is used (usually 2400). " + "The default value of 900 (~3h of L1 blocks) is good for (high-throughput) networks that see frequent safe head increments. " + "On (low-throughput) networks with infrequent safe head increments, it is recommended to set this value to 0, " + "or a value that well covers the typical span between safe head increments. " + @@ -187,6 +187,12 @@ var ( Value: time.Second * 12, Category: L1RPCCategory, } + L1ChainConfig = &cli.PathFlag{ + Name: "rollup.l1-chain-config", + Usage: "Path to .json file with the chain configuration for the L1, either in the direct format or genesis.json format (i.e. embedded under the .config property). Not necessary / will be ignored if using Ethereum mainnet or Sepolia as an L1.", + EnvVars: prefixEnvVars("ROLLUP_L1_CHAIN_CONFIG"), + Category: RollupCategory, + } L2EngineKind = &cli.GenericFlag{ Name: "l2.enginekind", Usage: "The kind of engine client, used to control the behavior of optimism in respect to different types of engine clients. 
Valid options: " + @@ -462,6 +468,7 @@ var optionalFlags = []cli.Flag{ ConductorRpcFlag, ConductorRpcTimeoutFlag, SafeDBPath, + L1ChainConfig, L2EngineKind, DACUrlsFlag, L2EngineRpcTimeout, diff --git a/op-node/node/api.go b/op-node/node/api.go index 727abac075d9f..413896f443b17 100644 --- a/op-node/node/api.go +++ b/op-node/node/api.go @@ -36,7 +36,7 @@ type driverClient interface { StartSequencer(ctx context.Context, blockHash common.Hash) error StopSequencer(context.Context) (common.Hash, error) SequencerActive(context.Context) (bool, error) - OnUnsafeL2Payload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error + OnUnsafeL2Payload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) OverrideLeader(ctx context.Context) error ConductorEnabled(ctx context.Context) (bool, error) SetRecoverMode(ctx context.Context, mode bool) error @@ -84,8 +84,8 @@ func (n *adminAPI) PostUnsafePayload(ctx context.Context, envelope *eth.Executio log.Error("payload has bad block hash", "bad_hash", payload.BlockHash.String(), "actual", actual.String()) return fmt.Errorf("payload has bad block hash: %s, actual block hash is: %s", payload.BlockHash.String(), actual.String()) } - - return n.dr.OnUnsafeL2Payload(ctx, envelope) + n.dr.OnUnsafeL2Payload(ctx, envelope) + return nil } // OverrideLeader disables sequencer conductor interactions and allow sequencer to run in non-HA mode during disaster recovery scenarios. 
diff --git a/op-node/node/conductor.go b/op-node/node/conductor.go index 25527daa31013..b1f0e9c8c0387 100644 --- a/op-node/node/conductor.go +++ b/op-node/node/conductor.go @@ -58,7 +58,7 @@ func (c *ConductorClient) initialize(ctx context.Context) error { return fmt.Errorf("no conductor RPC endpoint available: %w", err) } metricsOpt := rpc.WithRecorder(c.metrics.NewRecorder("conductor")) - conductorRpcClient, err := dial.DialRPCClientWithTimeout(context.Background(), time.Minute*1, c.log, endpoint, metricsOpt) + conductorRpcClient, err := dial.DialRPCClientWithTimeout(context.Background(), c.log, endpoint, metricsOpt) if err != nil { return fmt.Errorf("failed to dial conductor RPC: %w", err) } diff --git a/op-node/node/node.go b/op-node/node/node.go index bbc8c08f1d6d1..9e22f42652c6f 100644 --- a/op-node/node/node.go +++ b/op-node/node/node.go @@ -26,11 +26,9 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/conductor" "github.com/ethereum-optimism/optimism/op-node/rollup/driver" - "github.com/ethereum-optimism/optimism/op-node/rollup/finality" "github.com/ethereum-optimism/optimism/op-node/rollup/interop" "github.com/ethereum-optimism/optimism/op-node/rollup/interop/indexing" "github.com/ethereum-optimism/optimism/op-node/rollup/sequencing" - "github.com/ethereum-optimism/optimism/op-node/rollup/status" "github.com/ethereum-optimism/optimism/op-node/rollup/sync" "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -85,8 +83,6 @@ type OpNode struct { interopSys interop.SubSystem - apiEmitter event.Emitter // any API requests that need to emit events can emit from this - // some resources cannot be stopped directly, like the p2p gossipsub router (not our design), // and depend on this ctx to be closed. resourcesCtx context.Context @@ -100,6 +96,8 @@ type OpNode struct { // cancels execution prematurely, e.g. to halt. This may be nil. 
cancel context.CancelCauseFunc halted atomic.Bool + + tracer tracer.Tracer // used for testing PublishBlock and SignAndPublishL2Payload } // New creates a new OpNode instance. @@ -117,6 +115,7 @@ func New(ctx context.Context, cfg *config.Config, log log.Logger, appVersion str metrics: m, rollupHalt: cfg.RollupHalt, cancel: cfg.Cancel, + tracer: cfg.Tracer, } // not a context leak, gossipsub is closed with a context. n.resourcesCtx, n.resourcesClose = context.WithCancel(context.Background()) @@ -136,18 +135,18 @@ func New(ctx context.Context, cfg *config.Config, log log.Logger, appVersion str func (n *OpNode) init(ctx context.Context, cfg *config.Config) error { n.log.Info("Initializing rollup node", "version", n.appVersion) n.initEventSystem() - if err := n.initTracer(ctx, cfg); err != nil { - return fmt.Errorf("failed to init the trace: %w", err) - } - if err := n.initL1(ctx, cfg); err != nil { - return fmt.Errorf("failed to init L1: %w", err) - } if err := n.initL1BeaconAPI(ctx, cfg); err != nil { return err } + if err := n.initL1Source(ctx, cfg); err != nil { + return fmt.Errorf("failed to init L1 Source: %w", err) + } if err := n.initL2(ctx, cfg); err != nil { return fmt.Errorf("failed to init L2: %w", err) } + if err := n.initL1Handlers(cfg); err != nil { + return fmt.Errorf("failed to init L1 Handlers: %w", err) + } if err := n.initRuntimeConfig(ctx, cfg); err != nil { // depends on L2, to signal initial runtime values to return fmt.Errorf("failed to init the runtime config: %w", err) } @@ -180,17 +179,9 @@ func (n *OpNode) initEventSystem() { sys.Register("node", event.DeriverFunc(n.onEvent)) n.eventSys = sys n.eventDrain = executor - n.apiEmitter = sys.Register("node-api", nil) } -func (n *OpNode) initTracer(ctx context.Context, cfg *config.Config) error { - if cfg.Tracer != nil { - n.eventSys.Register("tracer", tracer.NewTracerDeriver(cfg.Tracer)) - } - return nil -} - -func (n *OpNode) initL1(ctx context.Context, cfg *config.Config) error { +func (n 
*OpNode) initL1Source(ctx context.Context, cfg *config.Config) error { // Cache 3/2 worth of sequencing window of receipts and txs defaultCacheSize := int(cfg.Rollup.SeqWindowSize) * 3 / 2 l1RPC, l1Cfg, err := cfg.L1.Setup(ctx, n.log, defaultCacheSize, n.metrics) @@ -203,19 +194,36 @@ func (n *OpNode) initL1(ctx context.Context, cfg *config.Config) error { return fmt.Errorf("failed to create L1 source: %w", err) } - if err := cfg.Rollup.ValidateL1Config(ctx, n.l1Source); err != nil { + if err := cfg.Rollup.ValidateL1Config(ctx, n.log, n.l1Source); err != nil { return fmt.Errorf("failed to validate the L1 config: %w", err) } - emitter := n.eventSys.Register("l1-signals", nil) + return nil +} + +func (n *OpNode) initL1Handlers(cfg *config.Config) error { + if n.l2Driver == nil { + return errors.New("l2 driver must be initialized") + } onL1Head := func(ctx context.Context, sig eth.L1BlockRef) { - emitter.Emit(ctx, status.L1UnsafeEvent{L1Unsafe: sig}) + // TODO(#16917) Remove Event System Refactor Comments + // L1UnsafeEvent fan out is updated to procedural method calls + if n.cfg.Tracer != nil { + n.cfg.Tracer.OnNewL1Head(ctx, sig) + } + n.l2Driver.SyncDeriver.L1Tracker.OnL1Unsafe(sig) + n.l2Driver.StatusTracker.OnL1Unsafe(sig) + n.l2Driver.SyncDeriver.OnL1Unsafe(ctx) } onL1Safe := func(ctx context.Context, sig eth.L1BlockRef) { - emitter.Emit(ctx, status.L1SafeEvent{L1Safe: sig}) + n.l2Driver.StatusTracker.OnL1Safe(sig) } onL1Finalized := func(ctx context.Context, sig eth.L1BlockRef) { - emitter.Emit(ctx, finality.FinalizeL1Event{FinalizedL1: sig}) + // TODO(#16917) Remove Event System Refactor Comments + // FinalizeL1Event fan out is updated to procedural method calls + n.l2Driver.StatusTracker.OnL1Finalized(sig) + n.l2Driver.Finalizer.OnL1Finalized(sig) + n.l2Driver.SyncDeriver.OnL1Finalized(ctx) } // Keep subscribed to the L1 heads, which keeps the L1 maintainer pointing to the best headers to sync @@ -239,6 +247,7 @@ func (n *OpNode) initL1(ctx context.Context, 
cfg *config.Config) error { cfg.L1EpochPollInterval, time.Second*10) n.l1FinalizedSub = eth.PollBlockChanges(n.log, n.l1Source, onL1Finalized, eth.Finalized, cfg.L1EpochPollInterval, time.Second*10) + return nil } @@ -452,9 +461,16 @@ func (n *OpNode) initL2(ctx context.Context, cfg *config.Config) error { return fmt.Errorf("cfg.Rollup.ChainOpConfig is nil. Please see https://github.com/ethereum-optimism/optimism/releases/tag/op-node/v1.11.0: %w", err) } - n.l2Driver = driver.NewDriver(n.eventSys, n.eventDrain, &cfg.Driver, &cfg.Rollup, cfg.DependencySet, n.l2Source, n.l1Source, + n.l2Driver = driver.NewDriver(n.eventSys, n.eventDrain, &cfg.Driver, &cfg.Rollup, cfg.L1ChainConfig, cfg.DependencySet, n.l2Source, n.l1Source, n.beacon, n, n, n.log, n.metrics, cfg.ConfigPersistence, n.safeDB, &cfg.Sync, sequencerConductor, altDA, indexingMode, dacClient) + // Wire up IndexingMode to engine controller for direct procedure call + if n.interopSys != nil { + if indexingMode, ok := n.interopSys.(*indexing.IndexingMode); ok { + indexingMode.SetEngineController(n.l2Driver.SyncDeriver.Engine) + } + } + return nil } @@ -472,7 +488,7 @@ func (n *OpNode) initRPCServer(cfg *config.Config) error { if cfg.ExperimentalOPStackAPI { server.AddAPI(rpc.API{ Namespace: "opstack", - Service: NewOpstackAPI(n.l2Driver.Engine, n), + Service: NewOpstackAPI(n.l2Driver.SyncDeriver.Engine, n), }) n.log.Info("Experimental OP stack API enabled") } @@ -535,9 +551,12 @@ func (n *OpNode) initP2P(cfg *config.Config) (err error) { panic("p2p node already initialized") } if n.p2pEnabled() { - em := n.eventSys.Register("p2p-block-receiver", nil) - rec := p2p.NewBlockReceiver(n.log, em, n.metrics) - n.p2pNode, err = p2p.NewNodeP2P(n.resourcesCtx, &cfg.Rollup, n.log, cfg.P2P, rec, n.l2Source, n.runCfg, n.metrics, false) + if n.l2Driver.SyncDeriver == nil { + panic("SyncDeriver must be initialized") + } + // embed syncDeriver and tracer(optional) to the blockReceiver to handle unsafe payloads via p2p + rec := 
p2p.NewBlockReceiver(n.log, n.metrics, n.l2Driver.SyncDeriver, n.cfg.Tracer) + n.p2pNode, err = p2p.NewNodeP2P(n.resourcesCtx, &cfg.Rollup, n.log, cfg.P2P, rec, n.l2Source, n.runCfg, n.metrics) if err != nil { return } @@ -590,7 +609,9 @@ func (n *OpNode) onEvent(ctx context.Context, ev event.Event) bool { } func (n *OpNode) PublishBlock(ctx context.Context, signedEnvelope *opsigner.SignedExecutionPayloadEnvelope) error { - n.apiEmitter.Emit(ctx, tracer.TracePublishBlockEvent{Envelope: signedEnvelope.Envelope}) + if n.tracer != nil { + n.tracer.OnPublishL2Payload(ctx, signedEnvelope.Envelope) + } if p2pNode := n.getP2PNodeIfEnabled(); p2pNode != nil { n.log.Info("Publishing signed execution payload on p2p", "id", signedEnvelope.ID()) return p2pNode.GossipOut().PublishSignedL2Payload(ctx, signedEnvelope) @@ -599,7 +620,9 @@ func (n *OpNode) PublishBlock(ctx context.Context, signedEnvelope *opsigner.Sign } func (n *OpNode) SignAndPublishL2Payload(ctx context.Context, envelope *eth.ExecutionPayloadEnvelope) error { - n.apiEmitter.Emit(ctx, tracer.TracePublishBlockEvent{Envelope: envelope}) + if n.tracer != nil { + n.tracer.OnPublishL2Payload(ctx, envelope) + } // publish to p2p, if we are running p2p at all if p2pNode := n.getP2PNodeIfEnabled(); p2pNode != nil { if n.p2pSigner == nil { diff --git a/op-node/node/server_test.go b/op-node/node/server_test.go index 6a86626593809..fccd03d2b9dd5 100644 --- a/op-node/node/server_test.go +++ b/op-node/node/server_test.go @@ -314,8 +314,8 @@ func (c *mockDriverClient) SequencerActive(ctx context.Context) (bool, error) { return c.Mock.MethodCalled("SequencerActive").Get(0).(bool), nil } -func (c *mockDriverClient) OnUnsafeL2Payload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error { - return c.Mock.MethodCalled("OnUnsafeL2Payload").Get(0).(error) +func (c *mockDriverClient) OnUnsafeL2Payload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) { + c.Mock.MethodCalled("OnUnsafeL2Payload") } func (c 
*mockDriverClient) OverrideLeader(ctx context.Context) error { diff --git a/op-node/node/tracer/comms.go b/op-node/node/tracer/comms.go index c673093d8ecb1..172eeeae91fee 100644 --- a/op-node/node/tracer/comms.go +++ b/op-node/node/tracer/comms.go @@ -5,60 +5,11 @@ import ( "github.com/libp2p/go-libp2p/core/peer" - "github.com/ethereum-optimism/optimism/op-node/p2p" - "github.com/ethereum-optimism/optimism/op-node/rollup/status" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/event" ) -// Tracer configures the OpNode to share events type Tracer interface { OnNewL1Head(ctx context.Context, sig eth.L1BlockRef) OnUnsafeL2Payload(ctx context.Context, from peer.ID, payload *eth.ExecutionPayloadEnvelope) OnPublishL2Payload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) } - -type TracePublishBlockEvent struct { - Envelope *eth.ExecutionPayloadEnvelope -} - -func (ev TracePublishBlockEvent) String() string { - return "trace-publish-event" -} - -// TracerDeriver hooks a Tracer up to the event system as deriver -type TracerDeriver struct { - tracer Tracer - ctx context.Context - cancel context.CancelFunc -} - -var _ event.Deriver = (*TracerDeriver)(nil) -var _ event.Unattacher = (*TracerDeriver)(nil) - -func NewTracerDeriver(tracer Tracer) *TracerDeriver { - ctx, cancel := context.WithCancel(context.Background()) - return &TracerDeriver{ - tracer: tracer, - ctx: ctx, - cancel: cancel, - } -} - -func (t *TracerDeriver) OnEvent(ctx context.Context, ev event.Event) bool { - switch x := ev.(type) { - case status.L1UnsafeEvent: - t.tracer.OnNewL1Head(t.ctx, x.L1Unsafe) - case p2p.ReceivedBlockEvent: - t.tracer.OnUnsafeL2Payload(t.ctx, x.From, x.Envelope) - case TracePublishBlockEvent: - t.tracer.OnPublishL2Payload(t.ctx, x.Envelope) - default: - return false - } - return true -} - -func (t *TracerDeriver) Unattach() { - t.cancel() -} diff --git a/op-node/node/tracer/comms_test.go 
b/op-node/node/tracer/comms_test.go deleted file mode 100644 index 85f904c0e8ed5..0000000000000 --- a/op-node/node/tracer/comms_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package tracer - -import ( - "context" - "math/rand" - "testing" - - "github.com/libp2p/go-libp2p/core/peer" - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-node/p2p" - "github.com/ethereum-optimism/optimism/op-node/rollup/status" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/testutils" -) - -type testTracer struct { - got string -} - -func (t *testTracer) OnNewL1Head(ctx context.Context, sig eth.L1BlockRef) { - t.got += "L1Head: " + sig.ID().String() + "\n" -} - -func (t *testTracer) OnUnsafeL2Payload(ctx context.Context, from peer.ID, payload *eth.ExecutionPayloadEnvelope) { - t.got += "P2P in: from: " + string(from) + " id: " + payload.ID().String() + "\n" -} - -func (t *testTracer) OnPublishL2Payload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) { - t.got += "P2P out: " + payload.ID().String() + "\n" -} - -var _ Tracer = (*testTracer)(nil) - -// TestTracer tests that the tracer traces each thing as expected -func TestTracer(t *testing.T) { - tr := &testTracer{} - d := NewTracerDeriver(tr) - rng := rand.New(rand.NewSource(123)) - - l1Head := testutils.RandomBlockRef(rng) - d.OnEvent(context.Background(), status.L1UnsafeEvent{L1Unsafe: l1Head}) - require.Equal(t, "L1Head: "+l1Head.ID().String()+"\n", tr.got) - tr.got = "" - - id := testutils.RandomBlockID(rng) - block := ð.ExecutionPayloadEnvelope{ - ExecutionPayload: ð.ExecutionPayload{ - BlockHash: id.Hash, BlockNumber: eth.Uint64Quantity(id.Number)}} - - d.OnEvent(context.Background(), p2p.ReceivedBlockEvent{From: "foo", Envelope: block}) - require.Equal(t, "P2P in: from: foo id: "+id.String()+"\n", tr.got) - tr.got = "" - - d.OnEvent(context.Background(), TracePublishBlockEvent{Envelope: block}) - require.Equal(t, "P2P out: 
"+id.String()+"\n", tr.got) - tr.got = "" - - require.NoError(t, d.ctx.Err()) - d.Unattach() - require.Error(t, d.ctx.Err()) -} diff --git a/op-node/p2p/event.go b/op-node/p2p/event.go index 4b4f4827a5915..2efae7dbb69ab 100644 --- a/op-node/p2p/event.go +++ b/op-node/p2p/event.go @@ -8,37 +8,40 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/event" ) -type ReceivedBlockEvent struct { - From peer.ID - Envelope *eth.ExecutionPayloadEnvelope +type BlockReceiverMetrics interface { + RecordReceivedUnsafePayload(payload *eth.ExecutionPayloadEnvelope) } -func (ev ReceivedBlockEvent) String() string { - return "received-block-event" +type SyncDeriver interface { + OnUnsafeL2Payload(ctx context.Context, envelope *eth.ExecutionPayloadEnvelope) } -type BlockReceiverMetrics interface { - RecordReceivedUnsafePayload(payload *eth.ExecutionPayloadEnvelope) +type Tracer interface { + OnUnsafeL2Payload(ctx context.Context, from peer.ID, payload *eth.ExecutionPayloadEnvelope) } // BlockReceiver can be plugged into the P2P gossip stack, -// to receive payloads as ReceivedBlockEvent events. 
+// to receive payloads and call syncDeriver to toss unsafe payload type BlockReceiver struct { log log.Logger - emitter event.Emitter metrics BlockReceiverMetrics + + // syncDeriver embedded for triggering unsafe payload sync via p2p + syncDeriver SyncDeriver + // Tracer embedded for tracing unsafe payload + tracer Tracer } var _ GossipIn = (*BlockReceiver)(nil) -func NewBlockReceiver(log log.Logger, em event.Emitter, metrics BlockReceiverMetrics) *BlockReceiver { +func NewBlockReceiver(log log.Logger, metrics BlockReceiverMetrics, syncDeriver SyncDeriver, tracer Tracer) *BlockReceiver { return &BlockReceiver{ - log: log, - emitter: em, - metrics: metrics, + log: log, + metrics: metrics, + syncDeriver: syncDeriver, + tracer: tracer, } } @@ -47,6 +50,9 @@ func (g *BlockReceiver) OnUnsafeL2Payload(ctx context.Context, from peer.ID, msg "id", msg.ExecutionPayload.ID(), "peer", from, "txs", len(msg.ExecutionPayload.Transactions)) g.metrics.RecordReceivedUnsafePayload(msg) - g.emitter.Emit(ctx, ReceivedBlockEvent{From: from, Envelope: msg}) + g.syncDeriver.OnUnsafeL2Payload(ctx, msg) + if g.tracer != nil { // tracer is optional + g.tracer.OnUnsafeL2Payload(ctx, from, msg) + } return nil } diff --git a/op-node/p2p/host_test.go b/op-node/p2p/host_test.go index 6d55b36a3cc6b..9f461b2fed792 100644 --- a/op-node/p2p/host_test.go +++ b/op-node/p2p/host_test.go @@ -120,7 +120,7 @@ func TestP2PFull(t *testing.T) { runCfgB := &testutils.MockRuntimeConfig{P2PSeqAddress: common.Address{0x42}} logA := testlog.Logger(t, log.LevelError).New("host", "A") - nodeA, err := NewNodeP2P(context.Background(), &rollup.Config{}, logA, &confA, &mockGossipIn{}, nil, runCfgA, metrics.NoopMetrics, false) + nodeA, err := NewNodeP2P(context.Background(), &rollup.Config{}, logA, &confA, &mockGossipIn{}, nil, runCfgA, metrics.NoopMetrics) require.NoError(t, err) defer nodeA.Close() @@ -149,7 +149,7 @@ func TestP2PFull(t *testing.T) { logB := testlog.Logger(t, log.LevelError).New("host", "B") - nodeB, 
err := NewNodeP2P(context.Background(), &rollup.Config{}, logB, &confB, &mockGossipIn{}, nil, runCfgB, metrics.NoopMetrics, false) + nodeB, err := NewNodeP2P(context.Background(), &rollup.Config{}, logB, &confB, &mockGossipIn{}, nil, runCfgB, metrics.NoopMetrics) require.NoError(t, err) defer nodeB.Close() hostB := nodeB.Host() @@ -321,7 +321,7 @@ func TestDiscovery(t *testing.T) { resourcesCtx, resourcesCancel := context.WithCancel(context.Background()) defer resourcesCancel() - nodeA, err := NewNodeP2P(context.Background(), rollupCfg, logA, &confA, &mockGossipIn{}, nil, runCfgA, metrics.NoopMetrics, false) + nodeA, err := NewNodeP2P(context.Background(), rollupCfg, logA, &confA, &mockGossipIn{}, nil, runCfgA, metrics.NoopMetrics) require.NoError(t, err) defer nodeA.Close() hostA := nodeA.Host() @@ -336,7 +336,7 @@ func TestDiscovery(t *testing.T) { confB.DiscoveryDB = discDBC // Start B - nodeB, err := NewNodeP2P(context.Background(), rollupCfg, logB, &confB, &mockGossipIn{}, nil, runCfgB, metrics.NoopMetrics, false) + nodeB, err := NewNodeP2P(context.Background(), rollupCfg, logB, &confB, &mockGossipIn{}, nil, runCfgB, metrics.NoopMetrics) require.NoError(t, err) defer nodeB.Close() hostB := nodeB.Host() @@ -351,7 +351,7 @@ func TestDiscovery(t *testing.T) { }}) // Start C - nodeC, err := NewNodeP2P(context.Background(), rollupCfg, logC, &confC, &mockGossipIn{}, nil, runCfgC, metrics.NoopMetrics, false) + nodeC, err := NewNodeP2P(context.Background(), rollupCfg, logC, &confC, &mockGossipIn{}, nil, runCfgC, metrics.NoopMetrics) require.NoError(t, err) defer nodeC.Close() hostC := nodeC.Host() diff --git a/op-node/p2p/node.go b/op-node/p2p/node.go index e5c3dccbc31cf..149eec5a8f8e1 100644 --- a/op-node/p2p/node.go +++ b/op-node/p2p/node.go @@ -61,7 +61,6 @@ func NewNodeP2P( l2Chain L2Chain, runCfg GossipRuntimeConfig, metrics metrics.Metricer, - elSyncEnabled bool, ) (*NodeP2P, error) { if setup == nil { return nil, errors.New("p2p node cannot be created without 
setup") @@ -70,7 +69,7 @@ func NewNodeP2P( return nil, errors.New("SetupP2P.Disabled is true") } var n NodeP2P - if err := n.init(resourcesCtx, rollupCfg, log, setup, gossipIn, l2Chain, runCfg, metrics, elSyncEnabled); err != nil { + if err := n.init(resourcesCtx, rollupCfg, log, setup, gossipIn, l2Chain, runCfg, metrics); err != nil { closeErr := n.Close() if closeErr != nil { log.Error("failed to close p2p after starting with err", "closeErr", closeErr, "err", err) @@ -94,7 +93,6 @@ func (n *NodeP2P) init( l2Chain L2Chain, runCfg GossipRuntimeConfig, metrics metrics.Metricer, - elSyncEnabled bool, ) error { bwc := p2pmetrics.NewBandwidthCounter() @@ -131,7 +129,7 @@ func (n *NodeP2P) init( n.appScorer = &NoopApplicationScorer{} } // Activate the P2P req-resp sync if enabled by feature-flag. - if setup.ReqRespSyncEnabled() && !elSyncEnabled { + if setup.ReqRespSyncEnabled() { n.syncCl = NewSyncClient(log, rollupCfg, n.host, gossipIn.OnUnsafeL2Payload, metrics, n.appScorer) n.host.Network().Notify(&network.NotifyBundle{ ConnectedF: func(nw network.Network, conn network.Conn) { diff --git a/op-node/p2p/sync.go b/op-node/p2p/sync.go index f06dae7644f52..2e056030935bd 100644 --- a/op-node/p2p/sync.go +++ b/op-node/p2p/sync.go @@ -717,7 +717,7 @@ func (s *SyncClient) doRequest(ctx context.Context, id peer.ID, expectedBlockNum select { case s.results <- syncResult{payload: envelope, peer: id}: case <-ctx.Done(): - return fmt.Errorf("failed to process response, sync client is too busy: %w", err) + return fmt.Errorf("failed to process response, sync client is too busy") } return nil } diff --git a/op-node/rollup/attributes/attributes.go b/op-node/rollup/attributes/attributes.go index 19356a4197882..15d75e7d8d0f1 100644 --- a/op-node/rollup/attributes/attributes.go +++ b/op-node/rollup/attributes/attributes.go @@ -17,6 +17,17 @@ import ( "github.com/ethereum-optimism/optimism/op-service/event" ) +// EngineController provides direct calls into the EngineController that +// 
external components can use instead of emitting events. +type EngineController interface { + // TryUpdatePendingSafe updates the pending safe head if the new reference is newer + TryUpdatePendingSafe(ctx context.Context, ref eth.L2BlockRef, concluding bool, source eth.L1BlockRef) + // TryUpdateLocalSafe updates the local safe head if the new reference is newer and concluding + TryUpdateLocalSafe(ctx context.Context, ref eth.L2BlockRef, concluding bool, source eth.L1BlockRef) + RequestForkchoiceUpdate(ctx context.Context) + RequestPendingSafeUpdate(ctx context.Context) +} + type L2 interface { PayloadByNumber(context.Context, uint64) (*eth.ExecutionPayloadEnvelope, error) } @@ -36,15 +47,21 @@ type AttributesHandler struct { attributes *derive.AttributesWithParent sentAttributes bool + + engineController EngineController } -func NewAttributesHandler(log log.Logger, cfg *rollup.Config, ctx context.Context, l2 L2) *AttributesHandler { +func NewAttributesHandler(log log.Logger, cfg *rollup.Config, ctx context.Context, l2 L2, engController EngineController) *AttributesHandler { + if engController == nil { + panic("engController cannot be nil") + } return &AttributesHandler{ - log: log, - cfg: cfg, - ctx: ctx, - l2: l2, - attributes: nil, + log: log, + cfg: cfg, + ctx: ctx, + l2: l2, + engineController: engController, + attributes: nil, } } @@ -52,6 +69,17 @@ func (eq *AttributesHandler) AttachEmitter(em event.Emitter) { eq.emitter = em } +func (eq *AttributesHandler) forceResetLocked() { + eq.sentAttributes = false + eq.attributes = nil +} + +func (eq *AttributesHandler) ForceReset(ctx context.Context, localUnsafe, crossUnsafe, localSafe, crossSafe, finalized eth.L2BlockRef) { + eq.mu.Lock() + defer eq.mu.Unlock() + eq.forceResetLocked() +} + func (eq *AttributesHandler) OnEvent(ctx context.Context, ev event.Event) bool { // Events may be concurrent in the future. Prevent unsafe concurrent modifications to the attributes. 
eq.mu.Lock() @@ -65,10 +93,9 @@ func (eq *AttributesHandler) OnEvent(ctx context.Context, ev event.Event) bool { eq.sentAttributes = false eq.emitter.Emit(ctx, derive.ConfirmReceivedAttributesEvent{}) // to make sure we have a pre-state signal to process the attributes from - eq.emitter.Emit(ctx, engine.PendingSafeRequestEvent{}) - case rollup.ResetEvent, rollup.ForceResetEvent: - eq.sentAttributes = false - eq.attributes = nil + eq.engineController.RequestPendingSafeUpdate(ctx) + case rollup.ResetEvent: + eq.forceResetLocked() case rollup.EngineTemporaryErrorEvent: eq.sentAttributes = false case engine.InvalidPayloadAttributesEvent: @@ -81,7 +108,7 @@ func (eq *AttributesHandler) OnEvent(ctx context.Context, ev event.Event) bool { eq.attributes = nil // Time to re-evaluate without attributes. // (the pending-safe state will then be forwarded to our source of attributes). - eq.emitter.Emit(ctx, engine.PendingSafeRequestEvent{}) + eq.engineController.RequestPendingSafeUpdate(ctx) case engine.PayloadSealExpiredErrorEvent: if x.DerivedFrom == (eth.L1BlockRef{}) { return true // from sequencing @@ -98,7 +125,7 @@ func (eq *AttributesHandler) OnEvent(ctx context.Context, ev event.Event) bool { "build_id", x.Info.ID, "timestamp", x.Info.Timestamp, "err", x.Err) eq.sentAttributes = false eq.attributes = nil - eq.emitter.Emit(ctx, engine.PendingSafeRequestEvent{}) + eq.engineController.RequestPendingSafeUpdate(ctx) default: return false } @@ -200,11 +227,8 @@ func (eq *AttributesHandler) consolidateNextSafeAttributes(attributes *derive.At eq.log.Error("Failed to compute block-ref from execution payload") return } - eq.emitter.Emit(eq.ctx, engine.PromotePendingSafeEvent{ - Ref: ref, - Concluding: attributes.Concluding, - Source: attributes.DerivedFrom, - }) + eq.engineController.TryUpdatePendingSafe(eq.ctx, ref, attributes.Concluding, attributes.DerivedFrom) + eq.engineController.TryUpdateLocalSafe(eq.ctx, ref, attributes.Concluding, attributes.DerivedFrom) } // unsafe head 
stays the same, we did not reorg the chain. diff --git a/op-node/rollup/attributes/attributes_test.go b/op-node/rollup/attributes/attributes_test.go index aaf5359b89938..168255dbc6122 100644 --- a/op-node/rollup/attributes/attributes_test.go +++ b/op-node/rollup/attributes/attributes_test.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" @@ -86,7 +87,7 @@ func TestAttributesHandler(t *testing.T) { emptyWithdrawals := make(types.Withdrawals, 0) - a1L1Info, err := derive.L1InfoDepositBytes(cfg, cfg.Genesis.SystemConfig, 1, aL1Info, refA0.Time+cfg.BlockTime) + a1L1Info, err := derive.L1InfoDepositBytes(cfg, params.MergedTestChainConfig, cfg.Genesis.SystemConfig, 1, aL1Info, refA0.Time+cfg.BlockTime) require.NoError(t, err) parentBeaconBlockRoot := testutils.RandomHash(rng) payloadA1 := ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{ @@ -170,21 +171,24 @@ func TestAttributesHandler(t *testing.T) { logger := testlog.Logger(t, log.LevelInfo) l2 := &testutils.MockL2Client{} emitter := &testutils.MockEmitter{} - ah := NewAttributesHandler(logger, cfg, context.Background(), l2) + engDeriver := &MockEngineController{} + ah := NewAttributesHandler(logger, cfg, context.Background(), l2, engDeriver) ah.AttachEmitter(emitter) emitter.ExpectOnce(derive.ConfirmReceivedAttributesEvent{}) - emitter.ExpectOnce(engine.PendingSafeRequestEvent{}) + engDeriver.On("RequestPendingSafeUpdate", context.Background()).Once() ah.OnEvent(context.Background(), derive.DerivedAttributesEvent{ Attributes: attrA1, }) + engDeriver.AssertExpectations(t) emitter.AssertExpectations(t) require.NotNil(t, ah.attributes, "queue the invalid attributes") - emitter.ExpectOnce(engine.PendingSafeRequestEvent{}) + 
engDeriver.On("RequestPendingSafeUpdate", context.Background()).Once() ah.OnEvent(context.Background(), engine.InvalidPayloadAttributesEvent{ Attributes: attrA1, }) + engDeriver.AssertExpectations(t) emitter.AssertExpectations(t) require.Nil(t, ah.attributes, "drop the invalid attributes") }) @@ -192,14 +196,16 @@ func TestAttributesHandler(t *testing.T) { logger := testlog.Logger(t, log.LevelInfo) l2 := &testutils.MockL2Client{} emitter := &testutils.MockEmitter{} - ah := NewAttributesHandler(logger, cfg, context.Background(), l2) + engDeriver := &MockEngineController{} + ah := NewAttributesHandler(logger, cfg, context.Background(), l2, engDeriver) ah.AttachEmitter(emitter) emitter.ExpectOnce(derive.ConfirmReceivedAttributesEvent{}) - emitter.ExpectOnce(engine.PendingSafeRequestEvent{}) + engDeriver.On("RequestPendingSafeUpdate", context.Background()).Once() ah.OnEvent(context.Background(), derive.DerivedAttributesEvent{ Attributes: attrA1, }) + engDeriver.AssertExpectations(t) emitter.AssertExpectations(t) require.NotNil(t, ah.attributes) // New attributes will have to get generated after processing the last ones @@ -217,14 +223,16 @@ func TestAttributesHandler(t *testing.T) { logger := testlog.Logger(t, log.LevelInfo) l2 := &testutils.MockL2Client{} emitter := &testutils.MockEmitter{} - ah := NewAttributesHandler(logger, cfg, context.Background(), l2) + engDeriver := &MockEngineController{} + ah := NewAttributesHandler(logger, cfg, context.Background(), l2, engDeriver) ah.AttachEmitter(emitter) emitter.ExpectOnce(derive.ConfirmReceivedAttributesEvent{}) - emitter.ExpectOnce(engine.PendingSafeRequestEvent{}) + engDeriver.On("RequestPendingSafeUpdate", context.Background()).Once() ah.OnEvent(context.Background(), derive.DerivedAttributesEvent{ Attributes: attrA1, }) + engDeriver.AssertExpectations(t) emitter.AssertExpectations(t) require.NotNil(t, ah.attributes) @@ -243,13 +251,15 @@ func TestAttributesHandler(t *testing.T) { logger := testlog.Logger(t, 
log.LevelInfo) l2 := &testutils.MockL2Client{} emitter := &testutils.MockEmitter{} - ah := NewAttributesHandler(logger, cfg, context.Background(), l2) + engDeriver := &MockEngineController{} + ah := NewAttributesHandler(logger, cfg, context.Background(), l2, engDeriver) ah.AttachEmitter(emitter) // attrA1Alt does not match block A1, so will cause force-reorg. emitter.ExpectOnce(derive.ConfirmReceivedAttributesEvent{}) - emitter.ExpectOnce(engine.PendingSafeRequestEvent{}) + engDeriver.On("RequestPendingSafeUpdate", context.Background()).Once() ah.OnEvent(context.Background(), derive.DerivedAttributesEvent{Attributes: attrA1Alt}) + engDeriver.AssertExpectations(t) emitter.AssertExpectations(t) require.NotNil(t, ah.attributes, "queued up derived attributes") @@ -280,7 +290,8 @@ func TestAttributesHandler(t *testing.T) { logger := testlog.Logger(t, log.LevelInfo) l2 := &testutils.MockL2Client{} emitter := &testutils.MockEmitter{} - ah := NewAttributesHandler(logger, cfg, context.Background(), l2) + engDeriver := &MockEngineController{} + ah := NewAttributesHandler(logger, cfg, context.Background(), l2, engDeriver) ah.AttachEmitter(emitter) attr := &derive.AttributesWithParent{ @@ -290,23 +301,24 @@ func TestAttributesHandler(t *testing.T) { DerivedFrom: refB, } emitter.ExpectOnce(derive.ConfirmReceivedAttributesEvent{}) - emitter.ExpectOnce(engine.PendingSafeRequestEvent{}) + engDeriver.On("RequestPendingSafeUpdate", context.Background()).Once() ah.OnEvent(context.Background(), derive.DerivedAttributesEvent{Attributes: attr}) + engDeriver.AssertExpectations(t) emitter.AssertExpectations(t) require.NotNil(t, ah.attributes, "queued up derived attributes") // Call during consolidation. 
l2.ExpectPayloadByNumber(refA1.Number, payloadA1, nil) - emitter.ExpectOnce(engine.PromotePendingSafeEvent{ - Ref: refA1, - Concluding: concluding, - Source: refB, - }) + // AttributesHandler will call EngDeriver methods for updating pending safe and local safe + engDeriver.On("TryUpdatePendingSafe", ah.ctx, refA1, concluding, refB).Return() + engDeriver.On("TryUpdateLocalSafe", ah.ctx, refA1, concluding, refB).Return() + ah.OnEvent(context.Background(), engine.PendingSafeUpdateEvent{ PendingSafe: refA0, Unsafe: refA1, }) + engDeriver.AssertExpectations(t) l2.AssertExpectations(t) emitter.AssertExpectations(t) require.NotNil(t, ah.attributes, "still have attributes, processing still unconfirmed") @@ -334,12 +346,14 @@ func TestAttributesHandler(t *testing.T) { logger := testlog.Logger(t, log.LevelInfo) l2 := &testutils.MockL2Client{} emitter := &testutils.MockEmitter{} - ah := NewAttributesHandler(logger, cfg, context.Background(), l2) + engDeriver := &MockEngineController{} + ah := NewAttributesHandler(logger, cfg, context.Background(), l2, engDeriver) ah.AttachEmitter(emitter) emitter.ExpectOnce(derive.ConfirmReceivedAttributesEvent{}) - emitter.ExpectOnce(engine.PendingSafeRequestEvent{}) + engDeriver.On("RequestPendingSafeUpdate", context.Background()).Once() ah.OnEvent(context.Background(), derive.DerivedAttributesEvent{Attributes: attrA1Alt}) + engDeriver.AssertExpectations(t) emitter.AssertExpectations(t) require.NotNil(t, ah.attributes, "queued up derived attributes") @@ -371,7 +385,8 @@ func TestAttributesHandler(t *testing.T) { logger := testlog.Logger(t, log.LevelInfo) l2 := &testutils.MockL2Client{} emitter := &testutils.MockEmitter{} - ah := NewAttributesHandler(logger, cfg, context.Background(), l2) + engDeriver := &MockEngineController{} + ah := NewAttributesHandler(logger, cfg, context.Background(), l2, engDeriver) ah.AttachEmitter(emitter) emitter.ExpectOnceType("ResetEvent") @@ -387,7 +402,8 @@ func TestAttributesHandler(t *testing.T) { logger := 
testlog.Logger(t, log.LevelInfo) l2 := &testutils.MockL2Client{} emitter := &testutils.MockEmitter{} - ah := NewAttributesHandler(logger, cfg, context.Background(), l2) + engDeriver := &MockEngineController{} + ah := NewAttributesHandler(logger, cfg, context.Background(), l2, engDeriver) ah.AttachEmitter(emitter) // If there are no attributes, we expect the pipeline to be requested to generate attributes. diff --git a/op-node/rollup/attributes/engine_consolidate.go b/op-node/rollup/attributes/engine_consolidate.go index 19a2b2aea8fe5..2c149686bf1c1 100644 --- a/op-node/rollup/attributes/engine_consolidate.go +++ b/op-node/rollup/attributes/engine_consolidate.go @@ -10,7 +10,6 @@ import ( "github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" @@ -75,7 +74,7 @@ func AttributesMatchBlock(rollupCfg *rollup.Config, attrs *eth.PayloadAttributes if attrs.SuggestedFeeRecipient != block.FeeRecipient { return fmt.Errorf("fee recipient data does not match, expected %s but got %s", block.FeeRecipient, attrs.SuggestedFeeRecipient) } - if err := checkEIP1559ParamsMatch(rollupCfg.ChainOpConfig, attrs.EIP1559Params, block.ExtraData); err != nil { + if err := checkExtraDataParamsMatch(rollupCfg, uint64(block.Timestamp), attrs.EIP1559Params, attrs.MinBaseFee, block.ExtraData); err != nil { return err } @@ -97,7 +96,7 @@ func checkParentBeaconBlockRootMatch(attrRoot, blockRoot *common.Hash) error { return nil } -func checkEIP1559ParamsMatch(opCfg *params.OptimismConfig, attrParams *eth.Bytes8, blockExtraData []byte) error { +func checkExtraDataParamsMatch(cfg *rollup.Config, blockTimestamp uint64, attrParams *eth.Bytes8, attrMinBaseFee *uint64, blockExtraData []byte) error { // Note that we can assume that the attributes' eip1559params are 
non-nil iff Holocene is active // according to the local rollup config. if attrParams != nil { @@ -109,10 +108,6 @@ func checkEIP1559ParamsMatch(opCfg *params.OptimismConfig, attrParams *eth.Bytes if err := eip1559.ValidateHolocene1559Params(params); err != nil { // This would be a critical error, because the attributes are generated by derivation and must be valid. return fmt.Errorf("invalid attributes EIP1559 parameters: %w", err) - } else if err := eip1559.ValidateHoloceneExtraData(blockExtraData); err != nil { - // This can happen if the unsafe chain contains invalid (in particular, empty) extraData while Holocene - // is active. The extraData field of blocks from sequencer gossip isn't currently checked during import. - return fmt.Errorf("invalid block extraData: %w", err) } ad, ae := eip1559.DecodeHolocene1559Params(params) @@ -120,12 +115,18 @@ func checkEIP1559ParamsMatch(opCfg *params.OptimismConfig, attrParams *eth.Bytes // Translate 0,0 to the pre-Holocene protocol constants, like the EL does too. if ad == 0 { // If attrParams are non-nil, Holocene, and so Canyon, must be active. 
- ad = *opCfg.EIP1559DenominatorCanyon - ae = opCfg.EIP1559Elasticity + ad = *cfg.ChainOpConfig.EIP1559DenominatorCanyon + ae = cfg.ChainOpConfig.EIP1559Elasticity translated = true } - bd, be := eip1559.DecodeHoloceneExtraData(blockExtraData) + // Decode block parameters and check for mismatch + err := eip1559.ValidateOptimismExtraData(cfg, blockTimestamp, blockExtraData) + if err != nil { + return fmt.Errorf("invalid block extraData: %w", err) + } + bd, be, bm := eip1559.DecodeOptimismExtraData(cfg, blockTimestamp, blockExtraData) + if ad != bd || ae != be { extraErr := "" if translated { @@ -133,6 +134,9 @@ func checkEIP1559ParamsMatch(opCfg *params.OptimismConfig, attrParams *eth.Bytes } return fmt.Errorf("eip1559 parameters do not match, attributes: %d, %d%s, block: %d, %d", ad, ae, extraErr, bd, be) } + if bm == nil && attrMinBaseFee != nil || bm != nil && attrMinBaseFee == nil || bm != nil && attrMinBaseFee != nil && *bm != *attrMinBaseFee { + return fmt.Errorf("minBaseFee does not match, attributes: %d, block: %d", attrMinBaseFee, bm) + } } else if len(blockExtraData) > 0 { // When deriving pre-Holocene blocks, the extraData must be empty. 
return fmt.Errorf("nil EIP1559Params in attributes but non-nil extraData in block: %v", blockExtraData) @@ -162,7 +166,7 @@ func checkWithdrawals(rollupCfg *rollup.Config, attrs *eth.PayloadAttributes, bl return fmt.Errorf("%w: attributes", ErrCanyonMustHaveWithdrawals) } if !isIsthmus { - // canyon: the withdrawals root should be set to the empty value + // canyon: the withdrawals root should be set to the empty withdrawals hash if block.WithdrawalsRoot != nil && *block.WithdrawalsRoot != types.EmptyWithdrawalsHash { return fmt.Errorf("%w: got %v", ErrCanyonWithdrawalsRoot, *block.WithdrawalsRoot) } diff --git a/op-node/rollup/attributes/engine_consolidate_test.go b/op-node/rollup/attributes/engine_consolidate_test.go index ad1af4ce65b22..702355ae0da9f 100644 --- a/op-node/rollup/attributes/engine_consolidate_test.go +++ b/op-node/rollup/attributes/engine_consolidate_test.go @@ -36,7 +36,7 @@ type matchArgs struct { parentHash common.Hash } -func holoceneArgs() matchArgs { +func jovianArgs() matchArgs { var ( validParentHash = common.HexToHash("0x123") validTimestamp = eth.Uint64Quantity(50) @@ -46,24 +46,29 @@ func holoceneArgs() matchArgs { validFeeRecipient = predeploys.SequencerFeeVaultAddr validTx = testutils.RandomLegacyTxNotProtected(rand.New(rand.NewSource(42))) validTxData, _ = validTx.MarshalBinary() + minBaseFee = uint64(1e9) - validHoloceneExtraData = eth.BytesMax32(eip1559.EncodeHoloceneExtraData( - *defaultOpConfig.EIP1559DenominatorCanyon, defaultOpConfig.EIP1559Elasticity)) - validHoloceneEIP1559Params = new(eth.Bytes8) + validJovianExtraData = eth.BytesMax32(eip1559.EncodeMinBaseFeeExtraData( + *defaultOpConfig.EIP1559DenominatorCanyon, defaultOpConfig.EIP1559Elasticity, minBaseFee)) + validJovianEIP1559Params = new(eth.Bytes8) ) + // Populate the EIP1559 params with the encoded values + copy((*validJovianEIP1559Params)[:], eip1559.EncodeHolocene1559Params( + *defaultOpConfig.EIP1559DenominatorCanyon, defaultOpConfig.EIP1559Elasticity)) return 
matchArgs{ envelope: ð.ExecutionPayloadEnvelope{ ParentBeaconBlockRoot: &validParentBeaconRoot, ExecutionPayload: ð.ExecutionPayload{ - ParentHash: validParentHash, - Timestamp: validTimestamp, - PrevRandao: validPrevRandao, - GasLimit: validGasLimit, - Transactions: []eth.Data{validTxData}, - Withdrawals: &types.Withdrawals{}, - FeeRecipient: validFeeRecipient, - ExtraData: validHoloceneExtraData, + ParentHash: validParentHash, + Timestamp: validTimestamp, + PrevRandao: validPrevRandao, + GasLimit: validGasLimit, + Transactions: []eth.Data{validTxData}, + Withdrawals: &types.Withdrawals{}, + FeeRecipient: validFeeRecipient, + ExtraData: validJovianExtraData, + WithdrawalsRoot: &types.EmptyWithdrawalsHash, }, }, attrs: ð.PayloadAttributes{ @@ -74,12 +79,41 @@ func holoceneArgs() matchArgs { Transactions: []eth.Data{validTxData}, Withdrawals: &types.Withdrawals{}, SuggestedFeeRecipient: validFeeRecipient, - EIP1559Params: validHoloceneEIP1559Params, + EIP1559Params: validJovianEIP1559Params, + MinBaseFee: &minBaseFee, }, parentHash: validParentHash, } } +func jovianArgsMinBaseFeeMissingFromAttributes() matchArgs { + args := jovianArgs() + args.attrs.MinBaseFee = nil + return args +} + +func jovianArgsMinBaseFeeMissingFromBlock() matchArgs { + args := jovianArgs() + args.envelope.ExecutionPayload.ExtraData = eth.BytesMax32(eip1559.EncodeHoloceneExtraData( + *defaultOpConfig.EIP1559DenominatorCanyon, defaultOpConfig.EIP1559Elasticity)) // Note use of HoloceneExtraData instead of JovianExtraData + return args +} + +func jovianArgsInconsistentMinBaseFee() matchArgs { + args := jovianArgs() + args.attrs.MinBaseFee = ptr(uint64(2e9)) + return args +} + +func holoceneArgs() matchArgs { + args := jovianArgs() + args.envelope.ExecutionPayload.ExtraData = eth.BytesMax32(eip1559.EncodeHoloceneExtraData( + *defaultOpConfig.EIP1559DenominatorCanyon, defaultOpConfig.EIP1559Elasticity)) + args.attrs.EIP1559Params = new(eth.Bytes8) + args.attrs.MinBaseFee = nil + return args +} + 
func ecotoneArgs() matchArgs { args := holoceneArgs() args.attrs.EIP1559Params = nil @@ -184,12 +218,11 @@ func createMismatchedEIP1559Params() matchArgs { } func TestAttributesMatch(t *testing.T) { - // default valid timestamp is 50 - pastTime := uint64(0) - futureTime := uint64(100) - - rollupCfgPreCanyon := &rollup.Config{CanyonTime: &futureTime, ChainOpConfig: defaultOpConfig} - rollupCfgPreIsthmus := &rollup.Config{CanyonTime: &pastTime, IsthmusTime: &futureTime, ChainOpConfig: defaultOpConfig} + cfg := func(fork rollup.ForkName) *rollup.Config { + cfg := &rollup.Config{ChainOpConfig: defaultOpConfig} + cfg.ActivateAtGenesis(fork) + return cfg + } tests := []struct { args matchArgs @@ -199,106 +232,129 @@ func TestAttributesMatch(t *testing.T) { }{ { args: bedrockArgs(), - rollupCfg: rollupCfgPreCanyon, + rollupCfg: cfg(rollup.Bedrock), desc: "validBedrockArgs", }, { args: bedrockArgs(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Canyon), err: ErrCanyonMustHaveWithdrawals.Error() + ": block", desc: "bedrockArgsPostCanyon", }, { args: canyonArgs(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Canyon), desc: "validCanyonArgs", }, { args: ecotoneArgs(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Ecotone), desc: "validEcotoneArgs", }, { args: holoceneArgs(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Holocene), desc: "validholoceneArgs", }, + { + args: jovianArgs(), + rollupCfg: cfg(rollup.Jovian), + desc: "validJovianArgs", + }, { args: mismatchedParentHashArgs(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Holocene), err: "parent hash field does not match", desc: "mismatchedParentHashArgs", }, { args: createMismatchedTimestamp(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Holocene), err: "timestamp field does not match", desc: "createMismatchedTimestamp", }, { args: createMismatchedPrevRandao(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Holocene), err: "random 
field does not match", desc: "createMismatchedPrevRandao", }, { args: createMismatchedTransactions(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Holocene), err: "transaction count does not match", desc: "createMismatchedTransactions", }, { args: ecotoneNoParentBeaconBlockRoot(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Holocene), err: "expected non-nil parent beacon block root", desc: "ecotoneNoParentBeaconBlockRoot", }, { args: ecotoneUnexpectedParentBeaconBlockRoot(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Holocene), err: "expected nil parent beacon block root but got non-nil", desc: "ecotoneUnexpectedParentBeaconBlockRoot", }, { args: ecotoneMismatchParentBeaconBlockRoot(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Ecotone), err: "parent beacon block root does not match", desc: "ecotoneMismatchParentBeaconBlockRoot", }, { args: ecotoneMismatchParentBeaconBlockRootPtr(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Ecotone), desc: "ecotoneMismatchParentBeaconBlockRootPtr", }, { args: ecotoneNilParentBeaconBlockRoots(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Ecotone), desc: "ecotoneNilParentBeaconBlockRoots", }, { args: createMismatchedGasLimit(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Holocene), err: "gas limit does not match", desc: "createMismatchedGasLimit", }, { args: createNilGasLimit(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Holocene), err: "expected gaslimit in attributes to not be nil", desc: "createNilGasLimit", }, { args: createMismatchedFeeRecipient(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Holocene), err: "fee recipient data does not match", desc: "createMismatchedFeeRecipient", }, { args: createMismatchedEIP1559Params(), - rollupCfg: rollupCfgPreIsthmus, + rollupCfg: cfg(rollup.Holocene), err: "eip1559 parameters do not match", desc: "createMismatchedEIP1559Params", }, + { + args: 
jovianArgsMinBaseFeeMissingFromAttributes(), + rollupCfg: cfg(rollup.Jovian), + err: "minBaseFee does not match", + desc: "missingMinBaseFee", + }, + { + args: jovianArgsMinBaseFeeMissingFromBlock(), + rollupCfg: cfg(rollup.Jovian), + err: "invalid block extraData: MinBaseFee extraData should be 17 bytes, got 9", + desc: "missingMinBaseFee", + }, + { + args: jovianArgsInconsistentMinBaseFee(), + rollupCfg: cfg(rollup.Jovian), + err: "minBaseFee does not match", + desc: "inconsistentMinBaseFee", + }, } for _, test := range tests { @@ -530,7 +586,7 @@ func TestCheckEIP1559ParamsMatch(t *testing.T) { desc: "err-invalid-extra", attrParams: ¶ms, blockExtraData: append(eth.BytesMax32{42}, params[:]...), - err: "invalid block extraData: holocene extraData should have 0 version byte, got 42", + err: "invalid block extraData: holocene extraData version byte should be 0, got 42", }, { desc: "err-no-match", @@ -545,7 +601,15 @@ func TestCheckEIP1559ParamsMatch(t *testing.T) { }, } { t.Run(test.desc, func(t *testing.T) { - err := checkEIP1559ParamsMatch(defaultOpConfig, test.attrParams, test.blockExtraData) + pastTime := uint64(0) + futureTime := uint64(3) + cfg := &rollup.Config{ + CanyonTime: &pastTime, + HoloceneTime: &pastTime, + IsthmusTime: &pastTime, + JovianTime: &futureTime, + ChainOpConfig: defaultOpConfig} + err := checkExtraDataParamsMatch(cfg, uint64(2), test.attrParams, nil, test.blockExtraData) if test.err == "" { require.NoError(t, err) } else { diff --git a/op-node/rollup/attributes/testutils.go b/op-node/rollup/attributes/testutils.go new file mode 100644 index 0000000000000..bd6542c70df5a --- /dev/null +++ b/op-node/rollup/attributes/testutils.go @@ -0,0 +1,30 @@ +package attributes + +import ( + "context" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/stretchr/testify/mock" +) + +type MockEngineController struct { + mock.Mock +} + +var _ EngineController = (*MockEngineController)(nil) + +func (m *MockEngineController) 
TryUpdatePendingSafe(ctx context.Context, ref eth.L2BlockRef, concluding bool, source eth.L1BlockRef) { + m.Mock.MethodCalled("TryUpdatePendingSafe", ctx, ref, concluding, source) +} + +func (m *MockEngineController) TryUpdateLocalSafe(ctx context.Context, ref eth.L2BlockRef, concluding bool, source eth.L1BlockRef) { + m.Mock.MethodCalled("TryUpdateLocalSafe", ctx, ref, concluding, source) +} + +func (m *MockEngineController) RequestForkchoiceUpdate(ctx context.Context) { + m.Mock.MethodCalled("RequestForkchoiceUpdate", ctx) +} + +func (m *MockEngineController) RequestPendingSafeUpdate(ctx context.Context) { + m.Mock.MethodCalled("RequestPendingSafeUpdate", ctx) +} diff --git a/op-node/rollup/chain_spec.go b/op-node/rollup/chain_spec.go index 1417a593e7cbb..8e62b1b6808bc 100644 --- a/op-node/rollup/chain_spec.go +++ b/op-node/rollup/chain_spec.go @@ -42,8 +42,8 @@ const ( Granite ForkName = "granite" Holocene ForkName = "holocene" Isthmus ForkName = "isthmus" - Interop ForkName = "interop" Jovian ForkName = "jovian" + Interop ForkName = "interop" // ADD NEW FORKS TO AllForks BELOW! None ForkName = "" ) @@ -58,8 +58,8 @@ var AllForks = []ForkName{ Granite, Holocene, Isthmus, - Interop, Jovian, + Interop, // ADD NEW FORKS HERE! } @@ -123,6 +123,11 @@ func (s *ChainSpec) IsIsthmus(t uint64) bool { return s.config.IsIsthmus(t) } +// IsJovian returns true if t >= jovian_time +func (s *ChainSpec) IsJovian(t uint64) bool { + return s.config.IsJovian(t) +} + // MaxChannelBankSize returns the maximum number of bytes the can allocated inside the channel bank // before pruning occurs at the given timestamp. 
func (s *ChainSpec) MaxChannelBankSize(t uint64) uint64 { @@ -193,12 +198,12 @@ func (s *ChainSpec) CheckForkActivation(log log.Logger, block eth.L2BlockRef) { if s.config.IsIsthmus(block.Time) { s.currentFork = Isthmus } - if s.config.IsInterop(block.Time) { - s.currentFork = Interop - } if s.config.IsJovian(block.Time) { s.currentFork = Jovian } + if s.config.IsInterop(block.Time) { + s.currentFork = Interop + } log.Info("Current hardfork version detected", "forkName", s.currentFork) return } @@ -222,10 +227,10 @@ func (s *ChainSpec) CheckForkActivation(log log.Logger, block eth.L2BlockRef) { foundActivationBlock = s.config.IsHoloceneActivationBlock(block.Time) case Isthmus: foundActivationBlock = s.config.IsIsthmusActivationBlock(block.Time) - case Interop: - foundActivationBlock = s.config.IsInteropActivationBlock(block.Time) case Jovian: foundActivationBlock = s.config.IsJovianActivationBlock(block.Time) + case Interop: + foundActivationBlock = s.config.IsInteropActivationBlock(block.Time) } if foundActivationBlock { diff --git a/op-node/rollup/chain_spec_test.go b/op-node/rollup/chain_spec_test.go index f1d2cf53f9705..02601a4d9d8fb 100644 --- a/op-node/rollup/chain_spec_test.go +++ b/op-node/rollup/chain_spec_test.go @@ -47,8 +47,8 @@ var testConfig = Config{ GraniteTime: u64ptr(60), HoloceneTime: u64ptr(70), IsthmusTime: u64ptr(80), - InteropTime: u64ptr(90), - JovianTime: u64ptr(100), + JovianTime: u64ptr(90), + InteropTime: u64ptr(100), BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000000000010"), DepositContractAddress: common.HexToAddress("0xbEb5Fc579115071764c7423A4f12eDde41f106Ed"), L1SystemConfigAddress: common.HexToAddress("0x229047fed2591dbec1eF1118d64F7aF3dB9EB290"), @@ -193,21 +193,21 @@ func TestCheckForkActivation(t *testing.T) { expectedLog: "Detected hardfork activation block", }, { - name: "Interop activation", + name: "Jovian activation", block: eth.L2BlockRef{Time: 90, Number: 11, Hash: common.Hash{0xb}}, - 
expectedCurrentFork: Interop, + expectedCurrentFork: Jovian, expectedLog: "Detected hardfork activation block", }, { - name: "Jovian activation", + name: "Interop activation", block: eth.L2BlockRef{Time: 100, Number: 11, Hash: common.Hash{0xb}}, - expectedCurrentFork: Jovian, + expectedCurrentFork: Interop, expectedLog: "Detected hardfork activation block", }, { name: "No more hardforks", block: eth.L2BlockRef{Time: 700, Number: 12, Hash: common.Hash{0xc}}, - expectedCurrentFork: Jovian, + expectedCurrentFork: Interop, expectedLog: "", }, } diff --git a/op-node/rollup/clsync/clsync.go b/op-node/rollup/clsync/clsync.go deleted file mode 100644 index efc7799bd53e7..0000000000000 --- a/op-node/rollup/clsync/clsync.go +++ /dev/null @@ -1,186 +0,0 @@ -package clsync - -import ( - "context" - "sync" - - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" - "github.com/ethereum-optimism/optimism/op-node/rollup/engine" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/event" -) - -// Max memory used for buffering unsafe payloads -const maxUnsafePayloadsMemory = 500 * 1024 * 1024 - -type Metrics interface { - RecordUnsafePayloadsBuffer(length uint64, memSize uint64, next eth.BlockID) -} - -// CLSync holds on to a queue of received unsafe payloads, -// and tries to apply them to the tip of the chain when requested to. 
-type CLSync struct { - log log.Logger - cfg *rollup.Config - metrics Metrics - - emitter event.Emitter - - mu sync.Mutex - - unsafePayloads *PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps and duplicates -} - -func NewCLSync(log log.Logger, cfg *rollup.Config, metrics Metrics) *CLSync { - return &CLSync{ - log: log, - cfg: cfg, - metrics: metrics, - unsafePayloads: NewPayloadsQueue(log, maxUnsafePayloadsMemory, payloadMemSize), - } -} - -func (eq *CLSync) AttachEmitter(em event.Emitter) { - eq.emitter = em -} - -// LowestQueuedUnsafeBlock retrieves the first queued-up L2 unsafe payload, or a zeroed reference if there is none. -func (eq *CLSync) LowestQueuedUnsafeBlock() eth.L2BlockRef { - payload := eq.unsafePayloads.Peek() - if payload == nil { - return eth.L2BlockRef{} - } - ref, err := derive.PayloadToBlockRef(eq.cfg, payload.ExecutionPayload) - if err != nil { - return eth.L2BlockRef{} - } - return ref -} - -type ReceivedUnsafePayloadEvent struct { - Envelope *eth.ExecutionPayloadEnvelope -} - -func (ev ReceivedUnsafePayloadEvent) String() string { - return "received-unsafe-payload" -} - -func (eq *CLSync) OnEvent(ctx context.Context, ev event.Event) bool { - // Events may be concurrent in the future. Prevent unsafe concurrent modifications to the payloads queue. - eq.mu.Lock() - defer eq.mu.Unlock() - - switch x := ev.(type) { - case engine.PayloadInvalidEvent: - eq.onInvalidPayload(x) - case engine.ForkchoiceUpdateEvent: - eq.onForkchoiceUpdate(ctx, x) - case ReceivedUnsafePayloadEvent: - eq.onUnsafePayload(ctx, x) - default: - return false - } - return true -} - -// onInvalidPayload checks if the first next-up payload matches the invalid payload. -// If so, the payload is dropped, to give the next payloads a try. 
-func (eq *CLSync) onInvalidPayload(x engine.PayloadInvalidEvent) { - eq.log.Debug("CL sync received invalid-payload report", "id", x.Envelope.ExecutionPayload.ID()) - - block := x.Envelope.ExecutionPayload - if peek := eq.unsafePayloads.Peek(); peek != nil && - block.BlockHash == peek.ExecutionPayload.BlockHash { - eq.log.Warn("Dropping invalid unsafe payload", - "hash", block.BlockHash, "number", uint64(block.BlockNumber), - "timestamp", uint64(block.Timestamp)) - eq.unsafePayloads.Pop() - } -} - -// onForkchoiceUpdate peeks at the next applicable unsafe payload, if any, -// to apply on top of the received forkchoice pre-state. -// The payload is held on to until the forkchoice changes (success case) or the payload is reported to be invalid. -func (eq *CLSync) onForkchoiceUpdate(ctx context.Context, x engine.ForkchoiceUpdateEvent) { - eq.log.Debug("CL sync received forkchoice update", - "unsafe", x.UnsafeL2Head, "safe", x.SafeL2Head, "finalized", x.FinalizedL2Head) - - for { - pop, abort := eq.fromQueue(x) - if abort { - return - } - if pop { - eq.unsafePayloads.Pop() - } else { - break - } - } - - firstEnvelope := eq.unsafePayloads.Peek() - - // We don't pop from the queue. If there is a temporary error then we can retry. - // Upon next forkchoice update or invalid-payload event we can remove it from the queue. - eq.emitter.Emit(ctx, engine.ProcessUnsafePayloadEvent{Envelope: firstEnvelope}) -} - -// fromQueue determines what to do with the tip of the payloads-queue, given the forkchoice pre-state. -// If abort, there is nothing to process (either due to empty queue, or unsuitable tip). -// If pop, the tip should be dropped, and processing can repeat from there. -// If not abort or pop, the tip is ready to process. 
-func (eq *CLSync) fromQueue(x engine.ForkchoiceUpdateEvent) (pop bool, abort bool) { - if eq.unsafePayloads.Len() == 0 { - return false, true - } - firstEnvelope := eq.unsafePayloads.Peek() - first := firstEnvelope.ExecutionPayload - - if first.BlockHash == x.UnsafeL2Head.Hash { - eq.log.Debug("successfully processed payload, removing it from the payloads queue now") - return true, false - } - - if uint64(first.BlockNumber) <= x.SafeL2Head.Number { - eq.log.Info("skipping unsafe payload, since it is older than safe head", "safe", x.SafeL2Head.ID(), "unsafe", x.UnsafeL2Head.ID(), "unsafe_payload", first.ID()) - return true, false - } - if uint64(first.BlockNumber) <= x.UnsafeL2Head.Number { - eq.log.Info("skipping unsafe payload, since it is older than unsafe head", "unsafe", x.UnsafeL2Head.ID(), "unsafe_payload", first.ID()) - return true, false - } - - // Ensure that the unsafe payload builds upon the current unsafe head - if first.ParentHash != x.UnsafeL2Head.Hash { - if uint64(first.BlockNumber) == x.UnsafeL2Head.Number+1 { - eq.log.Info("skipping unsafe payload, since it does not build onto the existing unsafe chain", "safe", x.SafeL2Head.ID(), "unsafe", x.UnsafeL2Head.ID(), "unsafe_payload", first.ID()) - return true, false - } - return false, true // rollup-node should try something different if it cannot process the first unsafe payload - } - - return false, false -} - -// AddUnsafePayload schedules an execution payload to be processed, ahead of deriving it from L1. 
-func (eq *CLSync) onUnsafePayload(ctx context.Context, x ReceivedUnsafePayloadEvent) { - eq.log.Debug("CL sync received payload", "payload", x.Envelope.ExecutionPayload.ID()) - envelope := x.Envelope - if envelope == nil { - eq.log.Warn("cannot add nil unsafe payload") - return - } - - if err := eq.unsafePayloads.Push(envelope); err != nil { - eq.log.Warn("Could not add unsafe payload", "id", envelope.ExecutionPayload.ID(), "timestamp", uint64(envelope.ExecutionPayload.Timestamp), "err", err) - return - } - p := eq.unsafePayloads.Peek() - eq.metrics.RecordUnsafePayloadsBuffer(uint64(eq.unsafePayloads.Len()), eq.unsafePayloads.MemSize(), p.ExecutionPayload.ID()) - eq.log.Trace("Next unsafe payload to process", "next", p.ExecutionPayload.ID(), "timestamp", uint64(p.ExecutionPayload.Timestamp)) - - // request forkchoice signal, so we can process the payload maybe - eq.emitter.Emit(ctx, engine.ForkchoiceRequestEvent{}) -} diff --git a/op-node/rollup/clsync/clsync_test.go b/op-node/rollup/clsync/clsync_test.go deleted file mode 100644 index b85956810e2bd..0000000000000 --- a/op-node/rollup/clsync/clsync_test.go +++ /dev/null @@ -1,386 +0,0 @@ -package clsync - -import ( - "context" - "errors" - "math/big" - "math/rand" // nosemgrep - "testing" - - "github.com/holiman/uint256" - "github.com/stretchr/testify/require" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" - "github.com/ethereum-optimism/optimism/op-node/rollup/engine" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/testlog" - "github.com/ethereum-optimism/optimism/op-service/testutils" -) - -func TestCLSync(t *testing.T) { - rng := rand.New(rand.NewSource(1234)) - - refA := testutils.RandomBlockRef(rng) - - aL1Info := &testutils.MockBlockInfo{ - 
InfoParentHash: refA.ParentHash, - InfoNum: refA.Number, - InfoTime: refA.Time, - InfoHash: refA.Hash, - InfoBaseFee: big.NewInt(1), - InfoBlobBaseFee: big.NewInt(1), - InfoReceiptRoot: types.EmptyRootHash, - InfoRoot: testutils.RandomHash(rng), - InfoGasUsed: rng.Uint64(), - } - - refA0 := eth.L2BlockRef{ - Hash: testutils.RandomHash(rng), - Number: 0, - ParentHash: common.Hash{}, - Time: refA.Time, - L1Origin: refA.ID(), - SequenceNumber: 0, - } - gasLimit := eth.Uint64Quantity(20_000_000) - cfg := &rollup.Config{ - Genesis: rollup.Genesis{ - L1: refA.ID(), - L2: refA0.ID(), - L2Time: refA0.Time, - SystemConfig: eth.SystemConfig{ - BatcherAddr: common.Address{42}, - Overhead: [32]byte{123}, - Scalar: [32]byte{42}, - GasLimit: 20_000_000, - }, - }, - BlockTime: 1, - SeqWindowSize: 2, - } - - refA1 := eth.L2BlockRef{ - Hash: testutils.RandomHash(rng), - Number: refA0.Number + 1, - ParentHash: refA0.Hash, - Time: refA0.Time + cfg.BlockTime, - L1Origin: refA.ID(), - SequenceNumber: 1, - } - - altRefA1 := refA1 - altRefA1.Hash = testutils.RandomHash(rng) - - refA2 := eth.L2BlockRef{ - Hash: testutils.RandomHash(rng), - Number: refA1.Number + 1, - ParentHash: refA1.Hash, - Time: refA1.Time + cfg.BlockTime, - L1Origin: refA.ID(), - SequenceNumber: 2, - } - - a1L1Info, err := derive.L1InfoDepositBytes(cfg, cfg.Genesis.SystemConfig, refA1.SequenceNumber, aL1Info, refA1.Time) - require.NoError(t, err) - payloadA1 := ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{ - ParentHash: refA1.ParentHash, - FeeRecipient: common.Address{}, - StateRoot: eth.Bytes32{}, - ReceiptsRoot: eth.Bytes32{}, - LogsBloom: eth.Bytes256{}, - PrevRandao: eth.Bytes32{}, - BlockNumber: eth.Uint64Quantity(refA1.Number), - GasLimit: gasLimit, - GasUsed: 0, - Timestamp: eth.Uint64Quantity(refA1.Time), - ExtraData: nil, - BaseFeePerGas: eth.Uint256Quantity(*uint256.NewInt(7)), - BlockHash: refA1.Hash, - Transactions: []eth.Data{a1L1Info}, - }} - a2L1Info, err := 
derive.L1InfoDepositBytes(cfg, cfg.Genesis.SystemConfig, refA2.SequenceNumber, aL1Info, refA2.Time) - require.NoError(t, err) - payloadA2 := ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{ - ParentHash: refA2.ParentHash, - FeeRecipient: common.Address{}, - StateRoot: eth.Bytes32{}, - ReceiptsRoot: eth.Bytes32{}, - LogsBloom: eth.Bytes256{}, - PrevRandao: eth.Bytes32{}, - BlockNumber: eth.Uint64Quantity(refA2.Number), - GasLimit: gasLimit, - GasUsed: 0, - Timestamp: eth.Uint64Quantity(refA2.Time), - ExtraData: nil, - BaseFeePerGas: eth.Uint256Quantity(*uint256.NewInt(7)), - BlockHash: refA2.Hash, - Transactions: []eth.Data{a2L1Info}, - }} - - metrics := &testutils.TestDerivationMetrics{} - - // When a previously received unsafe block is older than the tip of the chain, we want to drop it. - t.Run("drop old", func(t *testing.T) { - logger := testlog.Logger(t, log.LevelError) - - emitter := &testutils.MockEmitter{} - cl := NewCLSync(logger, cfg, metrics) - cl.AttachEmitter(emitter) - - emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) - cl.OnEvent(context.Background(), ReceivedUnsafePayloadEvent{Envelope: payloadA1}) - emitter.AssertExpectations(t) - - cl.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{ - UnsafeL2Head: refA2, - SafeL2Head: refA0, - FinalizedL2Head: refA0, - }) - emitter.AssertExpectations(t) // no new events expected to be emitted - - require.Nil(t, cl.unsafePayloads.Peek(), "pop because too old") - }) - - // When we already have the exact payload as tip, then no need to process it - t.Run("drop equal", func(t *testing.T) { - logger := testlog.Logger(t, log.LevelError) - - emitter := &testutils.MockEmitter{} - cl := NewCLSync(logger, cfg, metrics) - cl.AttachEmitter(emitter) - - emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) - cl.OnEvent(context.Background(), ReceivedUnsafePayloadEvent{Envelope: payloadA1}) - emitter.AssertExpectations(t) - - cl.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{ - 
UnsafeL2Head: refA1, - SafeL2Head: refA0, - FinalizedL2Head: refA0, - }) - emitter.AssertExpectations(t) // no new events expected to be emitted - - require.Nil(t, cl.unsafePayloads.Peek(), "pop because seen") - }) - - // When we have a different payload, at the same height, then we want to keep it. - // The unsafe chain consensus preserves the first-seen payload. - t.Run("ignore conflict", func(t *testing.T) { - logger := testlog.Logger(t, log.LevelError) - - emitter := &testutils.MockEmitter{} - cl := NewCLSync(logger, cfg, metrics) - cl.AttachEmitter(emitter) - - emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) - cl.OnEvent(context.Background(), ReceivedUnsafePayloadEvent{Envelope: payloadA1}) - emitter.AssertExpectations(t) - - cl.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{ - UnsafeL2Head: altRefA1, - SafeL2Head: refA0, - FinalizedL2Head: refA0, - }) - emitter.AssertExpectations(t) // no new events expected to be emitted - - require.Nil(t, cl.unsafePayloads.Peek(), "pop because alternative") - }) - - t.Run("ignore unsafe reorg", func(t *testing.T) { - logger := testlog.Logger(t, log.LevelError) - - emitter := &testutils.MockEmitter{} - cl := NewCLSync(logger, cfg, metrics) - cl.AttachEmitter(emitter) - - emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) - cl.OnEvent(context.Background(), ReceivedUnsafePayloadEvent{Envelope: payloadA2}) - emitter.AssertExpectations(t) - - cl.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{ - UnsafeL2Head: altRefA1, - SafeL2Head: refA0, - FinalizedL2Head: refA0, - }) - emitter.AssertExpectations(t) // no new events expected, since A2 does not fit onto altA1 - - require.Nil(t, cl.unsafePayloads.Peek(), "pop because not applicable") - }) - - t.Run("success", func(t *testing.T) { - logger := testlog.Logger(t, log.LevelError) - - emitter := &testutils.MockEmitter{} - cl := NewCLSync(logger, cfg, metrics) - cl.AttachEmitter(emitter) - emitter.AssertExpectations(t) // nothing to process yet - - 
require.Nil(t, cl.unsafePayloads.Peek(), "no payloads yet") - - emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) - cl.OnEvent(context.Background(), ReceivedUnsafePayloadEvent{Envelope: payloadA1}) - emitter.AssertExpectations(t) - - lowest := cl.LowestQueuedUnsafeBlock() - require.Equal(t, refA1, lowest, "expecting A1 next") - - // payload A1 should be possible to process on top of A0 - emitter.ExpectOnce(engine.ProcessUnsafePayloadEvent{Envelope: payloadA1}) - cl.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{ - UnsafeL2Head: refA0, - SafeL2Head: refA0, - FinalizedL2Head: refA0, - }) - emitter.AssertExpectations(t) - - // now pretend the payload was processed: we can drop A1 now - cl.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{ - UnsafeL2Head: refA1, - SafeL2Head: refA0, - FinalizedL2Head: refA0, - }) - require.Nil(t, cl.unsafePayloads.Peek(), "pop because applied") - - // repeat for A2 - emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) - cl.OnEvent(context.Background(), ReceivedUnsafePayloadEvent{Envelope: payloadA2}) - emitter.AssertExpectations(t) - - lowest = cl.LowestQueuedUnsafeBlock() - require.Equal(t, refA2, lowest, "expecting A2 next") - - emitter.ExpectOnce(engine.ProcessUnsafePayloadEvent{Envelope: payloadA2}) - cl.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{ - UnsafeL2Head: refA1, - SafeL2Head: refA0, - FinalizedL2Head: refA0, - }) - emitter.AssertExpectations(t) - - // now pretend the payload was processed: we can drop A2 now - cl.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{ - UnsafeL2Head: refA2, - SafeL2Head: refA0, - FinalizedL2Head: refA0, - }) - require.Nil(t, cl.unsafePayloads.Peek(), "pop because applied") - }) - - t.Run("double buffer", func(t *testing.T) { - logger := testlog.Logger(t, log.LevelError) - - emitter := &testutils.MockEmitter{} - cl := NewCLSync(logger, cfg, metrics) - cl.AttachEmitter(emitter) - - emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) - 
cl.OnEvent(context.Background(), ReceivedUnsafePayloadEvent{Envelope: payloadA1}) - emitter.AssertExpectations(t) - emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) - cl.OnEvent(context.Background(), ReceivedUnsafePayloadEvent{Envelope: payloadA2}) - emitter.AssertExpectations(t) - - lowest := cl.LowestQueuedUnsafeBlock() - require.Equal(t, refA1, lowest, "expecting A1 next") - - emitter.ExpectOnce(engine.ProcessUnsafePayloadEvent{Envelope: payloadA1}) - cl.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{ - UnsafeL2Head: refA0, - SafeL2Head: refA0, - FinalizedL2Head: refA0, - }) - emitter.AssertExpectations(t) - require.Equal(t, 2, cl.unsafePayloads.Len(), "still holding on to A1, and queued A2") - - // Now pretend the payload was processed: we can drop A1 now. - // The CL-sync will try to immediately continue with A2. - emitter.ExpectOnce(engine.ProcessUnsafePayloadEvent{Envelope: payloadA2}) - cl.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{ - UnsafeL2Head: refA1, - SafeL2Head: refA0, - FinalizedL2Head: refA0, - }) - emitter.AssertExpectations(t) - - // now pretend the payload was processed: we can drop A2 now - cl.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{ - UnsafeL2Head: refA2, - SafeL2Head: refA0, - FinalizedL2Head: refA0, - }) - require.Nil(t, cl.unsafePayloads.Peek(), "done") - }) - - t.Run("temporary error", func(t *testing.T) { - logger := testlog.Logger(t, log.LevelError) - - emitter := &testutils.MockEmitter{} - cl := NewCLSync(logger, cfg, metrics) - cl.AttachEmitter(emitter) - - emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) - cl.OnEvent(context.Background(), ReceivedUnsafePayloadEvent{Envelope: payloadA1}) - emitter.AssertExpectations(t) - - emitter.ExpectOnce(engine.ProcessUnsafePayloadEvent{Envelope: payloadA1}) - cl.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{ - UnsafeL2Head: refA0, - SafeL2Head: refA0, - FinalizedL2Head: refA0, - }) - emitter.AssertExpectations(t) - - // On temporary 
errors we don't need any feedback from the engine. - // We just hold on to what payloads there are in the queue. - require.NotNil(t, cl.unsafePayloads.Peek(), "no pop because temporary error") - - // Pretend we are still stuck on the same forkchoice. The CL-sync will retry sending the payload. - emitter.ExpectOnce(engine.ProcessUnsafePayloadEvent{Envelope: payloadA1}) - cl.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{ - UnsafeL2Head: refA0, - SafeL2Head: refA0, - FinalizedL2Head: refA0, - }) - emitter.AssertExpectations(t) - require.NotNil(t, cl.unsafePayloads.Peek(), "no pop because retry still unconfirmed") - - // Now confirm we got the payload this time - cl.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{ - UnsafeL2Head: refA1, - SafeL2Head: refA0, - FinalizedL2Head: refA0, - }) - require.Nil(t, cl.unsafePayloads.Peek(), "pop because valid") - }) - - t.Run("invalid payload error", func(t *testing.T) { - logger := testlog.Logger(t, log.LevelError) - emitter := &testutils.MockEmitter{} - cl := NewCLSync(logger, cfg, metrics) - cl.AttachEmitter(emitter) - - // CLSync gets payload and requests engine state, to later determine if payload should be forwarded - emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) - cl.OnEvent(context.Background(), ReceivedUnsafePayloadEvent{Envelope: payloadA1}) - emitter.AssertExpectations(t) - - // Engine signals, CLSync sends the payload - emitter.ExpectOnce(engine.ProcessUnsafePayloadEvent{Envelope: payloadA1}) - cl.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{ - UnsafeL2Head: refA0, - SafeL2Head: refA0, - FinalizedL2Head: refA0, - }) - emitter.AssertExpectations(t) - - // Pretend the payload is bad. It should not be retried after this. 
- cl.OnEvent(context.Background(), engine.PayloadInvalidEvent{Envelope: payloadA1, Err: errors.New("test err")}) - emitter.AssertExpectations(t) - require.Nil(t, cl.unsafePayloads.Peek(), "pop because invalid") - }) -} diff --git a/op-node/rollup/clsync/payloads_queue_test.go b/op-node/rollup/clsync/payloads_queue_test.go deleted file mode 100644 index 58cef00ee7758..0000000000000 --- a/op-node/rollup/clsync/payloads_queue_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package clsync - -import ( - "container/heap" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/testlog" -) - -func TestPayloadsByNumber(t *testing.T) { - p := payloadsByNumber{} - mk := func(i uint64) payloadAndSize { - return payloadAndSize{ - envelope: ð.ExecutionPayloadEnvelope{ - ExecutionPayload: ð.ExecutionPayload{ - BlockNumber: eth.Uint64Quantity(i), - }, - }, - } - } - // add payload A, check it was added - a := mk(123) - heap.Push(&p, a) - require.Equal(t, p.Len(), 1) - require.Equal(t, p[0], a) - - // add payload B, check it was added in top-priority spot - b := mk(100) - heap.Push(&p, b) - require.Equal(t, p.Len(), 2) - require.Equal(t, p[0], b) - - // add payload C, check it did not get first like B, since block num is higher - c := mk(150) - heap.Push(&p, c) - require.Equal(t, p.Len(), 3) - require.Equal(t, p[0], b) // still b - - // pop b - heap.Pop(&p) - require.Equal(t, p.Len(), 2) - require.Equal(t, p[0], a) - - // pop a - heap.Pop(&p) - require.Equal(t, p.Len(), 1) - require.Equal(t, p[0], c) - - // pop c - heap.Pop(&p) - require.Equal(t, p.Len(), 0) - - // duplicate entry - heap.Push(&p, b) - require.Equal(t, p.Len(), 1) - heap.Push(&p, b) - require.Equal(t, p.Len(), 2) - heap.Pop(&p) - require.Equal(t, p.Len(), 1) -} - -func TestPayloadMemSize(t *testing.T) { - require.Equal(t, 
payloadMemFixedCost, payloadMemSize(nil), "nil is same fixed cost") - require.Equal(t, payloadMemFixedCost, payloadMemSize(ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{}}), "empty payload fixed cost") - require.Equal(t, payloadMemFixedCost+payloadTxMemOverhead, payloadMemSize(ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{Transactions: []eth.Data{nil}}}), "nil tx counts") - require.Equal(t, payloadMemFixedCost+payloadTxMemOverhead, payloadMemSize(ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{Transactions: []eth.Data{make([]byte, 0)}}}), "empty tx counts") - require.Equal(t, payloadMemFixedCost+4*payloadTxMemOverhead+42+1337+0+1, - payloadMemSize(ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{Transactions: []eth.Data{ - make([]byte, 42), - make([]byte, 1337), - make([]byte, 0), - make([]byte, 1), - }}}), "mixed txs") -} - -func envelope(payload *eth.ExecutionPayload) *eth.ExecutionPayloadEnvelope { - return ð.ExecutionPayloadEnvelope{ExecutionPayload: payload} -} - -func TestPayloadsQueue(t *testing.T) { - pq := NewPayloadsQueue(testlog.Logger(t, log.LvlInfo), payloadMemFixedCost*3, payloadMemSize) - require.Equal(t, 0, pq.Len()) - require.Nil(t, pq.Peek()) - require.Nil(t, pq.Pop()) - - a := envelope(ð.ExecutionPayload{BlockNumber: 3, BlockHash: common.Hash{3}}) - b := envelope(ð.ExecutionPayload{BlockNumber: 4, BlockHash: common.Hash{4}}) - c := envelope(ð.ExecutionPayload{BlockNumber: 5, BlockHash: common.Hash{5}}) - d := envelope(ð.ExecutionPayload{BlockNumber: 6, BlockHash: common.Hash{6}}) - bAlt := envelope(ð.ExecutionPayload{BlockNumber: 4, BlockHash: common.Hash{0xff}}) - bDup := envelope(ð.ExecutionPayload{BlockNumber: 4, BlockHash: common.Hash{4}}) - - require.NoError(t, pq.Push(b)) - require.Equal(t, pq.Len(), 1) - require.Equal(t, pq.Peek(), b) - - require.Error(t, pq.Push(nil), "cannot add nil payloads") - - require.NoError(t, pq.Push(c)) - require.Equal(t, pq.Len(), 2) - 
require.Equal(t, pq.MemSize(), 2*payloadMemFixedCost) - require.Equal(t, pq.Peek(), b, "expecting b to still be the lowest number payload") - - require.NoError(t, pq.Push(a)) - require.Equal(t, pq.Len(), 3) - require.Equal(t, pq.MemSize(), 3*payloadMemFixedCost) - require.Equal(t, pq.Peek(), a, "expecting a to be new lowest number") - - require.Equal(t, pq.Pop(), a) - require.Equal(t, pq.Len(), 2, "expecting to pop the lowest") - - require.Equal(t, pq.Peek(), b, "expecting b to be lowest, compared to c") - - require.Equal(t, pq.Pop(), b) - require.Equal(t, pq.Len(), 1) - require.Equal(t, pq.MemSize(), payloadMemFixedCost) - - require.Equal(t, pq.Pop(), c) - require.Equal(t, pq.Len(), 0, "expecting no items to remain") - - e := envelope(ð.ExecutionPayload{BlockNumber: 5, Transactions: []eth.Data{make([]byte, payloadMemFixedCost*3+1)}}) - require.Error(t, pq.Push(e), "cannot add payloads that are too large") - - require.NoError(t, pq.Push(b)) - require.Equal(t, pq.Len(), 1, "expecting b") - require.Equal(t, pq.Peek(), b) - require.NoError(t, pq.Push(c)) - require.Equal(t, pq.Len(), 2, "expecting b, c") - require.Equal(t, pq.Peek(), b) - require.NoError(t, pq.Push(a)) - require.Equal(t, pq.Len(), 3, "expecting a, b, c") - require.Equal(t, pq.Peek(), a) - - // No duplicates allowed - require.Error(t, pq.Push(bDup)) - // But reorg data allowed - require.NoError(t, pq.Push(bAlt)) - - require.NoError(t, pq.Push(d)) - require.Equal(t, pq.Len(), 3) - require.Equal(t, pq.Peek(), b, "expecting b, c, d") - require.NotContainsf(t, pq.pq[:], a, "a should be dropped after 3 items already exist under max size constraint") -} diff --git a/op-node/rollup/derive/attributes.go b/op-node/rollup/derive/attributes.go index 54fa6a4570309..d903bbdf7ab2c 100644 --- a/op-node/rollup/derive/attributes.go +++ b/op-node/rollup/derive/attributes.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" 
"github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -30,23 +31,25 @@ type SystemConfigL2Fetcher interface { // FetchingAttributesBuilder fetches inputs for the building of L2 payload attributes on the fly. type FetchingAttributesBuilder struct { - rollupCfg *rollup.Config - depSet DependencySet - l1 L1ReceiptsFetcher - l2 SystemConfigL2Fetcher + rollupCfg *rollup.Config + l1ChainConfig *params.ChainConfig + depSet DependencySet + l1 L1ReceiptsFetcher + l2 SystemConfigL2Fetcher // whether to skip the L1 origin timestamp check - only for testing purposes testSkipL1OriginCheck bool } -func NewFetchingAttributesBuilder(rollupCfg *rollup.Config, depSet DependencySet, l1 L1ReceiptsFetcher, l2 SystemConfigL2Fetcher) *FetchingAttributesBuilder { +func NewFetchingAttributesBuilder(rollupCfg *rollup.Config, l1ChainConfig *params.ChainConfig, depSet DependencySet, l1 L1ReceiptsFetcher, l2 SystemConfigL2Fetcher) *FetchingAttributesBuilder { if rollupCfg.InteropTime != nil && depSet == nil { panic("FetchingAttributesBuilder requires a dependency set when interop fork is scheduled") } return &FetchingAttributesBuilder{ - rollupCfg: rollupCfg, - depSet: depSet, - l1: l1, - l2: l2, + rollupCfg: rollupCfg, + l1ChainConfig: l1ChainConfig, + depSet: depSet, + l1: l1, + l2: l2, } } @@ -158,7 +161,7 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex } } - l1InfoTx, err := L1InfoDepositBytes(ba.rollupCfg, sysConfig, seqNumber, l1Info, nextL2Time) + l1InfoTx, err := L1InfoDepositBytes(ba.rollupCfg, ba.l1ChainConfig, sysConfig, seqNumber, l1Info, nextL2Time) if err != nil { return nil, NewCriticalError(fmt.Errorf("failed to create l1InfoTx: %w", err)) } @@ -198,6 +201,8 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex r.EIP1559Params = new(eth.Bytes8) *r.EIP1559Params = 
sysConfig.EIP1559Params } - + if ba.rollupCfg.IsMinBaseFee(nextL2Time) { + r.MinBaseFee = &sysConfig.MinBaseFee + } return r, nil } diff --git a/op-node/rollup/derive/attributes_queue_test.go b/op-node/rollup/derive/attributes_queue_test.go index 2f6e59141e4c7..2f63983ab73ba 100644 --- a/op-node/rollup/derive/attributes_queue_test.go +++ b/op-node/rollup/derive/attributes_queue_test.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -67,7 +68,7 @@ func TestAttributesQueue(t *testing.T) { l2Fetcher.ExpectSystemConfigByL2Hash(safeHead.Hash, parentL1Cfg, nil) rollupCfg := rollup.Config{} - l1InfoTx, err := L1InfoDepositBytes(&rollupCfg, expectedL1Cfg, safeHead.SequenceNumber+1, l1Info, 0) + l1InfoTx, err := L1InfoDepositBytes(&rollupCfg, params.MergedTestChainConfig, expectedL1Cfg, safeHead.SequenceNumber+1, l1Info, 0) require.NoError(t, err) attrs := eth.PayloadAttributes{ Timestamp: eth.Uint64Quantity(safeHead.Time + cfg.BlockTime), @@ -77,7 +78,7 @@ func TestAttributesQueue(t *testing.T) { NoTxPool: true, GasLimit: (*eth.Uint64Quantity)(&expectedL1Cfg.GasLimit), } - attrBuilder := NewFetchingAttributesBuilder(cfg, nil, l1Fetcher, l2Fetcher) + attrBuilder := NewFetchingAttributesBuilder(cfg, params.MergedTestChainConfig, nil, l1Fetcher, l2Fetcher) aq := NewAttributesQueue(testlog.Logger(t, log.LevelError), cfg, attrBuilder, nil) diff --git a/op-node/rollup/derive/attributes_test.go b/op-node/rollup/derive/attributes_test.go index 3be85d8406741..5d1056b4622b7 100644 --- a/op-node/rollup/derive/attributes_test.go +++ b/op-node/rollup/derive/attributes_test.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" + 
"github.com/ethereum/go-ethereum/params" "github.com/stretchr/testify/require" ) @@ -50,7 +51,7 @@ func TestPreparePayloadAttributes(t *testing.T) { l1Info.InfoNum = l2Parent.L1Origin.Number + 1 epoch := l1Info.ID() l1Fetcher.ExpectFetchReceipts(epoch.Hash, l1Info, nil, nil) - attrBuilder := NewFetchingAttributesBuilder(mkCfg(), nil, l1Fetcher, l1CfgFetcher) + attrBuilder := NewFetchingAttributesBuilder(mkCfg(), params.MergedTestChainConfig, nil, l1Fetcher, l1CfgFetcher) _, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch) require.NotNil(t, err, "inconsistent L1 origin error expected") require.ErrorIs(t, err, ErrReset, "inconsistent L1 origin transition must be handled like a critical error with reorg") @@ -66,7 +67,7 @@ func TestPreparePayloadAttributes(t *testing.T) { l1Info := testutils.RandomBlockInfo(rng) l1Info.InfoNum = l2Parent.L1Origin.Number epoch := l1Info.ID() - attrBuilder := NewFetchingAttributesBuilder(mkCfg(), nil, l1Fetcher, l1CfgFetcher) + attrBuilder := NewFetchingAttributesBuilder(mkCfg(), params.MergedTestChainConfig, nil, l1Fetcher, l1CfgFetcher) _, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch) require.NotNil(t, err, "inconsistent L1 origin error expected") require.ErrorIs(t, err, ErrReset, "inconsistent L1 origin transition must be handled like a critical error with reorg") @@ -83,7 +84,7 @@ func TestPreparePayloadAttributes(t *testing.T) { epoch.Number += 1 mockRPCErr := errors.New("mock rpc error") l1Fetcher.ExpectFetchReceipts(epoch.Hash, nil, nil, mockRPCErr) - attrBuilder := NewFetchingAttributesBuilder(mkCfg(), nil, l1Fetcher, l1CfgFetcher) + attrBuilder := NewFetchingAttributesBuilder(mkCfg(), params.MergedTestChainConfig, nil, l1Fetcher, l1CfgFetcher) _, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch) require.ErrorIs(t, err, mockRPCErr, "mock rpc error expected") require.ErrorIs(t, err, ErrTemporary, "rpc errors should not be 
critical, it is not necessary to reorg") @@ -99,7 +100,7 @@ func TestPreparePayloadAttributes(t *testing.T) { epoch := l2Parent.L1Origin mockRPCErr := errors.New("mock rpc error") l1Fetcher.ExpectInfoByHash(epoch.Hash, nil, mockRPCErr) - attrBuilder := NewFetchingAttributesBuilder(mkCfg(), nil, l1Fetcher, l1CfgFetcher) + attrBuilder := NewFetchingAttributesBuilder(mkCfg(), params.MergedTestChainConfig, nil, l1Fetcher, l1CfgFetcher) _, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch) require.ErrorIs(t, err, mockRPCErr, "mock rpc error expected") require.ErrorIs(t, err, ErrTemporary, "rpc errors should not be critical, it is not necessary to reorg") @@ -117,10 +118,10 @@ func TestPreparePayloadAttributes(t *testing.T) { l1Info.InfoParentHash = l2Parent.L1Origin.Hash l1Info.InfoNum = l2Parent.L1Origin.Number + 1 epoch := l1Info.ID() - l1InfoTx, err := L1InfoDepositBytes(mkCfg(), testSysCfg, 0, l1Info, 0) + l1InfoTx, err := L1InfoDepositBytes(mkCfg(), params.MergedTestChainConfig, testSysCfg, 0, l1Info, 0) require.NoError(t, err) l1Fetcher.ExpectFetchReceipts(epoch.Hash, l1Info, nil, nil) - attrBuilder := NewFetchingAttributesBuilder(cfg, nil, l1Fetcher, l1CfgFetcher) + attrBuilder := NewFetchingAttributesBuilder(cfg, params.MergedTestChainConfig, nil, l1Fetcher, l1CfgFetcher) attrs, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch) require.NoError(t, err) require.NotNil(t, attrs) @@ -156,13 +157,13 @@ func TestPreparePayloadAttributes(t *testing.T) { require.NoError(t, err) epoch := l1Info.ID() - l1InfoTx, err := L1InfoDepositBytes(cfg, testSysCfg, 0, l1Info, 0) + l1InfoTx, err := L1InfoDepositBytes(cfg, params.MergedTestChainConfig, testSysCfg, 0, l1Info, 0) require.NoError(t, err) l2Txs := append(append(make([]eth.Data, 0), l1InfoTx), usedDepositTxs...) 
l1Fetcher.ExpectFetchReceipts(epoch.Hash, l1Info, receipts, nil) - attrBuilder := NewFetchingAttributesBuilder(cfg, nil, l1Fetcher, l1CfgFetcher) + attrBuilder := NewFetchingAttributesBuilder(cfg, params.MergedTestChainConfig, nil, l1Fetcher, l1CfgFetcher) attrs, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch) require.NoError(t, err) require.NotNil(t, attrs) @@ -187,11 +188,11 @@ func TestPreparePayloadAttributes(t *testing.T) { l1Info.InfoNum = l2Parent.L1Origin.Number epoch := l1Info.ID() - l1InfoTx, err := L1InfoDepositBytes(cfg, testSysCfg, l2Parent.SequenceNumber+1, l1Info, 0) + l1InfoTx, err := L1InfoDepositBytes(cfg, params.MergedTestChainConfig, testSysCfg, l2Parent.SequenceNumber+1, l1Info, 0) require.NoError(t, err) l1Fetcher.ExpectInfoByHash(epoch.Hash, l1Info, nil) - attrBuilder := NewFetchingAttributesBuilder(cfg, nil, l1Fetcher, l1CfgFetcher) + attrBuilder := NewFetchingAttributesBuilder(cfg, params.MergedTestChainConfig, nil, l1Fetcher, l1CfgFetcher) attrs, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch) require.NoError(t, err) require.NotNil(t, attrs) @@ -233,7 +234,7 @@ func TestPreparePayloadAttributes(t *testing.T) { seqNumber := uint64(0) epoch := l1Info.ID() - l1InfoTx, err := L1InfoDepositBytes(cfg, testSysCfg, seqNumber, l1Info, 0) + l1InfoTx, err := L1InfoDepositBytes(cfg, params.MergedTestChainConfig, testSysCfg, seqNumber, l1Info, 0) require.NoError(t, err) require.NoError(t, err) @@ -242,7 +243,7 @@ func TestPreparePayloadAttributes(t *testing.T) { l2Txs = append(l2Txs, userDepositTxs...) 
l1Fetcher.ExpectFetchReceipts(epoch.Hash, l1Info, receipts, nil) - attrBuilder := NewFetchingAttributesBuilder(cfg, depSet, l1Fetcher, l1CfgFetcher) + attrBuilder := NewFetchingAttributesBuilder(cfg, params.MergedTestChainConfig, depSet, l1Fetcher, l1CfgFetcher) attrs, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch) require.NoError(t, err) require.NotNil(t, attrs) @@ -274,7 +275,7 @@ func TestPreparePayloadAttributes(t *testing.T) { seqNumber := l2Parent.SequenceNumber + 1 epoch := l1Info.ID() - l1InfoTx, err := L1InfoDepositBytes(cfg, testSysCfg, seqNumber, l1Info, 0) + l1InfoTx, err := L1InfoDepositBytes(cfg, params.MergedTestChainConfig, testSysCfg, seqNumber, l1Info, 0) require.NoError(t, err) require.NoError(t, err) @@ -282,7 +283,7 @@ func TestPreparePayloadAttributes(t *testing.T) { l2Txs = append(l2Txs, l1InfoTx) l1Fetcher.ExpectInfoByHash(epoch.Hash, l1Info, nil) - attrBuilder := NewFetchingAttributesBuilder(cfg, depSet, l1Fetcher, l1CfgFetcher) + attrBuilder := NewFetchingAttributesBuilder(cfg, params.MergedTestChainConfig, depSet, l1Fetcher, l1CfgFetcher) attrs, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch) require.NoError(t, err) require.NotNil(t, attrs) @@ -315,10 +316,10 @@ func TestPreparePayloadAttributes(t *testing.T) { l1Info.InfoParentHash = l2Parent.L1Origin.Hash l1Info.InfoNum = l2Parent.L1Origin.Number + 1 epoch := l1Info.ID() - l1InfoTx, err := L1InfoDepositBytes(cfg, testSysCfg, 0, l1Info, 0) + l1InfoTx, err := L1InfoDepositBytes(cfg, params.MergedTestChainConfig, testSysCfg, 0, l1Info, 0) require.NoError(t, err) l1Fetcher.ExpectFetchReceipts(epoch.Hash, l1Info, nil, nil) - attrBuilder := NewFetchingAttributesBuilder(cfg, nil, l1Fetcher, l1CfgFetcher) + attrBuilder := NewFetchingAttributesBuilder(cfg, params.MergedTestChainConfig, nil, l1Fetcher, l1CfgFetcher) attrs, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch) require.NoError(t, 
err) require.Equal(t, eip1559Params, *attrs.EIP1559Params) @@ -367,10 +368,10 @@ func TestPreparePayloadAttributes(t *testing.T) { if !tc.regolith { time-- } - l1InfoTx, err := L1InfoDepositBytes(cfg, testSysCfg, 0, l1Info, time) + l1InfoTx, err := L1InfoDepositBytes(cfg, params.MergedTestChainConfig, testSysCfg, 0, l1Info, time) require.NoError(t, err) l1Fetcher.ExpectFetchReceipts(epoch.Hash, l1Info, nil, nil) - attrBuilder := NewFetchingAttributesBuilder(cfg, nil, l1Fetcher, l1CfgFetcher) + attrBuilder := NewFetchingAttributesBuilder(cfg, params.MergedTestChainConfig, nil, l1Fetcher, l1CfgFetcher) attrs, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch) require.NoError(t, err) require.Equal(t, l1InfoTx, []byte(attrs.Transactions[0])) @@ -401,7 +402,7 @@ func TestPreparePayloadAttributes(t *testing.T) { epoch := l1Info.ID() l1Fetcher.ExpectFetchReceipts(epoch.Hash, l1Info, nil, nil) - attrBuilder := NewFetchingAttributesBuilder(cfg, depSet, l1Fetcher, l1CfgFetcher) + attrBuilder := NewFetchingAttributesBuilder(cfg, params.MergedTestChainConfig, depSet, l1Fetcher, l1CfgFetcher) attrs, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch) require.NoError(t, err) return attrs @@ -440,6 +441,44 @@ func TestPreparePayloadAttributes(t *testing.T) { require.Equal(t, tx, attrs.Transactions[i+1]) } }) + + t.Run("minimum base fee param", func(t *testing.T) { + cfg := mkCfg() + cfg.ActivateAtGenesis(rollup.Jovian) + rng := rand.New(rand.NewSource(1234)) + l1Fetcher := &testutils.MockL1Source{} + defer l1Fetcher.AssertExpectations(t) + l2Parent := testutils.RandomL2BlockRef(rng) + l1CfgFetcher := &testutils.MockL2Client{} + eip1559Params := eth.Bytes8([]byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}) + minBaseFee := uint64(1e9) + testSysCfg := eth.SystemConfig{ + BatcherAddr: common.Address{42}, + Overhead: [32]byte{}, + Scalar: [32]byte{}, + EIP1559Params: eip1559Params, + MinBaseFee: minBaseFee, + } + 
l1CfgFetcher.ExpectSystemConfigByL2Hash(l2Parent.Hash, testSysCfg, nil) + defer l1CfgFetcher.AssertExpectations(t) + l1Info := testutils.RandomBlockInfo(rng) + l1Info.InfoParentHash = l2Parent.L1Origin.Hash + l1Info.InfoNum = l2Parent.L1Origin.Number + 1 + epoch := l1Info.ID() + l1InfoTx, err := L1InfoDepositBytes(cfg, params.MergedTestChainConfig, testSysCfg, 0, l1Info, 0) + require.NoError(t, err) + l1Fetcher.ExpectFetchReceipts(epoch.Hash, l1Info, nil, nil) + depSet, err := depset.NewStaticConfigDependencySet(map[eth.ChainID]*depset.StaticConfigDependency{ + eth.ChainIDFromUInt64(42): {}, + }) + require.NoError(t, err) + attrBuilder := NewFetchingAttributesBuilder(cfg, params.MergedTestChainConfig, depSet, l1Fetcher, l1CfgFetcher) + attrs, err := attrBuilder.PreparePayloadAttributes(context.Background(), l2Parent, epoch) + require.NoError(t, err) + require.Equal(t, eip1559Params, *attrs.EIP1559Params) + require.Equal(t, minBaseFee, *attrs.MinBaseFee) + require.Equal(t, l1InfoTx, []byte(attrs.Transactions[0])) + }) }) } diff --git a/op-node/rollup/derive/deriver.go b/op-node/rollup/derive/deriver.go index a76802b31a3fd..042b7a9e4a7e3 100644 --- a/op-node/rollup/derive/deriver.go +++ b/op-node/rollup/derive/deriver.go @@ -121,10 +121,12 @@ func (d *PipelineDeriver) AttachEmitter(em event.Emitter) { d.emitter = em } +func (d *PipelineDeriver) ResetPipeline() { + d.pipeline.Reset() +} + func (d *PipelineDeriver) OnEvent(ctx context.Context, ev event.Event) bool { switch x := ev.(type) { - case rollup.ForceResetEvent: - d.pipeline.Reset() case PipelineStepEvent: // Don't generate attributes if there are already attributes in-flight if d.needAttributesConfirmation { diff --git a/op-node/rollup/derive/l1_block_info.go b/op-node/rollup/derive/l1_block_info.go index ddeb35c5cdb24..af063d65b337a 100644 --- a/op-node/rollup/derive/l1_block_info.go +++ b/op-node/rollup/derive/l1_block_info.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -404,7 +405,7 @@ func L1BlockInfoFromBytes(rollupCfg *rollup.Config, l2BlockTime uint64, data []b // L1InfoDeposit creates a L1 Info deposit transaction based on the L1 block, // and the L2 block-height difference with the start of the epoch. -func L1InfoDeposit(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber uint64, block eth.BlockInfo, l2Timestamp uint64) (*types.DepositTx, error) { +func L1InfoDeposit(rollupCfg *rollup.Config, l1ChainConfig *params.ChainConfig, sysCfg eth.SystemConfig, seqNumber uint64, block eth.BlockInfo, l2Timestamp uint64) (*types.DepositTx, error) { l1BlockInfo := L1BlockInfo{ Number: block.NumberU64(), Time: block.Time(), @@ -416,7 +417,7 @@ func L1InfoDeposit(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber var data []byte if isEcotoneButNotFirstBlock(rollupCfg, l2Timestamp) { isIsthmusActivated := isIsthmusButNotFirstBlock(rollupCfg, l2Timestamp) - l1BlockInfo.BlobBaseFee = block.BlobBaseFee() + l1BlockInfo.BlobBaseFee = block.BlobBaseFee(l1ChainConfig) // Apply Cancun blob base fee calculation if this chain needs the L1 Pectra // blob schedule fix (mostly Holesky and Sepolia OP-Stack chains). @@ -496,8 +497,8 @@ func L1InfoDeposit(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber } // L1InfoDepositBytes returns a serialized L1-info attributes transaction. 
-func L1InfoDepositBytes(rollupCfg *rollup.Config, sysCfg eth.SystemConfig, seqNumber uint64, l1Info eth.BlockInfo, l2Timestamp uint64) ([]byte, error) { - dep, err := L1InfoDeposit(rollupCfg, sysCfg, seqNumber, l1Info, l2Timestamp) +func L1InfoDepositBytes(rollupCfg *rollup.Config, l1ChainConfig *params.ChainConfig, sysCfg eth.SystemConfig, seqNumber uint64, l1Info eth.BlockInfo, l2Timestamp uint64) ([]byte, error) { + dep, err := L1InfoDeposit(rollupCfg, l1ChainConfig, sysCfg, seqNumber, l1Info, l2Timestamp) if err != nil { return nil, fmt.Errorf("failed to create L1 info tx: %w", err) } diff --git a/op-node/rollup/derive/l1_block_info_test.go b/op-node/rollup/derive/l1_block_info_test.go index c634a9ae4ea25..0a7def874f052 100644 --- a/op-node/rollup/derive/l1_block_info_test.go +++ b/op-node/rollup/derive/l1_block_info_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -69,7 +70,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { info := testCase.mkInfo(rng) l1Cfg := testCase.mkL1Cfg(rng, info) seqNr := testCase.seqNr(rng) - depTx, err := L1InfoDeposit(&rollupCfg, l1Cfg, seqNr, info, 0) + depTx, err := L1InfoDeposit(&rollupCfg, params.MergedTestChainConfig, l1Cfg, seqNr, info, 0) require.NoError(t, err) res, err := L1BlockInfoFromBytes(&rollupCfg, info.Time(), depTx.Data) require.NoError(t, err, "expected valid deposit info") @@ -99,7 +100,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { t.Run("invalid selector", func(t *testing.T) { rng := rand.New(rand.NewSource(1234)) info := testutils.MakeBlockInfo(nil)(rng) - depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, 0) + depTx, err := L1InfoDeposit(&rollupCfg, params.MergedTestChainConfig, randomL1Cfg(rng, info), randomSeqNr(rng), info, 0) 
require.NoError(t, err) _, err = crand.Read(depTx.Data[0:4]) require.NoError(t, err) @@ -111,7 +112,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { info := testutils.MakeBlockInfo(nil)(rng) rollupCfg := rollup.Config{} rollupCfg.ActivateAtGenesis(rollup.Regolith) - depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, 0) + depTx, err := L1InfoDeposit(&rollupCfg, params.MergedTestChainConfig, randomL1Cfg(rng, info), randomSeqNr(rng), info, 0) require.NoError(t, err) require.False(t, depTx.IsSystemTransaction) require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas)) @@ -123,7 +124,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { rollupCfg.ActivateAtGenesis(rollup.Ecotone) // run 1 block after ecotone transition timestamp := rollupCfg.Genesis.L2Time + rollupCfg.BlockTime - depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, timestamp) + depTx, err := L1InfoDeposit(&rollupCfg, params.MergedTestChainConfig, randomL1Cfg(rng, info), randomSeqNr(rng), info, timestamp) require.NoError(t, err) require.False(t, depTx.IsSystemTransaction) require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas)) @@ -136,7 +137,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { rollupCfg.ActivateAtGenesis(rollup.Delta) ecotoneTime := rollupCfg.Genesis.L2Time + rollupCfg.BlockTime // activate ecotone just after genesis rollupCfg.EcotoneTime = &ecotoneTime - depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, ecotoneTime) + depTx, err := L1InfoDeposit(&rollupCfg, params.MergedTestChainConfig, randomL1Cfg(rng, info), randomSeqNr(rng), info, ecotoneTime) require.NoError(t, err) require.False(t, depTx.IsSystemTransaction) require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas)) @@ -147,7 +148,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { info := testutils.MakeBlockInfo(nil)(rng) rollupCfg := rollup.Config{BlockTime: 2, Genesis: rollup.Genesis{L2Time: 1000}} 
rollupCfg.ActivateAtGenesis(rollup.Ecotone) - depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, rollupCfg.Genesis.L2Time) + depTx, err := L1InfoDeposit(&rollupCfg, params.MergedTestChainConfig, randomL1Cfg(rng, info), randomSeqNr(rng), info, rollupCfg.Genesis.L2Time) require.NoError(t, err) require.False(t, depTx.IsSystemTransaction) require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas)) @@ -160,7 +161,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { rollupCfg.ActivateAtGenesis(rollup.Isthmus) // run 1 block after isthmus transition timestamp := rollupCfg.Genesis.L2Time + rollupCfg.BlockTime - depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, timestamp) + depTx, err := L1InfoDeposit(&rollupCfg, params.MergedTestChainConfig, randomL1Cfg(rng, info), randomSeqNr(rng), info, timestamp) require.NoError(t, err) require.False(t, depTx.IsSystemTransaction) require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas)) @@ -173,7 +174,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { rollupCfg.ActivateAtGenesis(rollup.Granite) isthmusTime := rollupCfg.Genesis.L2Time + rollupCfg.BlockTime // activate isthmus just after genesis rollupCfg.InteropTime = &isthmusTime - depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, isthmusTime) + depTx, err := L1InfoDeposit(&rollupCfg, params.MergedTestChainConfig, randomL1Cfg(rng, info), randomSeqNr(rng), info, isthmusTime) require.NoError(t, err) require.False(t, depTx.IsSystemTransaction) require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas)) @@ -186,7 +187,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { info := testutils.MakeBlockInfo(nil)(rng) rollupCfg := rollup.Config{BlockTime: 2, Genesis: rollup.Genesis{L2Time: 1000}} rollupCfg.ActivateAtGenesis(rollup.Isthmus) - depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, rollupCfg.Genesis.L2Time) + depTx, err := 
L1InfoDeposit(&rollupCfg, params.MergedTestChainConfig, randomL1Cfg(rng, info), randomSeqNr(rng), info, rollupCfg.Genesis.L2Time) require.NoError(t, err) require.False(t, depTx.IsSystemTransaction) require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas)) @@ -199,7 +200,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { rollupCfg.ActivateAtGenesis(rollup.Interop) // run 1 block after interop transition timestamp := rollupCfg.Genesis.L2Time + rollupCfg.BlockTime - depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, timestamp) + depTx, err := L1InfoDeposit(&rollupCfg, params.MergedTestChainConfig, randomL1Cfg(rng, info), randomSeqNr(rng), info, timestamp) require.NoError(t, err) require.False(t, depTx.IsSystemTransaction) require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas)) @@ -212,7 +213,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { rollupCfg.ActivateAtGenesis(rollup.Isthmus) interopTime := rollupCfg.Genesis.L2Time + rollupCfg.BlockTime // activate interop just after genesis rollupCfg.InteropTime = &interopTime - depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, interopTime) + depTx, err := L1InfoDeposit(&rollupCfg, params.MergedTestChainConfig, randomL1Cfg(rng, info), randomSeqNr(rng), info, interopTime) require.NoError(t, err) require.False(t, depTx.IsSystemTransaction) require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas)) @@ -225,7 +226,7 @@ func TestParseL1InfoDepositTxData(t *testing.T) { info := testutils.MakeBlockInfo(nil)(rng) rollupCfg := rollup.Config{BlockTime: 2, Genesis: rollup.Genesis{L2Time: 1000}} rollupCfg.ActivateAtGenesis(rollup.Interop) - depTx, err := L1InfoDeposit(&rollupCfg, randomL1Cfg(rng, info), randomSeqNr(rng), info, rollupCfg.Genesis.L2Time) + depTx, err := L1InfoDeposit(&rollupCfg, params.MergedTestChainConfig, randomL1Cfg(rng, info), randomSeqNr(rng), info, rollupCfg.Genesis.L2Time) require.NoError(t, err) require.False(t, 
depTx.IsSystemTransaction) require.Equal(t, depTx.Gas, uint64(RegolithSystemTxGas)) diff --git a/op-node/rollup/derive/l1_block_info_tob_test.go b/op-node/rollup/derive/l1_block_info_tob_test.go index d7c9f2f8931dc..7aacd13cf8779 100644 --- a/op-node/rollup/derive/l1_block_info_tob_test.go +++ b/op-node/rollup/derive/l1_block_info_tob_test.go @@ -7,6 +7,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testutils" "github.com/ethereum-optimism/optimism/op-service/testutils/fuzzerutils" + "github.com/ethereum/go-ethereum/params" fuzz "github.com/google/gofuzz" "github.com/stretchr/testify/require" ) @@ -29,7 +30,7 @@ func FuzzParseL1InfoDepositTxDataValid(f *testing.F) { var rollupCfg rollup.Config // Create our deposit tx from our info - depTx, err := L1InfoDeposit(&rollupCfg, sysCfg, seqNr, &l1Info, 0) + depTx, err := L1InfoDeposit(&rollupCfg, params.MergedTestChainConfig, sysCfg, seqNr, &l1Info, 0) require.NoError(t, err, "error creating deposit tx from L1 info") // Get our info from out deposit tx @@ -74,7 +75,7 @@ func FuzzDecodeDepositTxDataToL1Info(f *testing.F) { GasLimit: uint64(0), } - depTx, err := L1InfoDeposit(&rollupCfg, sysCfg, res.SequenceNumber, &l1Info, 0) + depTx, err := L1InfoDeposit(&rollupCfg, params.MergedTestChainConfig, sysCfg, res.SequenceNumber, &l1Info, 0) require.NoError(t, err, "error creating deposit tx from L1 info") require.Equal(t, depTx.Data, fuzzedData) }) diff --git a/op-node/rollup/derive/payload_util.go b/op-node/rollup/derive/payload_util.go index eb02d1afa4565..6eec2ed135e3e 100644 --- a/op-node/rollup/derive/payload_util.go +++ b/op-node/rollup/derive/payload_util.go @@ -90,12 +90,15 @@ func PayloadToSystemConfig(rollupCfg *rollup.Config, payload *eth.ExecutionPaylo Scalar: info.L1FeeScalar, GasLimit: uint64(payload.GasLimit), } - if rollupCfg.IsHolocene(uint64(payload.Timestamp)) { - if err := eip1559.ValidateHoloceneExtraData(payload.ExtraData); err != nil 
{ - return eth.SystemConfig{}, err - } - d, e := eip1559.DecodeHoloceneExtraData(payload.ExtraData) - copy(r.EIP1559Params[:], eip1559.EncodeHolocene1559Params(d, e)) + err = eip1559.ValidateOptimismExtraData(rollupCfg, uint64(payload.Timestamp), payload.ExtraData) + if err != nil { + return eth.SystemConfig{}, err + } + d, e, m := eip1559.DecodeOptimismExtraData(rollupCfg, uint64(payload.Timestamp), payload.ExtraData) + copy(r.EIP1559Params[:], eip1559.EncodeHolocene1559Params(d, e)) + if rollupCfg.IsJovian(uint64(payload.Timestamp)) { + // ValidateOptimismExtraData returning a nil error guarantees that m is not nil + r.MinBaseFee = *m } if rollupCfg.IsIsthmus(uint64(payload.Timestamp)) { diff --git a/op-node/rollup/derive/pipeline.go b/op-node/rollup/derive/pipeline.go index a593dd1bc94cf..bae024da1c2de 100644 --- a/op-node/rollup/derive/pipeline.go +++ b/op-node/rollup/derive/pipeline.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -96,7 +97,7 @@ type DerivationPipeline struct { // NewDerivationPipeline creates a DerivationPipeline, to turn L1 data into L2 block-inputs. 
func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, depSet DependencySet, l1Fetcher L1Fetcher, l1Blobs L1BlobsFetcher, - altDA AltDAInputFetcher, l2Source L2Source, metrics Metrics, managedBySupervisor bool, + altDA AltDAInputFetcher, l2Source L2Source, metrics Metrics, managedBySupervisor bool, l1ChainConfig *params.ChainConfig, ) *DerivationPipeline { spec := rollup.NewChainSpec(rollupCfg) // Stages are strung together into a pipeline, @@ -113,7 +114,7 @@ func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, depSet Depe channelMux := NewChannelMux(log, spec, frameQueue, metrics) chInReader := NewChannelInReader(rollupCfg, log, channelMux, metrics) batchMux := NewBatchMux(log, rollupCfg, chInReader, l2Source) - attrBuilder := NewFetchingAttributesBuilder(rollupCfg, depSet, l1Fetcher, l2Source) + attrBuilder := NewFetchingAttributesBuilder(rollupCfg, l1ChainConfig, depSet, l1Fetcher, l2Source) attributesQueue := NewAttributesQueue(log, rollupCfg, attrBuilder, batchMux) // Reset from ResetEngine then up from L1 Traversal. The stages do not talk to each other during @@ -138,7 +139,8 @@ func NewDerivationPipeline(log log.Logger, rollupCfg *rollup.Config, depSet Depe // DerivationReady returns true if the derivation pipeline is ready to be used. // When it's being reset its state is inconsistent, and should not be used externally. 
func (dp *DerivationPipeline) DerivationReady() bool { - return dp.engineIsReset && dp.resetting > 0 + // Ready only when the engine has been confirmed reset and all stages finished resetting + return dp.engineIsReset && dp.resetting >= len(dp.stages) } func (dp *DerivationPipeline) Reset() { diff --git a/op-node/rollup/derive/system_config.go b/op-node/rollup/derive/system_config.go index 64e21ff15f33f..bcb2a33b13145 100644 --- a/op-node/rollup/derive/system_config.go +++ b/op-node/rollup/derive/system_config.go @@ -23,6 +23,7 @@ var ( SystemConfigUpdateUnsafeBlockSigner = common.Hash{31: 3} SystemConfigUpdateEIP1559Params = common.Hash{31: 4} SystemConfigUpdateOperatorFeeParams = common.Hash{31: 5} + SystemConfigUpdateMinBaseFee = common.Hash{31: 6} ) var ( @@ -177,6 +178,22 @@ func ProcessSystemConfigUpdateLogEvent(destSysCfg *eth.SystemConfig, ev *types.L case SystemConfigUpdateUnsafeBlockSigner: // Ignored in derivation. This configurable applies to runtime configuration outside of the derivation. 
return nil + case SystemConfigUpdateMinBaseFee: + if pointer, err := solabi.ReadUint64(reader); err != nil || pointer != 32 { + return NewCriticalError(errors.New("invalid pointer field")) + } + if length, err := solabi.ReadUint64(reader); err != nil || length != 32 { + return NewCriticalError(errors.New("invalid length field")) + } + minBaseFee, err := solabi.ReadUint64(reader) + if err != nil { + return NewCriticalError(errors.New("could not read minBaseFee")) + } + if !solabi.EmptyReader(reader) { + return NewCriticalError(errors.New("too many bytes")) + } + destSysCfg.MinBaseFee = minBaseFee + return nil default: return fmt.Errorf("unrecognized L1 sysCfg update type: %s", updateType) } diff --git a/op-node/rollup/derive/system_config_test.go b/op-node/rollup/derive/system_config_test.go index 5906f26cbea77..e98b792d4cfaa 100644 --- a/op-node/rollup/derive/system_config_test.go +++ b/op-node/rollup/derive/system_config_test.go @@ -34,6 +34,7 @@ var ( } eip1559Params = []byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8} operatorFeeParams = []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0, 0x0, 0x0, 0x7, 0xd, 0x8} + minBaseFee = uint64(1e9) ) // TestProcessSystemConfigUpdateLogEvent tests the parsing of an event and mutating the @@ -231,6 +232,29 @@ func TestProcessSystemConfigUpdateLogEvent(t *testing.T) { }, err: false, }, + { + name: "SystemConfigUpdateMinBaseFee", + log: &types.Log{ + Topics: []common.Hash{ + ConfigUpdateEventABIHash, + ConfigUpdateEventVersion0, + SystemConfigUpdateMinBaseFee, + }, + }, + hook: func(t *testing.T, log *types.Log) *types.Log { + numberData, err := oneUint256.Pack(new(big.Int).SetUint64(minBaseFee)) + require.NoError(t, err) + data, err := bytesArgs.Pack(numberData) + require.NoError(t, err) + log.Data = data + return log + }, + config: eth.SystemConfig{ + EIP1559Params: eth.Bytes8{0, 0, 0, 0, 0, 0, 0, 0}, + MinBaseFee: minBaseFee, + }, + err: 
false, + }, } for _, test := range tests { diff --git a/op-node/rollup/derive/test/random.go b/op-node/rollup/derive/test/random.go index 9f4febcb354ca..1ccece081f1fa 100644 --- a/op-node/rollup/derive/test/random.go +++ b/op-node/rollup/derive/test/random.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testutils" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/trie" ) @@ -23,7 +24,7 @@ func RandomL2Block(rng *rand.Rand, txCount int, t time.Time) (*types.Block, []*t t := uint64(0) rollupCfg.RegolithTime = &t } - l1InfoTx, err := derive.L1InfoDeposit(&rollupCfg, eth.SystemConfig{}, 0, eth.BlockToInfo(l1Block), 0) + l1InfoTx, err := derive.L1InfoDeposit(&rollupCfg, params.MergedTestChainConfig, eth.SystemConfig{}, 0, eth.BlockToInfo(l1Block), 0) if err != nil { panic("L1InfoDeposit: " + err.Error()) } diff --git a/op-node/rollup/driver/constants.go b/op-node/rollup/driver/constants.go new file mode 100644 index 0000000000000..68176b32fddec --- /dev/null +++ b/op-node/rollup/driver/constants.go @@ -0,0 +1,9 @@ +package driver + +import "github.com/ethereum-optimism/optimism/op-node/rollup/sequencing" + +// aliases to not disrupt op-conductor code +var ( + ErrSequencerAlreadyStarted = sequencing.ErrSequencerAlreadyStarted + ErrSequencerAlreadyStopped = sequencing.ErrSequencerAlreadyStopped +) diff --git a/op-node/rollup/driver/driver.go b/op-node/rollup/driver/driver.go index fe91875f63861..c006dcd952954 100644 --- a/op-node/rollup/driver/driver.go +++ b/op-node/rollup/driver/driver.go @@ -2,16 +2,18 @@ package driver import ( "context" + "fmt" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - altda "github.com/ethereum-optimism/optimism/op-alt-da" + gosync "sync" + "github.com/ethereum-optimism/optimism/op-node/metrics/metered" 
"github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/async" "github.com/ethereum-optimism/optimism/op-node/rollup/attributes" - "github.com/ethereum-optimism/optimism/op-node/rollup/clsync" "github.com/ethereum-optimism/optimism/op-node/rollup/conductor" "github.com/ethereum-optimism/optimism/op-node/rollup/confdepth" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" @@ -22,145 +24,16 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup/sync" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/event" + "github.com/ethereum/go-ethereum/params" ) -// aliases to not disrupt op-conductor code -var ( - ErrSequencerAlreadyStarted = sequencing.ErrSequencerAlreadyStarted - ErrSequencerAlreadyStopped = sequencing.ErrSequencerAlreadyStopped -) - -type Metrics interface { - RecordPipelineReset() - RecordPublishingError() - RecordDerivationError() - - RecordL1Ref(name string, ref eth.L1BlockRef) - RecordL2Ref(name string, ref eth.L2BlockRef) - RecordChannelInputBytes(inputCompressedBytes int) - RecordHeadChannelOpened() - RecordChannelTimedOut() - RecordFrame() - - RecordDerivedBatches(batchType string) - - RecordUnsafePayloadsBuffer(length uint64, memSize uint64, next eth.BlockID) - - SetDerivationIdle(idle bool) - SetSequencerState(active bool) - - RecordL1ReorgDepth(d uint64) - - engine.Metrics - metered.L1FetcherMetrics - event.Metrics - sequencing.Metrics -} - -type L1Chain interface { - derive.L1Fetcher - L1BlockRefByLabel(context.Context, eth.BlockLabel) (eth.L1BlockRef, error) -} - -type L2Chain interface { - engine.Engine - L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L2BlockRef, error) - L2BlockRefByHash(ctx context.Context, l2Hash common.Hash) (eth.L2BlockRef, error) - L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, error) -} - -type DerivationPipeline interface { - Reset() - Step(ctx 
context.Context, pendingSafeHead eth.L2BlockRef) (*derive.AttributesWithParent, error) - Origin() eth.L1BlockRef - DerivationReady() bool - ConfirmEngineReset() -} - -type EngineController interface { - engine.RollupAPI - engine.LocalEngineControl - IsEngineSyncing() bool - InsertUnsafePayload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope, ref eth.L2BlockRef) error - TryUpdateEngine(ctx context.Context) error - TryBackupUnsafeReorg(ctx context.Context) (bool, error) -} - -type CLSync interface { - LowestQueuedUnsafeBlock() eth.L2BlockRef -} - -type AttributesHandler interface { - // HasAttributes returns if there are any block attributes to process. - // HasAttributes is for EngineQueue testing only, and can be removed when attribute processing is fully independent. - HasAttributes() bool - // SetAttributes overwrites the set of attributes. This may be nil, to clear what may be processed next. - SetAttributes(attributes *derive.AttributesWithParent) - // Proceed runs one attempt of processing attributes, if any. - // Proceed returns io.EOF if there are no attributes to process. - Proceed(ctx context.Context) error -} - -type Finalizer interface { - FinalizedL1() eth.L1BlockRef - event.Deriver -} - -type AltDAIface interface { - // Notify L1 finalized head so AltDA finality is always behind L1 - Finalize(ref eth.L1BlockRef) - // Set the engine finalization signal callback - OnFinalizedHeadSignal(f altda.HeadSignalFn) - - derive.AltDAInputFetcher -} - -type SyncStatusTracker interface { - event.Deriver - SyncStatus() *eth.SyncStatus - L1Head() eth.L1BlockRef -} - -type Network interface { - // SignAndPublishL2Payload is called by the driver whenever there is a new payload to publish, synchronously with the driver main loop. 
- SignAndPublishL2Payload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error -} - -type AltSync interface { - // RequestL2Range informs the sync source that the given range of L2 blocks is missing, - // and should be retrieved from any available alternative syncing source. - // The start and end of the range are exclusive: - // the start is the head we already have, the end is the first thing we have queued up. - // It's the task of the alt-sync mechanism to use this hint to fetch the right payloads. - // Note that the end and start may not be consistent: in this case the sync method should fetch older history - // - // If the end value is zeroed, then the sync-method may determine the end free of choice, - // e.g. sync till the chain head meets the wallclock time. This functionality is optional: - // a fixed target to sync towards may be determined by picking up payloads through P2P gossip or other sources. - // - // The sync results should be returned back to the driver via the OnUnsafeL2Payload(ctx, payload) method. - // The latest requested range should always take priority over previous requests. - // There may be overlaps in requested ranges. - // An error may be returned if the scheduling fails immediately, e.g. a context timeout. - RequestL2Range(ctx context.Context, start, end eth.L2BlockRef) error -} - -type SequencerStateListener interface { - SequencerStarted() error - SequencerStopped() error -} - -type Drain interface { - Drain() error - Await() <-chan struct{} -} - // NewDriver composes an events handler that tracks L1 state, triggers L2 Derivation, and optionally sequences new L2 blocks. 
func NewDriver( sys event.Registry, drain Drain, driverCfg *Config, cfg *rollup.Config, + l1ChainConfig *params.ChainConfig, depSet derive.DependencySet, l2 L2Chain, l1 L1Chain, @@ -183,65 +56,71 @@ func NewDriver( sys.Register("status", statusTracker) l1Tracker := status.NewL1Tracker(l1) - sys.Register("l1-blocks", l1Tracker) l1 = metered.NewMeteredL1Fetcher(l1Tracker, metrics) verifConfDepth := confdepth.NewConfDepth(driverCfg.VerifierConfDepth, statusTracker.L1Head, l1) - ec := engine.NewEngineController(l2, log, metrics, cfg, syncCfg, - sys.Register("engine-controller", nil)) - - sys.Register("engine-reset", - engine.NewEngineResetDeriver(driverCtx, log, cfg, l1, l2, syncCfg)) - - clSync := clsync.NewCLSync(log, cfg, metrics) // alt-sync still uses cl-sync state to determine what to sync to - sys.Register("cl-sync", clSync) + ec := engine.NewEngineController(driverCtx, l2, log, metrics, cfg, syncCfg, l1, sys.Register("engine-controller", nil)) + // TODO(#17115): Refactor dependency cycles + ec.SetCrossUpdateHandler(statusTracker) var finalizer Finalizer if cfg.AltDAEnabled() { - finalizer = finality.NewAltDAFinalizer(driverCtx, log, cfg, l1, altDA) + finalizer = finality.NewAltDAFinalizer(driverCtx, log, cfg, l1, altDA, ec) } else { - finalizer = finality.NewFinalizer(driverCtx, log, cfg, l1) + finalizer = finality.NewFinalizer(driverCtx, log, cfg, l1, ec) } sys.Register("finalizer", finalizer) - sys.Register("attributes-handler", - attributes.NewAttributesHandler(log, cfg, driverCtx, l2)) + attrHandler := attributes.NewAttributesHandler(log, cfg, driverCtx, l2, ec) + sys.Register("attributes-handler", attrHandler) - derivationPipeline := derive.NewDerivationPipeline(log, cfg, depSet, verifConfDepth, l1Blobs, altDA, l2, metrics, indexingMode) + derivationPipeline := derive.NewDerivationPipeline(log, cfg, depSet, verifConfDepth, l1Blobs, altDA, l2, metrics, indexingMode, l1ChainConfig) - sys.Register("pipeline", - derive.NewPipelineDeriver(driverCtx, 
derivationPipeline)) + pipelineDeriver := derive.NewPipelineDeriver(driverCtx, derivationPipeline) + sys.Register("pipeline", pipelineDeriver) + + // Connect components that need force reset notifications to the engine controller + ec.SetAttributesResetter(attrHandler) + ec.SetPipelineResetter(pipelineDeriver) + + schedDeriv := NewStepSchedulingDeriver(log) + sys.Register("step-scheduler", schedDeriv) syncDeriver := &SyncDeriver{ Derivation: derivationPipeline, SafeHeadNotifs: safeHeadListener, - CLSync: clSync, Engine: ec, SyncCfg: syncCfg, Config: cfg, L1: l1, + L1Tracker: l1Tracker, L2: l2, Log: log, Ctx: driverCtx, ManagedBySupervisor: indexingMode, + StepDeriver: schedDeriv, } + // TODO(#16917) Remove Event System Refactor Comments + // Couple SyncDeriver and EngineController for event refactoring + // Couple EngDeriver and NewAttributesHandler for event refactoring + ec.SyncDeriver = syncDeriver sys.Register("sync", syncDeriver) - - sys.Register("engine", engine.NewEngDeriver(log, driverCtx, cfg, metrics, ec)) - - schedDeriv := NewStepSchedulingDeriver(log) - sys.Register("step-scheduler", schedDeriv) + sys.Register("engine", ec) var sequencer sequencing.SequencerIface if driverCfg.SequencerEnabled { asyncGossiper := async.NewAsyncGossiper(driverCtx, network, log, metrics) - attrBuilder := derive.NewFetchingAttributesBuilder(cfg, depSet, l1, l2) + attrBuilder := derive.NewFetchingAttributesBuilder(cfg, l1ChainConfig, depSet, l1, l2) sequencerConfDepth := confdepth.NewConfDepth(driverCfg.SequencerConfDepth, statusTracker.L1Head, l1) findL1Origin := sequencing.NewL1OriginSelector(driverCtx, log, cfg, sequencerConfDepth) sys.Register("origin-selector", findL1Origin) + + // Connect origin selector to the engine controller for force reset notifications + ec.SetOriginSelectorResetter(findL1Origin) + sequencer = sequencing.NewSequencer(driverCtx, log, cfg, attrBuilder, findL1Origin, - sequencerStateListener, sequencerConductor, asyncGossiper, metrics, dacClient) + 
sequencerStateListener, sequencerConductor, asyncGossiper, metrics, ec, dacClient) sys.Register("sequencer", sequencer) } else { sequencer = sequencing.DisabledSequencer{} @@ -249,7 +128,8 @@ func NewDriver( driverEmitter := sys.Register("driver", nil) driver := &Driver{ - statusTracker: statusTracker, + StatusTracker: statusTracker, + Finalizer: finalizer, SyncDeriver: syncDeriver, sched: schedDeriv, emitter: driverEmitter, @@ -267,3 +147,267 @@ func NewDriver( return driver } + +type Driver struct { + StatusTracker SyncStatusTracker + Finalizer Finalizer + + SyncDeriver *SyncDeriver + + sched *StepSchedulingDeriver + + emitter event.Emitter + drain Drain + + // Requests to block the event loop for synchronous execution to avoid reading an inconsistent state + stateReq chan chan struct{} + + // Upon receiving a channel in this channel, the derivation pipeline is forced to be reset. + // It tells the caller that the reset occurred by closing the passed in channel. + forceReset chan chan struct{} + + // Driver config: verifier and sequencer settings. + // May not be modified after starting the Driver. + driverConfig *Config + + // Interface to signal the L2 block range to sync. + altSync AltSync + + sequencer sequencing.SequencerIface + + metrics Metrics + log log.Logger + + wg gosync.WaitGroup + + driverCtx context.Context + driverCancel context.CancelFunc +} + +// Start starts up the state loop. +// The loop will have been started iff err is not nil. 
+func (s *Driver) Start() error { + log.Info("Starting driver", "sequencerEnabled", s.driverConfig.SequencerEnabled, + "sequencerStopped", s.driverConfig.SequencerStopped, "recoverMode", s.driverConfig.RecoverMode) + if s.driverConfig.SequencerEnabled { + if s.driverConfig.RecoverMode { + log.Warn("sequencer is in recover mode") + s.sequencer.SetRecoverMode(true) + } + if err := s.sequencer.SetMaxSafeLag(s.driverCtx, s.driverConfig.SequencerMaxSafeLag); err != nil { + return fmt.Errorf("failed to set sequencer max safe lag: %w", err) + } + if err := s.sequencer.Init(s.driverCtx, !s.driverConfig.SequencerStopped); err != nil { + return fmt.Errorf("persist initial sequencer state: %w", err) + } + } + + s.wg.Add(1) + go s.eventLoop() + + return nil +} + +func (s *Driver) Close() error { + s.driverCancel() + s.wg.Wait() + s.sequencer.Close() + return nil +} + +// the eventLoop responds to L1 changes and internal timers to produce L2 blocks. +func (s *Driver) eventLoop() { + defer s.wg.Done() + s.log.Info("State loop started") + defer s.log.Info("State loop returned") + + defer s.driverCancel() + + // reqStep requests a derivation step nicely, with a delay if this is a reattempt, or not at all if we already scheduled a reattempt. + reqStep := func() { + s.sched.RequestStep(s.driverCtx, false) + } + + // We call reqStep right away to finish syncing to the tip of the chain if we're behind. + // reqStep will also be triggered when the L1 head moves forward or if there was a reorg on the + // L1 chain that we need to handle. + reqStep() + + sequencerTimer := time.NewTimer(0) + var sequencerCh <-chan time.Time + var prevTime time.Time + // planSequencerAction updates the sequencerTimer with the next action, if any. + // The sequencerCh is nil (indefinitely blocks on read) if no action needs to be performed, + // or set to the timer channel if there is an action scheduled. 
+ planSequencerAction := func() { + nextAction, ok := s.sequencer.NextAction() + if !ok { + if sequencerCh != nil { + s.log.Info("Sequencer paused until new events") + } + sequencerCh = nil + return + } + // avoid unnecessary timer resets + if nextAction == prevTime { + return + } + prevTime = nextAction + sequencerCh = sequencerTimer.C + if len(sequencerCh) > 0 { // empty if not already drained before resetting + <-sequencerCh + } + delta := time.Until(nextAction) + s.log.Info("Scheduled sequencer action", "delta", delta) + sequencerTimer.Reset(delta) + } + + // Create a ticker to check if there is a gap in the engine queue. Whenever + // there is, we send requests to sync source to retrieve the missing payloads. + syncCheckInterval := time.Duration(s.SyncDeriver.Config.BlockTime) * time.Second * 2 + altSyncTicker := time.NewTicker(syncCheckInterval) + defer altSyncTicker.Stop() + lastUnsafeL2 := s.SyncDeriver.Engine.UnsafeL2Head() + + for { + if s.driverCtx.Err() != nil { // don't try to schedule/handle more work when we are closing. + return + } + + planSequencerAction() + + // If the engine is not ready, or if the L2 head is actively changing, then reset the alt-sync: + // there is no need to request L2 blocks when we are syncing already. + if head := s.SyncDeriver.Engine.UnsafeL2Head(); head != lastUnsafeL2 || !s.SyncDeriver.Derivation.DerivationReady() { + lastUnsafeL2 = head + altSyncTicker.Reset(syncCheckInterval) + } + + select { + case <-sequencerCh: + s.emitter.Emit(s.driverCtx, sequencing.SequencerActionEvent{}) + case <-altSyncTicker.C: + // Check if there is a gap in the current unsafe payload queue. 
+ ctx, cancel := context.WithTimeout(s.driverCtx, time.Second*2) + err := s.checkForGapInUnsafeQueue(ctx) + cancel() + if err != nil { + s.log.Warn("failed to check for unsafe L2 blocks to sync", "err", err) + } + case <-s.sched.NextDelayedStep(): + s.sched.AttemptStep(s.driverCtx) + case <-s.sched.NextStep(): + s.sched.AttemptStep(s.driverCtx) + case respCh := <-s.stateReq: + respCh <- struct{}{} + case respCh := <-s.forceReset: + s.log.Warn("Derivation pipeline is manually reset") + s.SyncDeriver.Derivation.Reset() + s.metrics.RecordPipelineReset() + close(respCh) + case <-s.drain.Await(): + if err := s.drain.Drain(); err != nil { + if s.driverCtx.Err() != nil { + return + } else { + s.log.Error("unexpected error from event-draining", "err", err) + s.emitter.Emit(s.driverCtx, rollup.CriticalErrorEvent{ + Err: fmt.Errorf("unexpected error: %w", err), + }) + } + } + case <-s.driverCtx.Done(): + return + } + } +} + +// ResetDerivationPipeline forces a reset of the derivation pipeline. +// It waits for the reset to occur. It simply unblocks the caller rather +// than fully cancelling the reset request upon a context cancellation. 
+func (s *Driver) ResetDerivationPipeline(ctx context.Context) error { + respCh := make(chan struct{}, 1) + select { + case <-ctx.Done(): + return ctx.Err() + case s.forceReset <- respCh: + select { + case <-ctx.Done(): + return ctx.Err() + case <-respCh: + return nil + } + } +} + +func (s *Driver) StartSequencer(ctx context.Context, blockHash common.Hash) error { + return s.sequencer.Start(ctx, blockHash) +} + +func (s *Driver) StopSequencer(ctx context.Context) (common.Hash, error) { + return s.sequencer.Stop(ctx) +} + +func (s *Driver) SequencerActive(ctx context.Context) (bool, error) { + return s.sequencer.Active(), nil +} + +func (s *Driver) OverrideLeader(ctx context.Context) error { + return s.sequencer.OverrideLeader(ctx) +} + +func (s *Driver) ConductorEnabled(ctx context.Context) (bool, error) { + return s.sequencer.ConductorEnabled(ctx), nil +} + +func (s *Driver) SetRecoverMode(ctx context.Context, mode bool) error { + s.sequencer.SetRecoverMode(mode) + return nil +} + +// SyncStatus blocks the driver event loop and captures the syncing status. +func (s *Driver) SyncStatus(ctx context.Context) (*eth.SyncStatus, error) { + return s.StatusTracker.SyncStatus(), nil +} + +// BlockRefWithStatus blocks the driver event loop and captures the syncing status, +// along with an L2 block reference by number consistent with that same status. +// If the event loop is too busy and the context expires, a context error is returned. +func (s *Driver) BlockRefWithStatus(ctx context.Context, num uint64) (eth.L2BlockRef, *eth.SyncStatus, error) { + resp := s.StatusTracker.SyncStatus() + if resp.FinalizedL2.Number >= num { // If finalized, we are certain it does not reorg, and don't have to lock. 
+ ref, err := s.SyncDeriver.L2.L2BlockRefByNumber(ctx, num) + return ref, resp, err + } + wait := make(chan struct{}) + select { + case s.stateReq <- wait: + resp := s.StatusTracker.SyncStatus() + ref, err := s.SyncDeriver.L2.L2BlockRefByNumber(ctx, num) + <-wait + return ref, resp, err + case <-ctx.Done(): + return eth.L2BlockRef{}, nil, ctx.Err() + } +} + +// checkForGapInUnsafeQueue checks if there is a gap in the unsafe queue and attempts to retrieve the missing payloads from an alt-sync method. +// WARNING: This is only an outgoing signal, the blocks are not guaranteed to be retrieved. +// Results are received through OnUnsafeL2Payload. +func (s *Driver) checkForGapInUnsafeQueue(ctx context.Context) error { + start := s.SyncDeriver.Engine.UnsafeL2Head() + end := s.SyncDeriver.Engine.LowestQueuedUnsafeBlock() + // Check if we have missing blocks between the start and end. Request them if we do. + if end == (eth.L2BlockRef{}) { + s.log.Debug("requesting sync with open-end range", "start", start) + return s.altSync.RequestL2Range(ctx, start, eth.L2BlockRef{}) + } else if end.Number > start.Number+1 { + s.log.Debug("requesting missing unsafe L2 block range", "start", start, "end", end, "size", end.Number-start.Number) + return s.altSync.RequestL2Range(ctx, start, end) + } + return nil +} + +func (s *Driver) OnUnsafeL2Payload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) { + s.SyncDeriver.OnUnsafeL2Payload(ctx, payload) +} diff --git a/op-node/rollup/driver/interfaces.go b/op-node/rollup/driver/interfaces.go new file mode 100644 index 0000000000000..fed4c828f7b71 --- /dev/null +++ b/op-node/rollup/driver/interfaces.go @@ -0,0 +1,131 @@ +package driver + +import ( + "context" + + altda "github.com/ethereum-optimism/optimism/op-alt-da" + opnodemetrics "github.com/ethereum-optimism/optimism/op-node/metrics" + "github.com/ethereum-optimism/optimism/op-node/metrics/metered" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + 
"github.com/ethereum-optimism/optimism/op-node/rollup/engine" + "github.com/ethereum-optimism/optimism/op-node/rollup/sequencing" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/event" + "github.com/ethereum/go-ethereum/common" +) + +type Metrics interface { + RecordPipelineReset() + RecordPublishingError() + RecordDerivationError() + + RecordL1Ref(name string, ref eth.L1BlockRef) + RecordL2Ref(name string, ref eth.L2BlockRef) + RecordChannelInputBytes(inputCompressedBytes int) + RecordHeadChannelOpened() + RecordChannelTimedOut() + RecordFrame() + + RecordDerivedBatches(batchType string) + + RecordUnsafePayloadsBuffer(length uint64, memSize uint64, next eth.BlockID) + + SetDerivationIdle(idle bool) + SetSequencerState(active bool) + + RecordL1ReorgDepth(d uint64) + + opnodemetrics.Metricer + metered.L1FetcherMetrics + event.Metrics + sequencing.Metrics +} + +type L1Chain interface { + derive.L1Fetcher + L1BlockRefByLabel(context.Context, eth.BlockLabel) (eth.L1BlockRef, error) +} + +type L2Chain interface { + engine.Engine + L2BlockRefByLabel(ctx context.Context, label eth.BlockLabel) (eth.L2BlockRef, error) + L2BlockRefByHash(ctx context.Context, l2Hash common.Hash) (eth.L2BlockRef, error) + L2BlockRefByNumber(ctx context.Context, num uint64) (eth.L2BlockRef, error) +} + +type DerivationPipeline interface { + Reset() + Step(ctx context.Context, pendingSafeHead eth.L2BlockRef) (*derive.AttributesWithParent, error) + Origin() eth.L1BlockRef + DerivationReady() bool + ConfirmEngineReset() +} + +type AttributesHandler interface { + // HasAttributes returns if there are any block attributes to process. + // HasAttributes is for EngineQueue testing only, and can be removed when attribute processing is fully independent. + HasAttributes() bool + // SetAttributes overwrites the set of attributes. This may be nil, to clear what may be processed next. 
+ SetAttributes(attributes *derive.AttributesWithParent) + // Proceed runs one attempt of processing attributes, if any. + // Proceed returns io.EOF if there are no attributes to process. + Proceed(ctx context.Context) error +} + +type Finalizer interface { + FinalizedL1() eth.L1BlockRef + OnL1Finalized(x eth.L1BlockRef) + event.Deriver +} + +type AltDAIface interface { + // Notify L1 finalized head so AltDA finality is always behind L1 + Finalize(ref eth.L1BlockRef) + // Set the engine finalization signal callback + OnFinalizedHeadSignal(f altda.HeadSignalFn) + + derive.AltDAInputFetcher +} + +type SyncStatusTracker interface { + event.Deriver + SyncStatus() *eth.SyncStatus + L1Head() eth.L1BlockRef + OnL1Unsafe(x eth.L1BlockRef) + OnL1Safe(x eth.L1BlockRef) + OnL1Finalized(x eth.L1BlockRef) +} + +type Network interface { + // SignAndPublishL2Payload is called by the driver whenever there is a new payload to publish, synchronously with the driver main loop. + SignAndPublishL2Payload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error +} + +type AltSync interface { + // RequestL2Range informs the sync source that the given range of L2 blocks is missing, + // and should be retrieved from any available alternative syncing source. + // The start and end of the range are exclusive: + // the start is the head we already have, the end is the first thing we have queued up. + // It's the task of the alt-sync mechanism to use this hint to fetch the right payloads. + // Note that the end and start may not be consistent: in this case the sync method should fetch older history + // + // If the end value is zeroed, then the sync-method may determine the end free of choice, + // e.g. sync till the chain head meets the wallclock time. This functionality is optional: + // a fixed target to sync towards may be determined by picking up payloads through P2P gossip or other sources. 
+ // + // The sync results should be returned back to the driver via the OnUnsafeL2Payload(ctx, payload) method. + // The latest requested range should always take priority over previous requests. + // There may be overlaps in requested ranges. + // An error may be returned if the scheduling fails immediately, e.g. a context timeout. + RequestL2Range(ctx context.Context, start, end eth.L2BlockRef) error +} + +type SequencerStateListener interface { + SequencerStarted() error + SequencerStopped() error +} + +type Drain interface { + Drain() error + Await() <-chan struct{} +} diff --git a/op-node/rollup/driver/state.go b/op-node/rollup/driver/state.go deleted file mode 100644 index 20e980d6dfd91..0000000000000 --- a/op-node/rollup/driver/state.go +++ /dev/null @@ -1,495 +0,0 @@ -package driver - -import ( - "context" - "fmt" - gosync "sync" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-node/p2p" - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-node/rollup/clsync" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" - "github.com/ethereum-optimism/optimism/op-node/rollup/engine" - "github.com/ethereum-optimism/optimism/op-node/rollup/finality" - "github.com/ethereum-optimism/optimism/op-node/rollup/sequencing" - "github.com/ethereum-optimism/optimism/op-node/rollup/status" - "github.com/ethereum-optimism/optimism/op-node/rollup/sync" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/event" -) - -// Deprecated: use eth.SyncStatus instead. 
-type SyncStatus = eth.SyncStatus - -type Driver struct { - statusTracker SyncStatusTracker - - *SyncDeriver - - sched *StepSchedulingDeriver - - emitter event.Emitter - drain Drain - - // Requests to block the event loop for synchronous execution to avoid reading an inconsistent state - stateReq chan chan struct{} - - // Upon receiving a channel in this channel, the derivation pipeline is forced to be reset. - // It tells the caller that the reset occurred by closing the passed in channel. - forceReset chan chan struct{} - - // Driver config: verifier and sequencer settings. - // May not be modified after starting the Driver. - driverConfig *Config - - // Interface to signal the L2 block range to sync. - altSync AltSync - - sequencer sequencing.SequencerIface - - metrics Metrics - log log.Logger - - wg gosync.WaitGroup - - driverCtx context.Context - driverCancel context.CancelFunc -} - -// Start starts up the state loop. -// The loop will have been started iff err is not nil. -func (s *Driver) Start() error { - log.Info("Starting driver", "sequencerEnabled", s.driverConfig.SequencerEnabled, - "sequencerStopped", s.driverConfig.SequencerStopped, "recoverMode", s.driverConfig.RecoverMode) - if s.driverConfig.SequencerEnabled { - if s.driverConfig.RecoverMode { - log.Warn("sequencer is in recover mode") - s.sequencer.SetRecoverMode(true) - } - if err := s.sequencer.SetMaxSafeLag(s.driverCtx, s.driverConfig.SequencerMaxSafeLag); err != nil { - return fmt.Errorf("failed to set sequencer max safe lag: %w", err) - } - if err := s.sequencer.Init(s.driverCtx, !s.driverConfig.SequencerStopped); err != nil { - return fmt.Errorf("persist initial sequencer state: %w", err) - } - } - - s.wg.Add(1) - go s.eventLoop() - - return nil -} - -func (s *Driver) Close() error { - s.driverCancel() - s.wg.Wait() - s.sequencer.Close() - return nil -} - -// the eventLoop responds to L1 changes and internal timers to produce L2 blocks. 
-func (s *Driver) eventLoop() { - defer s.wg.Done() - s.log.Info("State loop started") - defer s.log.Info("State loop returned") - - defer s.driverCancel() - - // reqStep requests a derivation step nicely, with a delay if this is a reattempt, or not at all if we already scheduled a reattempt. - reqStep := func() { - s.emitter.Emit(s.driverCtx, StepReqEvent{}) - } - - // We call reqStep right away to finish syncing to the tip of the chain if we're behind. - // reqStep will also be triggered when the L1 head moves forward or if there was a reorg on the - // L1 chain that we need to handle. - reqStep() - - sequencerTimer := time.NewTimer(0) - var sequencerCh <-chan time.Time - var prevTime time.Time - // planSequencerAction updates the sequencerTimer with the next action, if any. - // The sequencerCh is nil (indefinitely blocks on read) if no action needs to be performed, - // or set to the timer channel if there is an action scheduled. - planSequencerAction := func() { - nextAction, ok := s.sequencer.NextAction() - if !ok { - if sequencerCh != nil { - s.log.Info("Sequencer paused until new events") - } - sequencerCh = nil - return - } - // avoid unnecessary timer resets - if nextAction == prevTime { - return - } - prevTime = nextAction - sequencerCh = sequencerTimer.C - if len(sequencerCh) > 0 { // empty if not already drained before resetting - <-sequencerCh - } - delta := time.Until(nextAction) - s.log.Info("Scheduled sequencer action", "delta", delta) - sequencerTimer.Reset(delta) - } - - // Create a ticker to check if there is a gap in the engine queue. Whenever - // there is, we send requests to sync source to retrieve the missing payloads. - syncCheckInterval := time.Duration(s.Config.BlockTime) * time.Second * 2 - altSyncTicker := time.NewTicker(syncCheckInterval) - defer altSyncTicker.Stop() - lastUnsafeL2 := s.Engine.UnsafeL2Head() - - for { - if s.driverCtx.Err() != nil { // don't try to schedule/handle more work when we are closing. 
- return - } - - planSequencerAction() - - // If the engine is not ready, or if the L2 head is actively changing, then reset the alt-sync: - // there is no need to request L2 blocks when we are syncing already. - if head := s.Engine.UnsafeL2Head(); head != lastUnsafeL2 || !s.Derivation.DerivationReady() { - lastUnsafeL2 = head - altSyncTicker.Reset(syncCheckInterval) - } - - select { - case <-sequencerCh: - s.Emitter.Emit(s.driverCtx, sequencing.SequencerActionEvent{}) - case <-altSyncTicker.C: - // Check if there is a gap in the current unsafe payload queue. - ctx, cancel := context.WithTimeout(s.driverCtx, time.Second*2) - err := s.checkForGapInUnsafeQueue(ctx) - cancel() - if err != nil { - s.log.Warn("failed to check for unsafe L2 blocks to sync", "err", err) - } - case <-s.sched.NextDelayedStep(): - s.emitter.Emit(s.driverCtx, StepAttemptEvent{}) - case <-s.sched.NextStep(): - s.emitter.Emit(s.driverCtx, StepAttemptEvent{}) - case respCh := <-s.stateReq: - respCh <- struct{}{} - case respCh := <-s.forceReset: - s.log.Warn("Derivation pipeline is manually reset") - s.Derivation.Reset() - s.metrics.RecordPipelineReset() - close(respCh) - case <-s.drain.Await(): - if err := s.drain.Drain(); err != nil { - if s.driverCtx.Err() != nil { - return - } else { - s.log.Error("unexpected error from event-draining", "err", err) - s.Emitter.Emit(s.driverCtx, rollup.CriticalErrorEvent{ - Err: fmt.Errorf("unexpected error: %w", err), - }) - } - } - case <-s.driverCtx.Done(): - return - } - } -} - -type SyncDeriver struct { - // The derivation pipeline is reset whenever we reorg. - // The derivation pipeline determines the new l2Safe. - Derivation DerivationPipeline - - SafeHeadNotifs rollup.SafeHeadListener // notified when safe head is updated - - CLSync CLSync - - // The engine controller is used by the sequencer & Derivation components. - // We will also use it for EL sync in a future PR. 
- Engine EngineController - - // Sync Mod Config - SyncCfg *sync.Config - - Config *rollup.Config - - L1 L1Chain - L2 L2Chain - - Emitter event.Emitter - - Log log.Logger - - Ctx context.Context - - // When in interop, and managed by an op-supervisor, - // the node performs a reset based on the instructions of the op-supervisor. - ManagedBySupervisor bool -} - -func (s *SyncDeriver) AttachEmitter(em event.Emitter) { - s.Emitter = em -} - -func (s *SyncDeriver) OnEvent(ctx context.Context, ev event.Event) bool { - switch x := ev.(type) { - case status.L1UnsafeEvent: - // a new L1 head may mean we have the data to not get an EOF again. - s.Emitter.Emit(ctx, StepReqEvent{}) - case finality.FinalizeL1Event: - // On "safe" L1 blocks: no step, justified L1 information does not do anything for L2 derivation or status. - // On "finalized" L1 blocks: we may be able to mark more L2 data as finalized now. - s.Emitter.Emit(ctx, StepReqEvent{}) - case p2p.ReceivedBlockEvent: - s.onIncomingP2PBlock(ctx, x.Envelope) - case StepEvent: - s.SyncStep() - case rollup.ResetEvent: - s.onResetEvent(ctx, x) - case rollup.L1TemporaryErrorEvent: - s.Log.Warn("L1 temporary error", "err", x.Err) - s.Emitter.Emit(ctx, StepReqEvent{}) - case rollup.EngineTemporaryErrorEvent: - s.Log.Warn("Engine temporary error", "err", x.Err) - // Make sure that for any temporarily failed attributes we retry processing. - // This will be triggered by a step. After appropriate backoff. - s.Emitter.Emit(ctx, StepReqEvent{}) - case engine.EngineResetConfirmedEvent: - s.onEngineConfirmedReset(ctx, x) - case derive.DeriverIdleEvent: - // Once derivation is idle the system is healthy - // and we can wait for new inputs. No backoff necessary. 
- s.Emitter.Emit(ctx, ResetStepBackoffEvent{}) - case derive.DeriverMoreEvent: - // If there is more data to process, - // continue derivation quickly - s.Emitter.Emit(ctx, StepReqEvent{ResetBackoff: true}) - case engine.SafeDerivedEvent: - s.onSafeDerivedBlock(ctx, x) - case engine.ELSyncStartedEvent: - s.onELSyncStarted() - case derive.ProvideL1Traversal: - s.Emitter.Emit(ctx, StepReqEvent{}) - default: - return false - } - return true -} - -func (s *SyncDeriver) onIncomingP2PBlock(ctx context.Context, envelope *eth.ExecutionPayloadEnvelope) { - // If we are doing CL sync or done with engine syncing, fallback to the unsafe payload queue & CL P2P sync. - if s.SyncCfg.SyncMode == sync.CLSync || !s.Engine.IsEngineSyncing() { - s.Log.Info("Optimistically queueing unsafe L2 execution payload", "id", envelope.ExecutionPayload.ID()) - s.Emitter.Emit(ctx, clsync.ReceivedUnsafePayloadEvent{Envelope: envelope}) - } else if s.SyncCfg.SyncMode == sync.ELSync { - ref, err := derive.PayloadToBlockRef(s.Config, envelope.ExecutionPayload) - if err != nil { - s.Log.Info("Failed to turn execution payload into a block ref", "id", envelope.ExecutionPayload.ID(), "err", err) - return - } - if ref.Number <= s.Engine.UnsafeL2Head().Number { - return - } - s.Log.Info("Optimistically inserting unsafe L2 execution payload to drive EL sync", "id", envelope.ExecutionPayload.ID()) - if err := s.Engine.InsertUnsafePayload(s.Ctx, envelope, ref); err != nil { - s.Log.Warn("Failed to insert unsafe payload for EL sync", "id", envelope.ExecutionPayload.ID(), "err", err) - } - } -} - -func (s *SyncDeriver) onSafeDerivedBlock(ctx context.Context, x engine.SafeDerivedEvent) { - if s.SafeHeadNotifs != nil && s.SafeHeadNotifs.Enabled() { - if err := s.SafeHeadNotifs.SafeHeadUpdated(x.Safe, x.Source.ID()); err != nil { - // At this point our state is in a potentially inconsistent state as we've updated the safe head - // in the execution client but failed to post process it. 
Reset the pipeline so the safe head rolls back - // a little (it always rolls back at least 1 block) and then it will retry storing the entry - s.Emitter.Emit(ctx, rollup.ResetEvent{ - Err: fmt.Errorf("safe head notifications failed: %w", err), - }) - } - } -} - -func (s *SyncDeriver) onELSyncStarted() { - // The EL sync may progress the safe head in the EL without deriving those blocks from L1 - // which means the safe head db will miss entries so we need to remove all entries to avoid returning bad data - s.Log.Warn("Clearing safe head db because EL sync started") - if s.SafeHeadNotifs != nil { - if err := s.SafeHeadNotifs.SafeHeadReset(eth.L2BlockRef{}); err != nil { - s.Log.Error("Failed to notify safe-head reset when optimistically syncing") - } - } -} - -func (s *SyncDeriver) onEngineConfirmedReset(ctx context.Context, x engine.EngineResetConfirmedEvent) { - // If the listener update fails, we return, - // and don't confirm the engine-reset with the derivation pipeline. - // The pipeline will re-trigger a reset as necessary. - if s.SafeHeadNotifs != nil { - if err := s.SafeHeadNotifs.SafeHeadReset(x.CrossSafe); err != nil { - s.Log.Error("Failed to warn safe-head notifier of safe-head reset", "safe", x.CrossSafe) - return - } - if s.SafeHeadNotifs.Enabled() && x.CrossSafe.ID() == s.Config.Genesis.L2 { - // The rollup genesis block is always safe by definition. So if the pipeline resets this far back we know - // we will process all safe head updates and can record genesis as always safe from L1 genesis. 
- // Note that it is not safe to use cfg.Genesis.L1 here as it is the block immediately before the L2 genesis - // but the contracts may have been deployed earlier than that, allowing creating a dispute game - // with a L1 head prior to cfg.Genesis.L1 - l1Genesis, err := s.L1.L1BlockRefByNumber(s.Ctx, 0) - if err != nil { - s.Log.Error("Failed to retrieve L1 genesis, cannot notify genesis as safe block", "err", err) - return - } - if err := s.SafeHeadNotifs.SafeHeadUpdated(x.CrossSafe, l1Genesis.ID()); err != nil { - s.Log.Error("Failed to notify safe-head listener of safe-head", "err", err) - return - } - } - } - s.Log.Info("Confirming pipeline reset") - s.Emitter.Emit(ctx, derive.ConfirmPipelineResetEvent{}) -} - -func (s *SyncDeriver) onResetEvent(ctx context.Context, x rollup.ResetEvent) { - if s.ManagedBySupervisor { - s.Log.Warn("Encountered reset when managed by op-supervisor, waiting for op-supervisor", "err", x.Err) - // IndexingMode will pick up the ResetEvent - return - } - // If the system corrupts, e.g. due to a reorg, simply reset it - s.Log.Warn("Deriver system is resetting", "err", x.Err) - s.Emitter.Emit(ctx, StepReqEvent{}) - s.Emitter.Emit(ctx, engine.ResetEngineRequestEvent{}) -} - -// SyncStep performs the sequence of encapsulated syncing steps. -// Warning: this sequence will be broken apart as outlined in op-node derivers design doc. -func (s *SyncDeriver) SyncStep() { - s.Log.Debug("Sync process step") - - s.Emitter.Emit(s.Ctx, engine.TryBackupUnsafeReorgEvent{}) - - s.Emitter.Emit(s.Ctx, engine.TryUpdateEngineEvent{}) - - if s.Engine.IsEngineSyncing() { - // The pipeline cannot move forwards if doing EL sync. - s.Log.Debug("Rollup driver is backing off because execution engine is syncing.", - "unsafe_head", s.Engine.UnsafeL2Head()) - s.Emitter.Emit(s.Ctx, ResetStepBackoffEvent{}) - return - } - - // Any now processed forkchoice updates will trigger CL-sync payload processing, if any payload is queued up. 
- - // Since we don't force attributes to be processed at this point, - // we cannot safely directly trigger the derivation, as that may generate new attributes that - // conflict with what attributes have not been applied yet. - // Instead, we request the engine to repeat where its pending-safe head is at. - // Upon the pending-safe signal the attributes deriver can then ask the pipeline - // to generate new attributes, if no attributes are known already. - s.Emitter.Emit(s.Ctx, engine.PendingSafeRequestEvent{}) - - // If interop is configured, we have to run the engine events, - // to ensure cross-L2 safety is continuously verified against the interop-backend. - if s.Config.InteropTime != nil && !s.ManagedBySupervisor { - s.Emitter.Emit(s.Ctx, engine.CrossUpdateRequestEvent{}) - } -} - -// ResetDerivationPipeline forces a reset of the derivation pipeline. -// It waits for the reset to occur. It simply unblocks the caller rather -// than fully cancelling the reset request upon a context cancellation. 
-func (s *Driver) ResetDerivationPipeline(ctx context.Context) error { - respCh := make(chan struct{}, 1) - select { - case <-ctx.Done(): - return ctx.Err() - case s.forceReset <- respCh: - select { - case <-ctx.Done(): - return ctx.Err() - case <-respCh: - return nil - } - } -} - -func (s *Driver) OnUnsafeL2Payload(ctx context.Context, payload *eth.ExecutionPayloadEnvelope) error { - s.emitter.Emit(ctx, p2p.ReceivedBlockEvent{ - From: "", - Envelope: payload, - }) - return nil -} - -func (s *Driver) StartSequencer(ctx context.Context, blockHash common.Hash) error { - return s.sequencer.Start(ctx, blockHash) -} - -func (s *Driver) StopSequencer(ctx context.Context) (common.Hash, error) { - return s.sequencer.Stop(ctx) -} - -func (s *Driver) SequencerActive(ctx context.Context) (bool, error) { - return s.sequencer.Active(), nil -} - -func (s *Driver) OverrideLeader(ctx context.Context) error { - return s.sequencer.OverrideLeader(ctx) -} - -func (s *Driver) ConductorEnabled(ctx context.Context) (bool, error) { - return s.sequencer.ConductorEnabled(ctx), nil -} - -func (s *Driver) SetRecoverMode(ctx context.Context, mode bool) error { - s.sequencer.SetRecoverMode(mode) - return nil -} - -// SyncStatus blocks the driver event loop and captures the syncing status. -func (s *Driver) SyncStatus(ctx context.Context) (*eth.SyncStatus, error) { - return s.statusTracker.SyncStatus(), nil -} - -// BlockRefWithStatus blocks the driver event loop and captures the syncing status, -// along with an L2 block reference by number consistent with that same status. -// If the event loop is too busy and the context expires, a context error is returned. -func (s *Driver) BlockRefWithStatus(ctx context.Context, num uint64) (eth.L2BlockRef, *eth.SyncStatus, error) { - resp := s.statusTracker.SyncStatus() - if resp.FinalizedL2.Number >= num { // If finalized, we are certain it does not reorg, and don't have to lock. 
- ref, err := s.L2.L2BlockRefByNumber(ctx, num) - return ref, resp, err - } - wait := make(chan struct{}) - select { - case s.stateReq <- wait: - resp := s.statusTracker.SyncStatus() - ref, err := s.L2.L2BlockRefByNumber(ctx, num) - <-wait - return ref, resp, err - case <-ctx.Done(): - return eth.L2BlockRef{}, nil, ctx.Err() - } -} - -// checkForGapInUnsafeQueue checks if there is a gap in the unsafe queue and attempts to retrieve the missing payloads from an alt-sync method. -// WARNING: This is only an outgoing signal, the blocks are not guaranteed to be retrieved. -// Results are received through OnUnsafeL2Payload. -func (s *Driver) checkForGapInUnsafeQueue(ctx context.Context) error { - start := s.Engine.UnsafeL2Head() - end := s.CLSync.LowestQueuedUnsafeBlock() - // Check if we have missing blocks between the start and end. Request them if we do. - if end == (eth.L2BlockRef{}) { - s.log.Debug("requesting sync with open-end range", "start", start) - return s.altSync.RequestL2Range(ctx, start, eth.L2BlockRef{}) - } else if end.Number > start.Number+1 { - s.log.Debug("requesting missing unsafe L2 block range", "start", start, "end", end, "size", end.Number-start.Number) - return s.altSync.RequestL2Range(ctx, start, end) - } - return nil -} diff --git a/op-node/rollup/driver/steps.go b/op-node/rollup/driver/step_scheduling_deriver.go similarity index 63% rename from op-node/rollup/driver/steps.go rename to op-node/rollup/driver/step_scheduling_deriver.go index 2b29e1d13f654..31e1c550e855f 100644 --- a/op-node/rollup/driver/steps.go +++ b/op-node/rollup/driver/step_scheduling_deriver.go @@ -10,36 +10,6 @@ import ( "github.com/ethereum-optimism/optimism/op-service/retry" ) -type ResetStepBackoffEvent struct { -} - -func (ev ResetStepBackoffEvent) String() string { - return "reset-step-backoff" -} - -type StepDelayedReqEvent struct { - Delay time.Duration -} - -func (ev StepDelayedReqEvent) String() string { - return "step-delayed-req" -} - -type StepReqEvent struct 
{ - ResetBackoff bool -} - -func (ev StepReqEvent) String() string { - return "step-req" -} - -type StepAttemptEvent struct { -} - -func (ev StepAttemptEvent) String() string { - return "step-attempt" -} - type StepEvent struct { } @@ -47,6 +17,15 @@ func (ev StepEvent) String() string { return "step" } +type StepDeriver interface { + event.AttachEmitter + NextStep() <-chan struct{} + NextDelayedStep() <-chan time.Time + RequestStep(ctx context.Context, resetBackoff bool) + AttemptStep(ctx context.Context) + ResetStepBackoff(ctx context.Context) +} + // StepSchedulingDeriver is a deriver that emits StepEvent events. // The deriver can be requested to schedule a step with a StepReqEvent. // @@ -102,7 +81,7 @@ func (s *StepSchedulingDeriver) NextDelayedStep() <-chan time.Time { return s.delayedStepReq } -func (s *StepSchedulingDeriver) OnEvent(ctx context.Context, ev event.Event) bool { +func (s *StepSchedulingDeriver) RequestStep(ctx context.Context, resetBackoff bool) { step := func() { s.delayedStepReq = nil select { @@ -112,40 +91,38 @@ func (s *StepSchedulingDeriver) OnEvent(ctx context.Context, ev event.Event) boo } } - switch x := ev.(type) { - case StepDelayedReqEvent: + if resetBackoff { + s.stepAttempts = 0 + } + if s.stepAttempts > 0 { + // if this is not the first attempt, we re-schedule with a backoff, *without blocking other events* if s.delayedStepReq == nil { - s.delayedStepReq = time.After(x.Delay) - } - case StepReqEvent: - if x.ResetBackoff { - s.stepAttempts = 0 - } - if s.stepAttempts > 0 { - // if this is not the first attempt, we re-schedule with a backoff, *without blocking other events* - if s.delayedStepReq == nil { - delay := s.bOffStrategy.Duration(s.stepAttempts) - s.log.Debug("scheduling re-attempt with delay", "attempts", s.stepAttempts, "delay", delay) - s.delayedStepReq = time.After(delay) - } else { - s.log.Debug("ignoring step request, already scheduled re-attempt after previous failure", "attempts", s.stepAttempts) - } + delay := 
s.bOffStrategy.Duration(s.stepAttempts) + s.log.Debug("scheduling re-attempt with delay", "attempts", s.stepAttempts, "delay", delay) + s.delayedStepReq = time.After(delay) } else { - step() - } - case StepAttemptEvent: - // clear the delayed-step channel - s.delayedStepReq = nil - if s.stepAttempts > 0 { - s.log.Debug("Running step retry", "attempts", s.stepAttempts) + s.log.Debug("ignoring step request, already scheduled re-attempt after previous failure", "attempts", s.stepAttempts) } - // count as attempt by default. We reset to 0 if we are making healthy progress. - s.stepAttempts += 1 - s.emitter.Emit(ctx, StepEvent(x)) - case ResetStepBackoffEvent: - s.stepAttempts = 0 - default: - return false + } else { + step() } - return true +} + +func (s *StepSchedulingDeriver) AttemptStep(ctx context.Context) { + // clear the delayed-step channel + s.delayedStepReq = nil + if s.stepAttempts > 0 { + s.log.Debug("Running step retry", "attempts", s.stepAttempts) + } + // count as attempt by default. We reset to 0 if we are making healthy progress. 
+ s.stepAttempts += 1 + s.emitter.Emit(ctx, StepEvent{}) +} + +func (s *StepSchedulingDeriver) ResetStepBackoff(ctx context.Context) { + s.stepAttempts = 0 +} + +func (s *StepSchedulingDeriver) OnEvent(ctx context.Context, ev event.Event) bool { + return false } diff --git a/op-node/rollup/driver/steps_test.go b/op-node/rollup/driver/step_scheduling_deriver.go_test.go similarity index 70% rename from op-node/rollup/driver/steps_test.go rename to op-node/rollup/driver/step_scheduling_deriver.go_test.go index 60c74d34fad88..6b7a3ad56fb8a 100644 --- a/op-node/rollup/driver/steps_test.go +++ b/op-node/rollup/driver/step_scheduling_deriver.go_test.go @@ -21,35 +21,35 @@ func TestStepSchedulingDeriver(t *testing.T) { sched := NewStepSchedulingDeriver(logger) sched.AttachEmitter(emitter) require.Len(t, sched.NextStep(), 0, "start empty") - sched.OnEvent(context.Background(), StepReqEvent{}) + sched.RequestStep(context.Background(), false) require.Len(t, sched.NextStep(), 1, "take request") - sched.OnEvent(context.Background(), StepReqEvent{}) + sched.RequestStep(context.Background(), false) require.Len(t, sched.NextStep(), 1, "ignore duplicate request") require.Empty(t, queued, "only scheduled so far, no step attempts yet") <-sched.NextStep() - sched.OnEvent(context.Background(), StepAttemptEvent{}) + sched.AttemptStep(context.Background()) require.Equal(t, []event.Event{StepEvent{}}, queued, "got step event") require.Nil(t, sched.NextDelayedStep(), "no delayed steps yet") - sched.OnEvent(context.Background(), StepReqEvent{}) + sched.RequestStep(context.Background(), false) require.NotNil(t, sched.NextDelayedStep(), "2nd attempt before backoff reset causes delayed step to be scheduled") - sched.OnEvent(context.Background(), StepReqEvent{}) + sched.RequestStep(context.Background(), false) require.NotNil(t, sched.NextDelayedStep(), "can continue to request attempts") - sched.OnEvent(context.Background(), StepReqEvent{}) + sched.RequestStep(context.Background(), false) 
require.Len(t, sched.NextStep(), 0, "no step requests accepted without delay if backoff is counting") - sched.OnEvent(context.Background(), StepReqEvent{ResetBackoff: true}) + sched.RequestStep(context.Background(), true) require.Len(t, sched.NextStep(), 1, "request accepted if backoff is reset") <-sched.NextStep() - sched.OnEvent(context.Background(), StepReqEvent{}) + sched.RequestStep(context.Background(), false) require.Len(t, sched.NextStep(), 1, "no backoff, no attempt has been made yet") <-sched.NextStep() - sched.OnEvent(context.Background(), StepAttemptEvent{}) - sched.OnEvent(context.Background(), StepReqEvent{}) + sched.AttemptStep(context.Background()) + sched.RequestStep(context.Background(), false) require.Len(t, sched.NextStep(), 0, "backoff again") - sched.OnEvent(context.Background(), ResetStepBackoffEvent{}) - sched.OnEvent(context.Background(), StepReqEvent{}) + sched.ResetStepBackoff(context.Background()) + sched.RequestStep(context.Background(), false) require.Len(t, sched.NextStep(), 1, "reset backoff accepted, was able to schedule non-delayed step") } diff --git a/op-node/rollup/driver/sync_deriver.go b/op-node/rollup/driver/sync_deriver.go new file mode 100644 index 0000000000000..248844e9622e8 --- /dev/null +++ b/op-node/rollup/driver/sync_deriver.go @@ -0,0 +1,245 @@ +package driver + +import ( + "context" + "errors" + "fmt" + + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-node/rollup/engine" + "github.com/ethereum-optimism/optimism/op-node/rollup/status" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/event" + "github.com/ethereum/go-ethereum/log" +) + +type SyncDeriver struct { + // The derivation pipeline is reset whenever we reorg. + // The derivation pipeline determines the new l2Safe. 
+ Derivation DerivationPipeline + + SafeHeadNotifs rollup.SafeHeadListener // notified when safe head is updated + + // The engine controller is used by the sequencer & Derivation components. + // We will also use it for EL sync in a future PR. + Engine *engine.EngineController + + // Sync Mod Config + SyncCfg *sync.Config + + Config *rollup.Config + + L1 L1Chain + // Track L1 view when new unsafe L1 block is observed + L1Tracker *status.L1Tracker + L2 L2Chain + + Emitter event.Emitter + + Log log.Logger + + Ctx context.Context + + // When in interop, and managed by an op-supervisor, + // the node performs a reset based on the instructions of the op-supervisor. + ManagedBySupervisor bool + + StepDeriver StepDeriver +} + +func (s *SyncDeriver) AttachEmitter(em event.Emitter) { + s.Emitter = em +} + +func (s *SyncDeriver) OnL1Unsafe(ctx context.Context) { + // a new L1 head may mean we have the data to not get an EOF again. + s.StepDeriver.RequestStep(ctx, false) +} + +func (s *SyncDeriver) OnL1Finalized(ctx context.Context) { + // On "safe" L1 blocks: no step, justified L1 information does not do anything for L2 derivation or status. + // On "finalized" L1 blocks: we may be able to mark more L2 data as finalized now. 
+ s.StepDeriver.RequestStep(ctx, false) +} + +func (s *SyncDeriver) OnEvent(ctx context.Context, ev event.Event) bool { + // TODO(#16917) Remove Event System Refactor Comments + // ELSyncStartedEvent is removed and OnELSyncStarted is synchronously called at EngineController + // ReceivedBlockEvent is removed and OnUnsafeL2Payload is synchronously called at NewBlockReceiver + // L1UnsafeEvent is removed and OnL1Unsafe is synchronously called at L1Handler + // FinalizeL1Event is removed and OnL1Finalized is synchronously called at L1Handler + switch x := ev.(type) { + case StepEvent: + s.SyncStep() + case rollup.ResetEvent: + s.onResetEvent(ctx, x) + case rollup.L1TemporaryErrorEvent: + s.Log.Warn("L1 temporary error", "err", x.Err) + s.StepDeriver.RequestStep(ctx, false) + case rollup.EngineTemporaryErrorEvent: + s.Log.Warn("Engine temporary error", "err", x.Err) + // Make sure that for any temporarily failed attributes we retry processing. + // This will be triggered by a step. After appropriate backoff. + s.StepDeriver.RequestStep(ctx, false) + case engine.EngineResetConfirmedEvent: + s.onEngineConfirmedReset(ctx, x) + case derive.DeriverIdleEvent: + // Once derivation is idle the system is healthy + // and we can wait for new inputs. No backoff necessary. + s.StepDeriver.ResetStepBackoff(ctx) + case derive.DeriverMoreEvent: + // If there is more data to process, + // continue derivation quickly + s.StepDeriver.RequestStep(ctx, true) + case engine.SafeDerivedEvent: + s.onSafeDerivedBlock(ctx, x) + case derive.ProvideL1Traversal: + s.StepDeriver.RequestStep(ctx, false) + default: + return false + } + return true +} + +func (s *SyncDeriver) OnUnsafeL2Payload(ctx context.Context, envelope *eth.ExecutionPayloadEnvelope) { + // If we are doing CL sync or done with engine syncing, fallback to the unsafe payload queue & CL P2P sync. 
+ if s.SyncCfg.SyncMode == sync.CLSync || !s.Engine.IsEngineSyncing() { + s.Log.Info("Optimistically queueing unsafe L2 execution payload", "id", envelope.ExecutionPayload.ID()) + s.Engine.AddUnsafePayload(ctx, envelope) + } else if s.SyncCfg.SyncMode == sync.ELSync { + ref, err := derive.PayloadToBlockRef(s.Config, envelope.ExecutionPayload) + if err != nil { + s.Log.Info("Failed to turn execution payload into a block ref", "id", envelope.ExecutionPayload.ID(), "err", err) + return + } + if ref.Number <= s.Engine.UnsafeL2Head().Number { + return + } + s.Log.Info("Optimistically inserting unsafe L2 execution payload to drive EL sync", "id", envelope.ExecutionPayload.ID()) + if err := s.Engine.InsertUnsafePayload(s.Ctx, envelope, ref); err != nil { + s.Log.Warn("Failed to insert unsafe payload for EL sync", "id", envelope.ExecutionPayload.ID(), "err", err) + } + } +} + +func (s *SyncDeriver) onSafeDerivedBlock(ctx context.Context, x engine.SafeDerivedEvent) { + if s.SafeHeadNotifs != nil && s.SafeHeadNotifs.Enabled() { + if err := s.SafeHeadNotifs.SafeHeadUpdated(x.Safe, x.Source.ID()); err != nil { + // At this point our state is in a potentially inconsistent state as we've updated the safe head + // in the execution client but failed to post process it. 
Reset the pipeline so the safe head rolls back + // a little (it always rolls back at least 1 block) and then it will retry storing the entry + s.Emitter.Emit(ctx, rollup.ResetEvent{ + Err: fmt.Errorf("safe head notifications failed: %w", err), + }) + } + } +} + +func (s *SyncDeriver) OnELSyncStarted() { + // The EL sync may progress the safe head in the EL without deriving those blocks from L1 + // which means the safe head db will miss entries so we need to remove all entries to avoid returning bad data + s.Log.Warn("Clearing safe head db because EL sync started") + if s.SafeHeadNotifs != nil { + if err := s.SafeHeadNotifs.SafeHeadReset(eth.L2BlockRef{}); err != nil { + s.Log.Error("Failed to notify safe-head reset when optimistically syncing") + } + } +} + +func (s *SyncDeriver) onEngineConfirmedReset(ctx context.Context, x engine.EngineResetConfirmedEvent) { + // If the listener update fails, we return, + // and don't confirm the engine-reset with the derivation pipeline. + // The pipeline will re-trigger a reset as necessary. + if s.SafeHeadNotifs != nil { + if err := s.SafeHeadNotifs.SafeHeadReset(x.CrossSafe); err != nil { + s.Log.Error("Failed to warn safe-head notifier of safe-head reset", "safe", x.CrossSafe) + return + } + if s.SafeHeadNotifs.Enabled() && x.CrossSafe.ID() == s.Config.Genesis.L2 { + // The rollup genesis block is always safe by definition. So if the pipeline resets this far back we know + // we will process all safe head updates and can record genesis as always safe from L1 genesis. 
+ // Note that it is not safe to use cfg.Genesis.L1 here as it is the block immediately before the L2 genesis + // but the contracts may have been deployed earlier than that, allowing creating a dispute game + // with a L1 head prior to cfg.Genesis.L1 + l1Genesis, err := s.L1.L1BlockRefByNumber(s.Ctx, 0) + if err != nil { + s.Log.Error("Failed to retrieve L1 genesis, cannot notify genesis as safe block", "err", err) + return + } + if err := s.SafeHeadNotifs.SafeHeadUpdated(x.CrossSafe, l1Genesis.ID()); err != nil { + s.Log.Error("Failed to notify safe-head listener of safe-head", "err", err) + return + } + } + } + s.Log.Info("Confirming pipeline reset") + s.Emitter.Emit(ctx, derive.ConfirmPipelineResetEvent{}) +} + +func (s *SyncDeriver) onResetEvent(ctx context.Context, x rollup.ResetEvent) { + if s.ManagedBySupervisor { + s.Log.Warn("Encountered reset when managed by op-supervisor, waiting for op-supervisor", "err", x.Err) + // IndexingMode will pick up the ResetEvent + return + } + // If the system corrupts, e.g. due to a reorg, simply reset it + s.Log.Warn("Deriver system is resetting", "err", x.Err) + s.Emitter.Emit(ctx, engine.ResetEngineRequestEvent{}) + s.StepDeriver.RequestStep(ctx, false) +} + +func (s *SyncDeriver) tryBackupUnsafeReorg() { + // If we don't need to call FCU to restore unsafeHead using backupUnsafe, keep going b/c + // this was a no-op(except correcting invalid state when backupUnsafe is empty but TryBackupUnsafeReorg called). + fcuCalled, err := s.Engine.TryBackupUnsafeReorg(s.Ctx) + // Dealing with legacy here: it used to skip over the error-handling if fcuCalled was false. + // But that combination is not actually a code-path in TryBackupUnsafeReorg. + // We should drop fcuCalled, and make the function emit events directly, + // once there are no more synchronous callers. 
+ if !fcuCalled && err != nil { + s.Log.Crit("unexpected TryBackupUnsafeReorg error after no FCU call", "err", err) + } + if err != nil { + // If we needed to perform a network call, then we should yield even if we did not encounter an error. + if errors.Is(err, derive.ErrReset) { + s.Emitter.Emit(s.Ctx, rollup.ResetEvent{Err: err}) + } else if errors.Is(err, derive.ErrTemporary) { + s.Emitter.Emit(s.Ctx, rollup.EngineTemporaryErrorEvent{Err: err}) + } else { + s.Emitter.Emit(s.Ctx, rollup.CriticalErrorEvent{ + Err: fmt.Errorf("unexpected TryBackupUnsafeReorg error type: %w", err), + }) + } + } +} + +// SyncStep performs the sequence of encapsulated syncing steps. +// Warning: this sequence will be broken apart as outlined in op-node derivers design doc. +func (s *SyncDeriver) SyncStep() { + s.Log.Debug("Sync process step") + + s.tryBackupUnsafeReorg() + + s.Engine.TryUpdateEngine(s.Ctx) + + if s.Engine.IsEngineSyncing() { + // The pipeline cannot move forwards if doing EL sync. + s.Log.Debug("Rollup driver is backing off because execution engine is syncing.", + "unsafe_head", s.Engine.UnsafeL2Head()) + s.StepDeriver.ResetStepBackoff(s.Ctx) + return + } + + // Any now processed forkchoice updates will trigger CL-sync payload processing, if any payload is queued up. + + // Since we don't force attributes to be processed at this point, + // we cannot safely directly trigger the derivation, as that may generate new attributes that + // conflict with what attributes have not been applied yet. + // Instead, we request the engine to repeat where its pending-safe head is at. + // Upon the pending-safe signal the attributes deriver can then ask the pipeline + // to generate new attributes, if no attributes are known already. 
+ s.Engine.RequestPendingSafeUpdate(s.Ctx) + +} diff --git a/op-node/rollup/engine/api.go b/op-node/rollup/engine/api.go index 7e03e4b6e420c..25bd2ab8aec7e 100644 --- a/op-node/rollup/engine/api.go +++ b/op-node/rollup/engine/api.go @@ -21,25 +21,25 @@ type RollupAPI interface { var _ RollupAPI = (*EngineController)(nil) -func (ec *EngineController) OpenBlock(ctx context.Context, parent eth.BlockID, attrs *eth.PayloadAttributes) (eth.PayloadInfo, error) { - ec.mu.Lock() - defer ec.mu.Unlock() +func (e *EngineController) OpenBlock(ctx context.Context, parent eth.BlockID, attrs *eth.PayloadAttributes) (eth.PayloadInfo, error) { + e.mu.Lock() + defer e.mu.Unlock() - _, err := ec.engine.L2BlockRefByHash(ctx, parent.Hash) + _, err := e.engine.L2BlockRefByHash(ctx, parent.Hash) if err != nil { return eth.PayloadInfo{}, fmt.Errorf("failed to retrieve parent block %s from engine: %w", parent, err) } - if err := ec.initializeUnknowns(ctx); err != nil { + if err := e.initializeUnknowns(ctx); err != nil { return eth.PayloadInfo{}, fmt.Errorf("failed to initialize forkchoice pre-state: %w", err) } fc := eth.ForkchoiceState{ HeadBlockHash: parent.Hash, - SafeBlockHash: ec.safeHead.Hash, - FinalizedBlockHash: ec.finalizedHead.Hash, + SafeBlockHash: e.safeHead.Hash, + FinalizedBlockHash: e.finalizedHead.Hash, } - id, errTyp, err := startPayload(ctx, ec.engine, fc, attrs) + id, errTyp, err := e.startPayload(ctx, fc, attrs) if err != nil { switch errTyp { case BlockInsertTemporaryErr: @@ -71,10 +71,10 @@ func (ec *EngineController) OpenBlock(ctx context.Context, parent eth.BlockID, a }, nil } -func (ec *EngineController) CancelBlock(ctx context.Context, id eth.PayloadInfo) error { - ec.mu.Lock() - defer ec.mu.Unlock() - _, err := ec.engine.GetPayload(ctx, id) +func (e *EngineController) CancelBlock(ctx context.Context, id eth.PayloadInfo) error { + e.mu.Lock() + defer e.mu.Unlock() + _, err := e.engine.GetPayload(ctx, id) if err != nil { var rpcErr rpc.Error if errors.As(err, 
&rpcErr) && eth.ErrorCode(rpcErr.ErrorCode()) == eth.UnknownPayload { @@ -91,10 +91,10 @@ func (ec *EngineController) CancelBlock(ctx context.Context, id eth.PayloadInfo) return nil } -func (ec *EngineController) SealBlock(ctx context.Context, id eth.PayloadInfo) (*eth.ExecutionPayloadEnvelope, error) { - ec.mu.Lock() - defer ec.mu.Unlock() - envelope, err := ec.engine.GetPayload(ctx, id) +func (e *EngineController) SealBlock(ctx context.Context, id eth.PayloadInfo) (*eth.ExecutionPayloadEnvelope, error) { + e.mu.Lock() + defer e.mu.Unlock() + envelope, err := e.engine.GetPayload(ctx, id) if err != nil { var rpcErr rpc.Error if errors.As(err, &rpcErr) && eth.ErrorCode(rpcErr.ErrorCode()) == eth.UnknownPayload { @@ -111,17 +111,17 @@ func (ec *EngineController) SealBlock(ctx context.Context, id eth.PayloadInfo) ( return envelope, nil } -func (ec *EngineController) CommitBlock(ctx context.Context, signed *opsigner.SignedExecutionPayloadEnvelope) error { - ec.mu.Lock() - defer ec.mu.Unlock() +func (e *EngineController) CommitBlock(ctx context.Context, signed *opsigner.SignedExecutionPayloadEnvelope) error { + e.mu.Lock() + defer e.mu.Unlock() envelope := signed.Envelope - ref, err := derive.PayloadToBlockRef(ec.rollupCfg, envelope.ExecutionPayload) + ref, err := derive.PayloadToBlockRef(e.rollupCfg, envelope.ExecutionPayload) if err != nil { return fmt.Errorf("invalid payload: %w", err) } - status, err := ec.engine.NewPayload(ctx, envelope.ExecutionPayload, envelope.ParentBeaconBlockRoot) + status, err := e.engine.NewPayload(ctx, envelope.ExecutionPayload, envelope.ParentBeaconBlockRoot) if err != nil { return fmt.Errorf("failed to insert payload: %w", err) } @@ -136,9 +136,9 @@ func (ec *EngineController) CommitBlock(ctx context.Context, signed *opsigner.Si break } - ec.SetUnsafeHead(ref) - ec.emitter.Emit(ctx, UnsafeUpdateEvent{Ref: ref}) - if err := ec.TryUpdateEngine(ctx); err != nil { + e.SetUnsafeHead(ref) + e.emitter.Emit(ctx, UnsafeUpdateEvent{Ref: ref}) + if 
err := e.tryUpdateEngineInternal(ctx); err != nil { return fmt.Errorf("failed to update engine forkchoice: %w", err) } return nil diff --git a/op-node/rollup/engine/build_cancel.go b/op-node/rollup/engine/build_cancel.go index b4f2ce28f1ee1..7946233c40cd0 100644 --- a/op-node/rollup/engine/build_cancel.go +++ b/op-node/rollup/engine/build_cancel.go @@ -19,21 +19,21 @@ func (ev BuildCancelEvent) String() string { return "build-cancel" } -func (eq *EngDeriver) onBuildCancel(ctx context.Context, ev BuildCancelEvent) { - rpcCtx, cancel := context.WithTimeout(eq.ctx, buildCancelTimeout) +func (e *EngineController) onBuildCancel(ctx context.Context, ev BuildCancelEvent) { + rpcCtx, cancel := context.WithTimeout(e.ctx, buildCancelTimeout) defer cancel() // the building job gets wrapped up as soon as the payload is retrieved, there's no explicit cancel in the Engine API - eq.log.Warn("cancelling old block building job", "info", ev.Info) - _, err := eq.ec.engine.GetPayload(rpcCtx, ev.Info) + e.log.Warn("cancelling old block building job", "info", ev.Info) + _, err := e.engine.GetPayload(rpcCtx, ev.Info) if err != nil { var rpcErr rpc.Error if errors.As(err, &rpcErr) && eth.ErrorCode(rpcErr.ErrorCode()) == eth.UnknownPayload { - eq.log.Warn("tried cancelling unknown block building job", "info", ev.Info, "err", err) + e.log.Warn("tried cancelling unknown block building job", "info", ev.Info, "err", err) return // if unknown, then it did not need to be cancelled anymore. 
} - eq.log.Error("failed to cancel block building job", "info", ev.Info, "err", err) + e.log.Error("failed to cancel block building job", "info", ev.Info, "err", err) if !ev.Force { - eq.emitter.Emit(ctx, rollup.EngineTemporaryErrorEvent{Err: err}) + e.emitter.Emit(ctx, rollup.EngineTemporaryErrorEvent{Err: err}) } } } diff --git a/op-node/rollup/engine/build_invalid.go b/op-node/rollup/engine/build_invalid.go index 62872a053a602..16f4e96ab97f7 100644 --- a/op-node/rollup/engine/build_invalid.go +++ b/op-node/rollup/engine/build_invalid.go @@ -30,43 +30,43 @@ func (ev InvalidPayloadAttributesEvent) String() string { return "invalid-payload-attributes" } -func (eq *EngDeriver) onBuildInvalid(ctx context.Context, ev BuildInvalidEvent) { - eq.log.Warn("could not process payload attributes", "err", ev.Err) +func (e *EngineController) onBuildInvalid(ctx context.Context, ev BuildInvalidEvent) { + e.log.Warn("could not process payload attributes", "err", ev.Err) // Deposit transaction execution errors are suppressed in the execution engine, but if the // block is somehow invalid, there is nothing we can do to recover & we should exit. 
if ev.Attributes.Attributes.IsDepositsOnly() { - eq.log.Error("deposit only block was invalid", "parent", ev.Attributes.Parent, "err", ev.Err) - eq.emitter.Emit(ctx, rollup.CriticalErrorEvent{ + e.log.Error("deposit only block was invalid", "parent", ev.Attributes.Parent, "err", ev.Err) + e.emitter.Emit(ctx, rollup.CriticalErrorEvent{ Err: fmt.Errorf("failed to process block with only deposit transactions: %w", ev.Err), }) return } - if ev.Attributes.IsDerived() && eq.cfg.IsHolocene(ev.Attributes.DerivedFrom.Time) { - eq.emitDepositsOnlyPayloadAttributesRequest(ctx, ev.Attributes.Parent.ID(), ev.Attributes.DerivedFrom) + if ev.Attributes.IsDerived() && e.rollupCfg.IsHolocene(ev.Attributes.DerivedFrom.Time) { + e.emitDepositsOnlyPayloadAttributesRequest(ctx, ev.Attributes.Parent.ID(), ev.Attributes.DerivedFrom) return } // Revert the pending safe head to the safe head. - eq.ec.SetPendingSafeL2Head(eq.ec.SafeL2Head()) + e.SetPendingSafeL2Head(e.safeHead) // suppress the error b/c we want to retry with the next batch from the batch queue // If there is no valid batch the node will eventually force a deposit only block. If // the deposit only block fails, this will return the critical error above. // Try to restore to previous known unsafe chain. 
- eq.ec.SetBackupUnsafeL2Head(eq.ec.BackupUnsafeL2Head(), true) + e.SetBackupUnsafeL2Head(e.backupUnsafeHead, true) // drop the payload without inserting it into the engine // Signal that we deemed the attributes as unfit - eq.emitter.Emit(ctx, InvalidPayloadAttributesEvent(ev)) + e.emitter.Emit(ctx, InvalidPayloadAttributesEvent(ev)) } -func (eq *EngDeriver) emitDepositsOnlyPayloadAttributesRequest(ctx context.Context, parent eth.BlockID, derivedFrom eth.L1BlockRef) { - eq.log.Warn("Holocene active, requesting deposits-only attributes", "parent", parent, "derived_from", derivedFrom) +func (e *EngineController) emitDepositsOnlyPayloadAttributesRequest(ctx context.Context, parent eth.BlockID, derivedFrom eth.L1BlockRef) { + e.log.Warn("Holocene active, requesting deposits-only attributes", "parent", parent, "derived_from", derivedFrom) // request deposits-only version - eq.emitter.Emit(ctx, derive.DepositsOnlyPayloadAttributesRequestEvent{ + e.emitter.Emit(ctx, derive.DepositsOnlyPayloadAttributesRequestEvent{ Parent: parent, DerivedFrom: derivedFrom, }) diff --git a/op-node/rollup/engine/build_seal.go b/op-node/rollup/engine/build_seal.go index 63b80fe1a4bc2..026b25dce3206 100644 --- a/op-node/rollup/engine/build_seal.go +++ b/op-node/rollup/engine/build_seal.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" @@ -54,16 +55,16 @@ func (ev BuildSealEvent) String() string { return "build-seal" } -func (eq *EngDeriver) onBuildSeal(ctx context.Context, ev BuildSealEvent) { - rpcCtx, cancel := context.WithTimeout(eq.ctx, buildSealTimeout) +func (e *EngineController) onBuildSeal(ctx context.Context, ev BuildSealEvent) { + rpcCtx, cancel := context.WithTimeout(e.ctx, buildSealTimeout) defer cancel() sealingStart := time.Now() - envelope, err := eq.ec.engine.GetPayload(rpcCtx, ev.Info) + envelope, err := e.engine.GetPayload(rpcCtx, 
ev.Info) if err != nil { var rpcErr rpc.Error if errors.As(err, &rpcErr) && eth.ErrorCode(rpcErr.ErrorCode()) == eth.UnknownPayload { - eq.log.Warn("Cannot seal block, payload ID is unknown", + e.log.Warn("Cannot seal block, payload ID is unknown", "payloadID", ev.Info.ID, "payload_time", ev.Info.Timestamp, "started_time", ev.BuildStarted) } @@ -72,7 +73,7 @@ func (eq *EngDeriver) onBuildSeal(ctx context.Context, ev BuildSealEvent) { // So the user (attributes-handler or sequencer) should be able to re-attempt the exact // same attributes with a new block-building job from here to recover from this error. // We name it "expired", as this generally identifies a timeout, unknown job, or otherwise invalidated work. - eq.emitter.Emit(ctx, PayloadSealExpiredErrorEvent{ + e.emitter.Emit(ctx, PayloadSealExpiredErrorEvent{ Info: ev.Info, Err: fmt.Errorf("failed to seal execution payload (ID: %s): %w", ev.Info.ID, err), Concluding: ev.Concluding, @@ -82,7 +83,7 @@ func (eq *EngDeriver) onBuildSeal(ctx context.Context, ev BuildSealEvent) { } if err := sanityCheckPayload(envelope.ExecutionPayload); err != nil { - eq.emitter.Emit(ctx, PayloadSealInvalidEvent{ + e.emitter.Emit(ctx, PayloadSealInvalidEvent{ Info: ev.Info, Err: fmt.Errorf("failed sanity-check of execution payload contents (ID: %s, blockhash: %s): %w", ev.Info.ID, envelope.ExecutionPayload.BlockHash, err), @@ -92,9 +93,9 @@ func (eq *EngDeriver) onBuildSeal(ctx context.Context, ev BuildSealEvent) { return } - ref, err := derive.PayloadToBlockRef(eq.cfg, envelope.ExecutionPayload) + ref, err := derive.PayloadToBlockRef(e.rollupCfg, envelope.ExecutionPayload) if err != nil { - eq.emitter.Emit(ctx, PayloadSealInvalidEvent{ + e.emitter.Emit(ctx, PayloadSealInvalidEvent{ Info: ev.Info, Err: fmt.Errorf("failed to decode L2 block ref from payload: %w", err), Concluding: ev.Concluding, @@ -106,17 +107,17 @@ func (eq *EngDeriver) onBuildSeal(ctx context.Context, ev BuildSealEvent) { now := time.Now() sealTime := 
now.Sub(sealingStart) buildTime := now.Sub(ev.BuildStarted) - eq.metrics.RecordSequencerSealingTime(sealTime) - eq.metrics.RecordSequencerBuildingDiffTime(buildTime - time.Duration(eq.cfg.BlockTime)*time.Second) + e.metrics.RecordSequencerSealingTime(sealTime) + e.metrics.RecordSequencerBuildingDiffTime(buildTime - time.Duration(e.rollupCfg.BlockTime)*time.Second) txnCount := len(envelope.ExecutionPayload.Transactions) depositCount, _ := lastDeposit(envelope.ExecutionPayload.Transactions) - eq.metrics.CountSequencedTxsInBlock(txnCount, depositCount) + e.metrics.CountSequencedTxsInBlock(txnCount, depositCount) - eq.log.Debug("Built new L2 block", "l2_unsafe", ref, "l1_origin", ref.L1Origin, + e.log.Debug("Built new L2 block", "l2_unsafe", ref, "l1_origin", ref.L1Origin, "txs", txnCount, "deposits", depositCount, "time", ref.Time, "seal_time", sealTime, "build_time", buildTime) - eq.emitter.Emit(ctx, BuildSealedEvent{ + e.emitter.Emit(ctx, BuildSealedEvent{ Concluding: ev.Concluding, DerivedFrom: ev.DerivedFrom, BuildStarted: ev.BuildStarted, @@ -125,3 +126,58 @@ func (eq *EngDeriver) onBuildSeal(ctx context.Context, ev BuildSealEvent) { Ref: ref, }) } + +// isDepositTx checks an opaqueTx to determine if it is a Deposit Transaction +// It has to return an error in the case the transaction is empty +func isDepositTx(opaqueTx eth.Data) (bool, error) { + if len(opaqueTx) == 0 { + return false, errors.New("empty transaction") + } + return opaqueTx[0] == types.DepositTxType, nil +} + +// lastDeposit finds the index of last deposit at the start of the transactions. +// It walks the transactions from the start until it finds a non-deposit tx. 
+// An error is returned if any looked at transaction cannot be decoded +func lastDeposit(txns []eth.Data) (int, error) { + var lastDeposit int + for i, tx := range txns { + deposit, err := isDepositTx(tx) + if err != nil { + return 0, fmt.Errorf("invalid transaction at idx %d", i) + } + if deposit { + lastDeposit = i + } else { + break + } + } + return lastDeposit, nil +} + +func sanityCheckPayload(payload *eth.ExecutionPayload) error { + // Sanity check payload before inserting it + if len(payload.Transactions) == 0 { + return errors.New("no transactions in returned payload") + } + if payload.Transactions[0][0] != types.DepositTxType { + return fmt.Errorf("first transaction was not deposit tx. Got %v", payload.Transactions[0][0]) + } + // Ensure that the deposits are first + lastDeposit, err := lastDeposit(payload.Transactions) + if err != nil { + return fmt.Errorf("failed to find last deposit: %w", err) + } + // Ensure no deposits after last deposit + for i := lastDeposit + 1; i < len(payload.Transactions); i++ { + tx := payload.Transactions[i] + deposit, err := isDepositTx(tx) + if err != nil { + return fmt.Errorf("failed to decode transaction idx %d: %w", i, err) + } + if deposit { + return fmt.Errorf("deposit tx (%d) after other tx in l2 block with prev deposit at idx %d", i, lastDeposit) + } + } + return nil +} diff --git a/op-node/rollup/engine/build_sealed.go b/op-node/rollup/engine/build_sealed.go index 6d1197cf9100b..2a32cdbea2885 100644 --- a/op-node/rollup/engine/build_sealed.go +++ b/op-node/rollup/engine/build_sealed.go @@ -25,10 +25,10 @@ func (ev BuildSealedEvent) String() string { return "build-sealed" } -func (eq *EngDeriver) onBuildSealed(ctx context.Context, ev BuildSealedEvent) { +func (e *EngineController) onBuildSealed(ctx context.Context, ev BuildSealedEvent) { // If a (pending) safe block, immediately process the block if ev.DerivedFrom != (eth.L1BlockRef{}) { - eq.emitter.Emit(ctx, PayloadProcessEvent{ + e.emitter.Emit(ctx, 
PayloadProcessEvent{ Concluding: ev.Concluding, DerivedFrom: ev.DerivedFrom, Envelope: ev.Envelope, diff --git a/op-node/rollup/engine/build_start.go b/op-node/rollup/engine/build_start.go index de439773d1296..b5b9b8ec98915 100644 --- a/op-node/rollup/engine/build_start.go +++ b/op-node/rollup/engine/build_start.go @@ -18,25 +18,25 @@ func (ev BuildStartEvent) String() string { return "build-start" } -func (eq *EngDeriver) onBuildStart(ctx context.Context, ev BuildStartEvent) { - rpcCtx, cancel := context.WithTimeout(eq.ctx, buildStartTimeout) +func (e *EngineController) onBuildStart(ctx context.Context, ev BuildStartEvent) { + rpcCtx, cancel := context.WithTimeout(e.ctx, buildStartTimeout) defer cancel() if ev.Attributes.DerivedFrom != (eth.L1BlockRef{}) && - eq.ec.PendingSafeL2Head().Hash != ev.Attributes.Parent.Hash { + e.pendingSafeHead.Hash != ev.Attributes.Parent.Hash { // Warn about small reorgs, happens when pending safe head is getting rolled back - eq.log.Warn("block-attributes derived from L1 do not build on pending safe head, likely reorg", - "pending_safe", eq.ec.PendingSafeL2Head(), "attributes_parent", ev.Attributes.Parent) + e.log.Warn("block-attributes derived from L1 do not build on pending safe head, likely reorg", + "pending_safe", e.pendingSafeHead, "attributes_parent", ev.Attributes.Parent) } fcEvent := ForkchoiceUpdateEvent{ UnsafeL2Head: ev.Attributes.Parent, - SafeL2Head: eq.ec.safeHead, - FinalizedL2Head: eq.ec.finalizedHead, + SafeL2Head: e.safeHead, + FinalizedL2Head: e.finalizedHead, } if fcEvent.UnsafeL2Head.Number < fcEvent.FinalizedL2Head.Number { err := fmt.Errorf("invalid block-building pre-state, unsafe head %s is behind finalized head %s", fcEvent.UnsafeL2Head, fcEvent.FinalizedL2Head) - eq.emitter.Emit(ctx, rollup.CriticalErrorEvent{Err: err}) // make the node exit, things are very wrong. + e.emitter.Emit(ctx, rollup.CriticalErrorEvent{Err: err}) // make the node exit, things are very wrong. 
return } fc := eth.ForkchoiceState{ @@ -45,33 +45,33 @@ func (eq *EngDeriver) onBuildStart(ctx context.Context, ev BuildStartEvent) { FinalizedBlockHash: fcEvent.FinalizedL2Head.Hash, } buildStartTime := time.Now() - id, errTyp, err := startPayload(rpcCtx, eq.ec.engine, fc, ev.Attributes.Attributes) + id, errTyp, err := e.startPayload(rpcCtx, fc, ev.Attributes.Attributes) if err != nil { switch errTyp { case BlockInsertTemporaryErr: // RPC errors are recoverable, we can retry the buffered payload attributes later. - eq.emitter.Emit(ctx, rollup.EngineTemporaryErrorEvent{ + e.emitter.Emit(ctx, rollup.EngineTemporaryErrorEvent{ Err: fmt.Errorf("temporarily cannot insert new safe block: %w", err), }) return case BlockInsertPrestateErr: - eq.emitter.Emit(ctx, rollup.ResetEvent{ + e.emitter.Emit(ctx, rollup.ResetEvent{ Err: fmt.Errorf("need reset to resolve pre-state problem: %w", err), }) return case BlockInsertPayloadErr: - eq.emitter.Emit(ctx, BuildInvalidEvent{Attributes: ev.Attributes, Err: err}) + e.emitter.Emit(ctx, BuildInvalidEvent{Attributes: ev.Attributes, Err: err}) return default: - eq.emitter.Emit(ctx, rollup.CriticalErrorEvent{ + e.emitter.Emit(ctx, rollup.CriticalErrorEvent{ Err: fmt.Errorf("unknown error type %d: %w", errTyp, err), }) return } } - eq.emitter.Emit(ctx, fcEvent) + e.emitter.Emit(ctx, fcEvent) - eq.emitter.Emit(ctx, BuildStartedEvent{ + e.emitter.Emit(ctx, BuildStartedEvent{ Info: eth.PayloadInfo{ID: id, Timestamp: uint64(ev.Attributes.Attributes.Timestamp)}, BuildStarted: buildStartTime, Concluding: ev.Attributes.Concluding, diff --git a/op-node/rollup/engine/build_started.go b/op-node/rollup/engine/build_started.go index c9a3a25695005..4b4b31be5c9e6 100644 --- a/op-node/rollup/engine/build_started.go +++ b/op-node/rollup/engine/build_started.go @@ -24,10 +24,10 @@ func (ev BuildStartedEvent) String() string { return "build-started" } -func (eq *EngDeriver) onBuildStarted(ctx context.Context, ev BuildStartedEvent) { +func (e 
*EngineController) onBuildStarted(ctx context.Context, ev BuildStartedEvent) { // If a (pending) safe block, immediately seal the block if ev.DerivedFrom != (eth.L1BlockRef{}) { - eq.emitter.Emit(ctx, BuildSealEvent{ + e.emitter.Emit(ctx, BuildSealEvent{ Info: ev.Info, BuildStarted: ev.BuildStarted, Concluding: ev.Concluding, diff --git a/op-node/rollup/engine/engine_controller.go b/op-node/rollup/engine/engine_controller.go index e3b18cfd77567..3a6b871b741ff 100644 --- a/op-node/rollup/engine/engine_controller.go +++ b/op-node/rollup/engine/engine_controller.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" + opmetrics "github.com/ethereum-optimism/optimism/op-node/metrics" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/sync" @@ -37,6 +38,24 @@ const ( var ErrNoFCUNeeded = errors.New("no FCU call was needed") +// Max memory used for buffering unsafe payloads +const maxUnsafePayloadsMemory = 500 * 1024 * 1024 + +// ResetEngineRequestEvent requests the EngineController to walk +// the L2 chain backwards until it finds a plausible unsafe head, +// and find an L2 safe block that is guaranteed to still be from the L1 chain. +// This event is not used in interop. 
+type ResetEngineRequestEvent struct { +} + +func (ev ResetEngineRequestEvent) String() string { + return "reset-engine-request" +} + +type Engine interface { + ExecEngine + derive.L2Source +} type ExecEngine interface { GetPayload(ctx context.Context, payloadInfo eth.PayloadInfo) (*eth.ExecutionPayloadEnvelope, error) ForkchoiceUpdate(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) @@ -45,9 +64,32 @@ type ExecEngine interface { L2BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L2BlockRef, error) } -type ECMetrics interface { - derive.Metrics - RecordL2Ref(name string, ref eth.L2BlockRef) +// Metrics interface for CLSync functionality +type Metrics interface { + RecordUnsafePayloadsBuffer(length uint64, memSize uint64, next eth.BlockID) +} + +type SyncDeriver interface { + OnELSyncStarted() +} + +type AttributesForceResetter interface { + ForceReset(ctx context.Context, localUnsafe, crossUnsafe, localSafe, crossSafe, finalized eth.L2BlockRef) +} + +type PipelineForceResetter interface { + ResetPipeline() +} + +type OriginSelectorForceResetter interface { + ResetOrigins() +} + +// CrossUpdateHandler handles both cross-unsafe and cross-safe L2 head changes. +// Nil check required because op-program omits this handler. 
+type CrossUpdateHandler interface { + OnCrossUnsafeUpdate(ctx context.Context, crossUnsafe eth.L2BlockRef, localUnsafe eth.L2BlockRef) + OnCrossSafeUpdate(ctx context.Context, crossSafe eth.L2BlockRef, localSafe eth.L2BlockRef) } type DACClient interface { @@ -57,7 +99,7 @@ type DACClient interface { type EngineController struct { engine ExecEngine // Underlying execution engine RPC log log.Logger - metrics ECMetrics + metrics opmetrics.Metricer syncCfg *sync.Config syncStatus syncStatusEnum chainSpec *rollup.ChainSpec @@ -65,6 +107,10 @@ type EngineController struct { elStart time.Time clock clock.Clock + // L1 chain for reset functionality + l1 sync.L1Chain + + ctx context.Context emitter event.Emitter // To lock the engine RPC usage, such that components like the API, which need direct access, can protect their access. @@ -98,10 +144,27 @@ type EngineController struct { // because engine may forgot backupUnsafeHead or backupUnsafeHead is not part // of the chain. needFCUCallForBackupUnsafeReorg bool + + // For clearing safe head db when EL sync started + // EngineController is first initialized and used to initialize SyncDeriver. 
+ // Embed SyncDeriver into EngineController after initializing SyncDeriver + SyncDeriver SyncDeriver + + // Components that need to be notified during force reset + attributesResetter AttributesForceResetter + pipelineResetter PipelineForceResetter + originSelectorResetter OriginSelectorForceResetter + + // Handler for cross-unsafe and cross-safe updates + crossUpdateHandler CrossUpdateHandler + + unsafePayloads *PayloadsQueue // queue of unsafe payloads, ordered by ascending block number, may have gaps and duplicates } -func NewEngineController(engine ExecEngine, log log.Logger, metrics ECMetrics, - rollupCfg *rollup.Config, syncCfg *sync.Config, emitter event.Emitter, +var _ event.Deriver = (*EngineController)(nil) + +func NewEngineController(ctx context.Context, engine ExecEngine, log log.Logger, m opmetrics.Metricer, + rollupCfg *rollup.Config, syncCfg *sync.Config, l1 sync.L1Chain, emitter event.Emitter, ) *EngineController { syncStatus := syncStatusCL if syncCfg.SyncMode == sync.ELSync { @@ -109,36 +172,29 @@ func NewEngineController(engine ExecEngine, log log.Logger, metrics ECMetrics, } return &EngineController{ - engine: engine, - log: log, - metrics: metrics, - chainSpec: rollup.NewChainSpec(rollupCfg), - rollupCfg: rollupCfg, - syncCfg: syncCfg, - syncStatus: syncStatus, - clock: clock.SystemClock, - emitter: emitter, + engine: engine, + log: log, + metrics: m, + chainSpec: rollup.NewChainSpec(rollupCfg), + rollupCfg: rollupCfg, + syncCfg: syncCfg, + syncStatus: syncStatus, + clock: clock.SystemClock, + l1: l1, + ctx: ctx, + emitter: emitter, + unsafePayloads: NewPayloadsQueue(log, maxUnsafePayloadsMemory, payloadMemSize), } } -// State Getters - func (e *EngineController) UnsafeL2Head() eth.L2BlockRef { return e.unsafeHead } -func (e *EngineController) CrossUnsafeL2Head() eth.L2BlockRef { - return e.crossUnsafeHead -} - func (e *EngineController) PendingSafeL2Head() eth.L2BlockRef { return e.pendingSafeHead } -func (e *EngineController) 
LocalSafeL2Head() eth.L2BlockRef { - return e.localSafeHead -} - func (e *EngineController) SafeL2Head() eth.L2BlockRef { return e.safeHead } @@ -151,11 +207,32 @@ func (e *EngineController) BackupUnsafeL2Head() eth.L2BlockRef { return e.backupUnsafeHead } +func (e *EngineController) RequestForkchoiceUpdate(ctx context.Context) { + e.mu.Lock() + defer e.mu.Unlock() + + e.requestForkchoiceUpdate(ctx) +} + +func (e *EngineController) requestForkchoiceUpdate(ctx context.Context) { + e.emitter.Emit(ctx, ForkchoiceUpdateEvent{ + UnsafeL2Head: e.unsafeHead, + SafeL2Head: e.safeHead, + FinalizedL2Head: e.finalizedHead, + }) +} + func (e *EngineController) IsEngineSyncing() bool { - return e.syncStatus == syncStatusWillStartEL || e.syncStatus == syncStatusStartedEL || e.syncStatus == syncStatusFinishedELButNotFinalized + e.mu.Lock() + defer e.mu.Unlock() + return e.isEngineSyncing() } -// Setters +func (e *EngineController) isEngineSyncing() bool { + return e.syncStatus == syncStatusWillStartEL || + e.syncStatus == syncStatusStartedEL || + e.syncStatus == syncStatusFinishedELButNotFinalized +} // SetFinalizedHead implements LocalEngineControl. func (e *EngineController) SetFinalizedHead(r eth.L2BlockRef) { @@ -204,6 +281,24 @@ func (e *EngineController) SetBackupUnsafeL2Head(r eth.L2BlockRef, triggerReorg e.needFCUCallForBackupUnsafeReorg = triggerReorg } +func (e *EngineController) SetCrossUpdateHandler(handler CrossUpdateHandler) { + e.crossUpdateHandler = handler +} + +func (e *EngineController) onUnsafeUpdate(ctx context.Context, crossUnsafe, localUnsafe eth.L2BlockRef) { + // Nil check required because op-program omits this handler. + if e.crossUpdateHandler != nil { + e.crossUpdateHandler.OnCrossUnsafeUpdate(ctx, crossUnsafe, localUnsafe) + } +} + +func (e *EngineController) onSafeUpdate(ctx context.Context, crossSafe, localSafe eth.L2BlockRef) { + // Nil check required because op-program omits this handler. 
+ if e.crossUpdateHandler != nil { + e.crossUpdateHandler.OnCrossSafeUpdate(ctx, crossSafe, localSafe) + } +} + // logSyncProgressMaybe helps log forkchoice state-changes when applicable. // First, the pre-state is registered. // A callback is returned to then log the changes to the pre-state, if any. @@ -242,7 +337,7 @@ func (e *EngineController) logSyncProgressMaybe() func() { "l2_pending_safe", e.pendingSafeHead, "l2_unsafe", e.unsafeHead, "l2_backup_unsafe", e.backupUnsafeHead, - "l2_time", e.UnsafeL2Head().Time, + "l2_time", e.unsafeHead.Time, ) } } @@ -326,13 +421,11 @@ func (e *EngineController) initializeUnknowns(ctx context.Context) error { return nil } -// TryUpdateEngine attempts to update the engine with the current forkchoice state of the rollup node, -// this is a no-op if the nodes already agree on the forkchoice state. -func (e *EngineController) TryUpdateEngine(ctx context.Context) error { +func (e *EngineController) tryUpdateEngineInternal(ctx context.Context) error { if !e.needFCUCall { return ErrNoFCUNeeded } - if e.IsEngineSyncing() { + if e.isEngineSyncing() { e.log.Warn("Attempting to update forkchoice state while EL syncing") } if err := e.initializeUnknowns(ctx); err != nil { @@ -365,11 +458,7 @@ func (e *EngineController) TryUpdateEngine(ctx context.Context) error { } } if fcRes.PayloadStatus.Status == eth.ExecutionValid { - e.emitter.Emit(ctx, ForkchoiceUpdateEvent{ - UnsafeL2Head: e.unsafeHead, - SafeL2Head: e.safeHead, - FinalizedL2Head: e.finalizedHead, - }) + e.requestForkchoiceUpdate(ctx) } if e.unsafeHead == e.safeHead && e.safeHead == e.pendingSafeHead { // Remove backupUnsafeHead because this backup will be never used after consolidation. @@ -379,7 +468,31 @@ func (e *EngineController) TryUpdateEngine(ctx context.Context) error { return nil } +// tryUpdateEngine attempts to update the engine with the current forkchoice state of the rollup node, +// this is a no-op if the nodes already agree on the forkchoice state. 
+func (e *EngineController) tryUpdateEngine(ctx context.Context) { + // If we don't need to call FCU, keep going b/c this was a no-op. If we needed to + // perform a network call, then we should yield even if we did not encounter an error. + if err := e.tryUpdateEngineInternal(e.ctx); err != nil && !errors.Is(err, ErrNoFCUNeeded) { + if errors.Is(err, derive.ErrReset) { + e.emitter.Emit(ctx, rollup.ResetEvent{Err: err}) + } else if errors.Is(err, derive.ErrTemporary) { + e.emitter.Emit(ctx, rollup.EngineTemporaryErrorEvent{Err: err}) + } else { + e.emitter.Emit(ctx, rollup.CriticalErrorEvent{ + Err: fmt.Errorf("unexpected tryUpdateEngine error type: %w", err), + }) + } + } +} + func (e *EngineController) InsertUnsafePayload(ctx context.Context, envelope *eth.ExecutionPayloadEnvelope, ref eth.L2BlockRef) error { + e.mu.Lock() + defer e.mu.Unlock() + return e.insertUnsafePayload(ctx, envelope, ref) +} + +func (e *EngineController) insertUnsafePayload(ctx context.Context, envelope *eth.ExecutionPayloadEnvelope, ref eth.L2BlockRef) error { // Check if there is a finalized head once when doing EL sync. 
If so, transition to CL sync if e.syncStatus == syncStatusWillStartEL { b, err := e.engine.L2BlockRefByLabel(ctx, eth.Finalized) @@ -388,7 +501,7 @@ func (e *EngineController) InsertUnsafePayload(ctx context.Context, envelope *et e.syncStatus = syncStatusStartedEL e.log.Info("Starting EL sync") e.elStart = e.clock.Now() - e.emitter.Emit(ctx, ELSyncStartedEvent{}) + e.SyncDeriver.OnELSyncStarted() } else if err == nil { e.syncStatus = syncStatusFinishedEL e.log.Info("Skipping EL sync and going straight to CL sync because there is a finalized block", "id", b.ID()) @@ -429,7 +542,7 @@ func (e *EngineController) InsertUnsafePayload(ctx context.Context, envelope *et e.emitter.Emit(ctx, UnsafeUpdateEvent{Ref: ref}) e.SetLocalSafeHead(ref) e.SetSafeHead(ref) - e.emitter.Emit(ctx, CrossSafeUpdateEvent{LocalSafe: ref, CrossSafe: ref}) + e.onSafeUpdate(ctx, ref, ref) e.SetFinalizedHead(ref) } logFn := e.logSyncProgressMaybe() @@ -465,11 +578,7 @@ func (e *EngineController) InsertUnsafePayload(ctx context.Context, envelope *et } if fcRes.PayloadStatus.Status == eth.ExecutionValid { - e.emitter.Emit(ctx, ForkchoiceUpdateEvent{ - UnsafeL2Head: e.unsafeHead, - SafeL2Head: e.safeHead, - FinalizedL2Head: e.finalizedHead, - }) + e.requestForkchoiceUpdate(ctx) } totalTime := fcu2Finish.Sub(newPayloadStart) @@ -492,11 +601,11 @@ func (e *EngineController) shouldTryBackupUnsafeReorg() bool { return false } // This method must be never called when EL sync. If EL sync is in progress, early return. 
- if e.IsEngineSyncing() { + if e.isEngineSyncing() { e.log.Warn("Attempting to unsafe reorg using backupUnsafe while EL syncing") return false } - if e.BackupUnsafeL2Head() == (eth.L2BlockRef{}) { // sanity check backupUnsafeHead is there + if e.backupUnsafeHead == (eth.L2BlockRef{}) { // sanity check backupUnsafeHead is there e.log.Warn("Attempting to unsafe reorg using backupUnsafe even though it is empty") e.SetBackupUnsafeL2Head(eth.L2BlockRef{}, false) return false @@ -504,9 +613,15 @@ func (e *EngineController) shouldTryBackupUnsafeReorg() bool { return true } -// TryBackupUnsafeReorg attempts to reorg(restore) unsafe head to backupUnsafeHead. -// If succeeds, update current forkchoice state to the rollup node. func (e *EngineController) TryBackupUnsafeReorg(ctx context.Context) (bool, error) { + e.mu.Lock() + defer e.mu.Unlock() + return e.tryBackupUnsafeReorg(ctx) +} + +// tryBackupUnsafeReorg attempts to reorg(restore) unsafe head to backupUnsafeHead. +// If succeeds, update current forkchoice state to the rollup node. +func (e *EngineController) tryBackupUnsafeReorg(ctx context.Context) (bool, error) { if !e.shouldTryBackupUnsafeReorg() { // Do not need to perform FCU. return false, nil @@ -546,15 +661,12 @@ func (e *EngineController) TryBackupUnsafeReorg(ctx context.Context) (bool, erro } } if fcRes.PayloadStatus.Status == eth.ExecutionValid { - e.emitter.Emit(ctx, ForkchoiceUpdateEvent{ - UnsafeL2Head: e.backupUnsafeHead, - SafeL2Head: e.safeHead, - FinalizedL2Head: e.finalizedHead, - }) // Execution engine accepted the reorg. 
e.log.Info("successfully reorged unsafe head using backupUnsafe", "unsafe", e.backupUnsafeHead.ID()) - e.SetUnsafeHead(e.BackupUnsafeL2Head()) + e.SetUnsafeHead(e.backupUnsafeHead) e.SetBackupUnsafeL2Head(eth.L2BlockRef{}, false) + + e.requestForkchoiceUpdate(ctx) return true, nil } e.SetBackupUnsafeL2Head(eth.L2BlockRef{}, false) @@ -562,3 +674,398 @@ func (e *EngineController) TryBackupUnsafeReorg(ctx context.Context) (bool, erro return true, derive.NewTemporaryError(fmt.Errorf("cannot restore unsafe chain using backupUnsafe: err: %w", eth.ForkchoiceUpdateErr(fcRes.PayloadStatus))) } + +func (e *EngineController) TryUpdateEngine(ctx context.Context) { + e.mu.Lock() + defer e.mu.Unlock() + e.tryUpdateEngine(ctx) +} + +func (e *EngineController) OnEvent(ctx context.Context, ev event.Event) bool { + e.mu.Lock() + defer e.mu.Unlock() + // TODO(#16917) Remove Event System Refactor Comments + // PromoteUnsafeEvent, PromotePendingSafeEvent, PromoteLocalSafeEvent fan out is updated to procedural + // PromoteSafeEvent fan out is updated to procedural PromoteSafe method call + switch x := ev.(type) { + case UnsafeUpdateEvent: + // pre-interop everything that is local-unsafe is also immediately cross-unsafe. + if !e.rollupCfg.IsInterop(x.Ref.Time) { + e.emitter.Emit(ctx, PromoteCrossUnsafeEvent(x)) + } + // Try to apply the forkchoice changes + e.tryUpdateEngine(ctx) + case PromoteCrossUnsafeEvent: + e.SetCrossUnsafeHead(x.Ref) + e.onUnsafeUpdate(ctx, x.Ref, e.unsafeHead) + case LocalSafeUpdateEvent: + // pre-interop everything that is local-safe is also immediately cross-safe. 
+ if !e.rollupCfg.IsInterop(x.Ref.Time) { + e.PromoteSafe(ctx, x.Ref, x.Source) + } + case InteropInvalidateBlockEvent: + e.emitter.Emit(ctx, BuildStartEvent{Attributes: x.Attributes}) + case BuildStartEvent: + e.onBuildStart(ctx, x) + case BuildStartedEvent: + e.onBuildStarted(ctx, x) + case BuildSealEvent: + e.onBuildSeal(ctx, x) + case BuildSealedEvent: + e.onBuildSealed(ctx, x) + case BuildInvalidEvent: + e.onBuildInvalid(ctx, x) + case BuildCancelEvent: + e.onBuildCancel(ctx, x) + case PayloadProcessEvent: + e.onPayloadProcess(ctx, x) + case PayloadSuccessEvent: + e.onPayloadSuccess(ctx, x) + case PayloadInvalidEvent: + e.onInvalidPayload(x) + case ForkchoiceUpdateEvent: + e.onForkchoiceUpdate(ctx, x) + case ResetEngineRequestEvent: + e.onResetEngineRequest(ctx) + default: + return false + } + return true +} + +func (e *EngineController) RequestPendingSafeUpdate(ctx context.Context) { + e.mu.Lock() + defer e.mu.Unlock() + e.emitter.Emit(ctx, PendingSafeUpdateEvent{ + PendingSafe: e.pendingSafeHead, + Unsafe: e.unsafeHead, + }) +} + +// TryUpdatePendingSafe updates the pending safe head if the new reference is newer, acquiring lock +func (e *EngineController) TryUpdatePendingSafe(ctx context.Context, ref eth.L2BlockRef, concluding bool, source eth.L1BlockRef) { + e.mu.Lock() + defer e.mu.Unlock() + e.tryUpdatePendingSafe(ctx, ref, concluding, source) +} + +// tryUpdatePendingSafe updates the pending safe head if the new reference is newer +func (e *EngineController) tryUpdatePendingSafe(ctx context.Context, ref eth.L2BlockRef, concluding bool, source eth.L1BlockRef) { + // Only promote if not already stale. + // Resets/overwrites happen through engine-resets, not through promotion. 
+ if ref.Number > e.pendingSafeHead.Number { + e.log.Debug("Updating pending safe", "pending_safe", ref, "local_safe", e.localSafeHead, "unsafe", e.unsafeHead, "concluding", concluding) + e.SetPendingSafeL2Head(ref) + e.emitter.Emit(ctx, PendingSafeUpdateEvent{ + PendingSafe: e.pendingSafeHead, + Unsafe: e.unsafeHead, + }) + } +} + +// TryUpdateLocalSafe updates the local safe head if the new reference is newer and concluding, acquiring lock +func (e *EngineController) TryUpdateLocalSafe(ctx context.Context, ref eth.L2BlockRef, concluding bool, source eth.L1BlockRef) { + e.mu.Lock() + defer e.mu.Unlock() + e.tryUpdateLocalSafe(ctx, ref, concluding, source) +} + +// tryUpdateLocalSafe updates the local safe head if the new reference is newer and concluding +func (e *EngineController) tryUpdateLocalSafe(ctx context.Context, ref eth.L2BlockRef, concluding bool, source eth.L1BlockRef) { + if concluding && ref.Number > e.localSafeHead.Number { + // Promote to local safe + e.log.Debug("Updating local safe", "local_safe", ref, "safe", e.safeHead, "unsafe", e.unsafeHead) + e.SetLocalSafeHead(ref) + e.emitter.Emit(ctx, LocalSafeUpdateEvent{Ref: ref, Source: source}) + } +} + +// TryUpdateUnsafe updates the unsafe head and backs up the previous one if needed +func (e *EngineController) tryUpdateUnsafe(ctx context.Context, ref eth.L2BlockRef) { + // Backup unsafeHead when new block is not built on original unsafe head. 
+ if e.unsafeHead.Number >= ref.Number { + e.SetBackupUnsafeL2Head(e.unsafeHead, false) + } + e.SetUnsafeHead(ref) + e.emitter.Emit(ctx, UnsafeUpdateEvent{Ref: ref}) +} + +func (e *EngineController) PromoteSafe(ctx context.Context, ref eth.L2BlockRef, source eth.L1BlockRef) { + e.log.Debug("Updating safe", "safe", ref, "unsafe", e.unsafeHead) + e.SetSafeHead(ref) + // Finalizer can pick up this safe cross-block now + e.emitter.Emit(ctx, SafeDerivedEvent{Safe: ref, Source: source}) + e.onSafeUpdate(ctx, e.safeHead, e.localSafeHead) + if ref.Number > e.crossUnsafeHead.Number { + e.log.Debug("Cross Unsafe Head is stale, updating to match cross safe", "cross_unsafe", e.crossUnsafeHead, "cross_safe", ref) + e.SetCrossUnsafeHead(ref) + e.onUnsafeUpdate(ctx, ref, e.unsafeHead) + } + // Try to apply the forkchoice changes + e.tryUpdateEngine(ctx) +} + +func (e *EngineController) PromoteFinalized(ctx context.Context, ref eth.L2BlockRef) { + e.mu.Lock() + defer e.mu.Unlock() + e.promoteFinalized(ctx, ref) +} +func (e *EngineController) promoteFinalized(ctx context.Context, ref eth.L2BlockRef) { + if ref.Number < e.finalizedHead.Number { + e.log.Error("Cannot rewind finality,", "ref", ref, "finalized", e.finalizedHead) + return + } + if ref.Number > e.safeHead.Number { + e.log.Error("Block must be safe before it can be finalized", "ref", ref, "safe", e.safeHead) + return + } + e.SetFinalizedHead(ref) + e.emitter.Emit(ctx, FinalizedUpdateEvent{Ref: ref}) + // Try to apply the forkchoice changes + e.tryUpdateEngine(ctx) +} + +// SetAttributesResetter sets the attributes component that needs force reset notifications +func (e *EngineController) SetAttributesResetter(resetter AttributesForceResetter) { + e.attributesResetter = resetter +} + +// SetPipelineResetter sets the pipeline component that needs force reset notifications +func (e *EngineController) SetPipelineResetter(resetter PipelineForceResetter) { + e.pipelineResetter = resetter +} + +// SetOriginSelectorResetter sets 
the origin selector component that needs force reset notifications +func (e *EngineController) SetOriginSelectorResetter(resetter OriginSelectorForceResetter) { + e.originSelectorResetter = resetter +} + +// ForceReset performs a forced reset to the specified block references, acquiring lock +func (e *EngineController) ForceReset(ctx context.Context, localUnsafe, crossUnsafe, localSafe, crossSafe, finalized eth.L2BlockRef) { + e.mu.Lock() + defer e.mu.Unlock() + e.forceReset(ctx, localUnsafe, crossUnsafe, localSafe, crossSafe, finalized) +} + +// forceReset performs a forced reset to the specified block references +func (e *EngineController) forceReset(ctx context.Context, localUnsafe, crossUnsafe, localSafe, crossSafe, finalized eth.L2BlockRef) { + // Reset other components before resetting the engine + if e.attributesResetter != nil { + e.attributesResetter.ForceReset(ctx, localUnsafe, crossUnsafe, localSafe, crossSafe, finalized) + } + if e.pipelineResetter != nil { + e.pipelineResetter.ResetPipeline() + } + // originSelectorResetter is only present when sequencing is enabled + if e.originSelectorResetter != nil { + e.originSelectorResetter.ResetOrigins() + } + + ForceEngineReset(e, localUnsafe, crossUnsafe, localSafe, crossSafe, finalized) + + if e.pipelineResetter != nil { + e.emitter.Emit(ctx, derive.ConfirmPipelineResetEvent{}) + } + + // Time to apply the changes to the underlying engine + e.tryUpdateEngine(ctx) + + v := EngineResetConfirmedEvent{ + LocalUnsafe: e.unsafeHead, + CrossUnsafe: e.crossUnsafeHead, + LocalSafe: e.localSafeHead, + CrossSafe: e.safeHead, + Finalized: e.finalizedHead, + } + // We do not emit the original event values, since those might not be set (optional attributes). 
+ e.emitter.Emit(ctx, v) + e.log.Info("Reset of Engine is completed", + "local_unsafe", v.LocalUnsafe, + "cross_unsafe", v.CrossUnsafe, + "local_safe", v.LocalSafe, + "cross_safe", v.CrossSafe, + "finalized", v.Finalized, + ) +} + +// LowestQueuedUnsafeBlock retrieves the first queued-up L2 unsafe payload, or a zeroed reference if there is none. +func (e *EngineController) LowestQueuedUnsafeBlock() eth.L2BlockRef { + payload := e.unsafePayloads.Peek() + if payload == nil { + return eth.L2BlockRef{} + } + ref, err := derive.PayloadToBlockRef(e.rollupCfg, payload.ExecutionPayload) + if err != nil { + return eth.L2BlockRef{} + } + return ref +} + +// onInvalidPayload checks if the first next-up payload matches the invalid payload. +// If so, the payload is dropped, to give the next payloads a try. +func (e *EngineController) onInvalidPayload(x PayloadInvalidEvent) { + e.log.Debug("Received invalid payload report", "block", x.Envelope.ExecutionPayload.ID(), + "err", x.Err, "timestamp", uint64(x.Envelope.ExecutionPayload.Timestamp)) + + block := x.Envelope.ExecutionPayload + if peek := e.unsafePayloads.Peek(); peek != nil && + block.BlockHash == peek.ExecutionPayload.BlockHash { + e.log.Warn("Dropping invalid unsafe payload", + "hash", block.BlockHash, "number", uint64(block.BlockNumber), + "timestamp", uint64(block.Timestamp)) + e.unsafePayloads.Pop() + } +} + +// onForkchoiceUpdate refreshes unsafe payload queue and peeks at the next applicable unsafe payload, if any, +// to apply on top of the received forkchoice pre-state. +// The payload is held on to until the forkchoice changes (success case) or the payload is reported to be invalid. 
+func (e *EngineController) onForkchoiceUpdate(ctx context.Context, event ForkchoiceUpdateEvent) { + e.log.Debug("Received forkchoice update", + "unsafe", event.UnsafeL2Head, "safe", event.SafeL2Head, "finalized", event.FinalizedL2Head) + + e.unsafePayloads.DropInapplicableUnsafePayloads(event) + nextEnvelope := e.unsafePayloads.Peek() + if nextEnvelope == nil { + e.log.Debug("No unsafe payload to process") + return + } + + // Only process the next payload if it is applicable on top of the current unsafe head. + // This avoids prematurely attempting to insert non-adjacent payloads (e.g. height gaps), + // which could otherwise trigger EL sync behavior. + refParentHash := nextEnvelope.ExecutionPayload.ParentHash + refBlockNumber := uint64(nextEnvelope.ExecutionPayload.BlockNumber) + if refParentHash != event.UnsafeL2Head.Hash || refBlockNumber != event.UnsafeL2Head.Number+1 { + e.log.Debug("Next unsafe payload is not applicable yet", + "nextHash", nextEnvelope.ExecutionPayload.BlockHash, "nextNumber", refBlockNumber, "unsafe", event.UnsafeL2Head) + return + } + + // We don't pop from the queue. If there is a temporary error then we can retry. + // Upon next forkchoice update or invalid-payload event we can remove it from the queue. + e.processUnsafePayload(ctx, nextEnvelope) +} + +// processUnsafePayload processes an unsafe payload by inserting it into the engine. +func (e *EngineController) processUnsafePayload(ctx context.Context, envelope *eth.ExecutionPayloadEnvelope) { + ref, err := derive.PayloadToBlockRef(e.rollupCfg, envelope.ExecutionPayload) + if err != nil { + e.log.Error("failed to decode L2 block ref from payload", "err", err) + return + } + // Avoid re-processing the same unsafe payload if it has already been processed. Because a FCU event calls processUnsafePayload + // it is possible to have multiple queued up calls for the same L2 block. 
This becomes an issue when processing + // a large number of unsafe payloads at once (like when iterating through the payload queue after the safe head has advanced). + if ref.BlockRef().ID() == e.unsafeHead.BlockRef().ID() { + return + } + if err := e.insertUnsafePayload(e.ctx, envelope, ref); err != nil { + e.log.Info("failed to insert payload", "ref", ref, + "txs", len(envelope.ExecutionPayload.Transactions), "err", err) + // yes, duplicate error-handling. After all derivers are interacting with the engine + // through events, we can drop the engine-controller interface: + // unify the events handler with the engine-controller, + // remove a lot of code, and not do this error translation. + if errors.Is(err, derive.ErrReset) { + e.emitter.Emit(ctx, rollup.ResetEvent{Err: err}) + } else if errors.Is(err, derive.ErrTemporary) { + e.emitter.Emit(ctx, rollup.EngineTemporaryErrorEvent{Err: err}) + } else { + e.emitter.Emit(ctx, rollup.CriticalErrorEvent{ + Err: fmt.Errorf("unexpected InsertUnsafePayload error type: %w", err), + }) + } + } else { + e.log.Info("successfully processed payload", "ref", ref, "txs", len(envelope.ExecutionPayload.Transactions)) + } +} + +// AddUnsafePayload schedules an execution payload to be processed, ahead of deriving it from L1. 
+func (e *EngineController) AddUnsafePayload(ctx context.Context, envelope *eth.ExecutionPayloadEnvelope) { + if envelope == nil { + e.log.Error("AddUnsafePayload cannot add nil unsafe payload") + return + } + e.mu.Lock() + defer e.mu.Unlock() + + e.log.Debug("Received payload", "payload", envelope.ExecutionPayload.ID()) + + if err := e.unsafePayloads.Push(envelope); err != nil { + e.log.Warn("Could not add unsafe payload", "id", envelope.ExecutionPayload.ID(), "timestamp", uint64(envelope.ExecutionPayload.Timestamp), "err", err) + return + } + p := e.unsafePayloads.Peek() + e.metrics.RecordUnsafePayloadsBuffer(uint64(e.unsafePayloads.Len()), e.unsafePayloads.MemSize(), p.ExecutionPayload.ID()) + e.log.Trace("Next unsafe payload to process", "next", p.ExecutionPayload.ID(), "timestamp", uint64(p.ExecutionPayload.Timestamp)) + + // request forkchoice update directly so we can process the payload + e.requestForkchoiceUpdate(ctx) +} + +// onResetEngineRequest handles the ResetEngineRequestEvent by finding L2 heads and performing a force reset +func (e *EngineController) onResetEngineRequest(ctx context.Context) { + result, err := sync.FindL2Heads(e.ctx, e.rollupCfg, e.l1, e.engine, e.log, e.syncCfg) + if err != nil { + e.emitter.Emit(ctx, rollup.ResetEvent{ + Err: fmt.Errorf("failed to find the L2 Heads to start from: %w", err), + }) + return + } + e.forceReset(ctx, result.Unsafe, result.Unsafe, result.Safe, result.Safe, result.Finalized) +} + +var ErrEngineSyncing = errors.New("engine is syncing") + +type BlockInsertionErrType uint + +const ( + // BlockInsertOK indicates that the payload was successfully executed and appended to the canonical chain. + BlockInsertOK BlockInsertionErrType = iota + // BlockInsertTemporaryErr indicates that the insertion failed but may succeed at a later time without changes to the payload. + BlockInsertTemporaryErr + // BlockInsertPrestateErr indicates that the pre-state to insert the payload could not be prepared, e.g. 
due to missing chain data. + BlockInsertPrestateErr + // BlockInsertPayloadErr indicates that the payload was invalid and cannot become canonical. + BlockInsertPayloadErr +) + +// startPayload starts an execution payload building process in the engine, with the given attributes. +// The severity of the error is distinguished to determine whether the same payload attributes may be re-attempted later. +func (e *EngineController) startPayload(ctx context.Context, fc eth.ForkchoiceState, attrs *eth.PayloadAttributes) (id eth.PayloadID, errType BlockInsertionErrType, err error) { + fcRes, err := e.engine.ForkchoiceUpdate(ctx, &fc, attrs) + if err != nil { + var rpcErr rpc.Error + if errors.As(err, &rpcErr) { + switch code := eth.ErrorCode(rpcErr.ErrorCode()); code { + case eth.InvalidForkchoiceState: + return eth.PayloadID{}, BlockInsertPrestateErr, fmt.Errorf("pre-block-creation forkchoice update was inconsistent with engine, need reset to resolve: %w", err) + case eth.InvalidPayloadAttributes: + return eth.PayloadID{}, BlockInsertPayloadErr, fmt.Errorf("payload attributes are not valid, cannot build block: %w", err) + default: + if code.IsEngineError() { + return eth.PayloadID{}, BlockInsertPrestateErr, fmt.Errorf("unexpected engine error code in forkchoice-updated response: %w", err) + } + return eth.PayloadID{}, BlockInsertTemporaryErr, fmt.Errorf("unexpected generic error code in forkchoice-updated response: %w", err) + } + } + + return eth.PayloadID{}, BlockInsertTemporaryErr, fmt.Errorf("failed to create new block via forkchoice: %w", err) + } + + switch fcRes.PayloadStatus.Status { + // TODO: snap sync - specify explicit different error type if node is syncing + case eth.ExecutionInvalid, eth.ExecutionInvalidBlockHash: + return eth.PayloadID{}, BlockInsertPayloadErr, eth.ForkchoiceUpdateErr(fcRes.PayloadStatus) + case eth.ExecutionValid: + if fcRes.PayloadID == nil { + return eth.PayloadID{}, BlockInsertTemporaryErr, errors.New("nil id in forkchoice result when 
expecting a valid ID") + } + return *fcRes.PayloadID, BlockInsertOK, nil + case eth.ExecutionSyncing: + return eth.PayloadID{}, BlockInsertTemporaryErr, ErrEngineSyncing + default: + return eth.PayloadID{}, BlockInsertTemporaryErr, eth.ForkchoiceUpdateErr(fcRes.PayloadStatus) + } +} diff --git a/op-node/rollup/engine/engine_controller_test.go b/op-node/rollup/engine/engine_controller_test.go new file mode 100644 index 0000000000000..481fafc2469b4 --- /dev/null +++ b/op-node/rollup/engine/engine_controller_test.go @@ -0,0 +1,210 @@ +package engine + +import ( + "context" + "math/big" + mrand "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-node/metrics" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-service/testutils" + "github.com/ethereum/go-ethereum/common" + gethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" +) + +func TestInvalidPayloadDropsHead(t *testing.T) { + emitter := &testutils.MockEmitter{} + ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, &rollup.Config{}, &sync.Config{}, &testutils.MockL1Source{}, emitter) + + payload := ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{ + BlockHash: common.Hash{0x01}, + }} + + emitter.ExpectOnce(PayloadInvalidEvent{}) + emitter.ExpectOnce(ForkchoiceUpdateEvent{}) + + // Add an unsafe payload requests a forkchoice update via engine controller + ec.AddUnsafePayload(context.Background(), payload) + + require.NotNil(t, ec.unsafePayloads.Peek()) + + // Mark it invalid; it should be dropped if it matches the queue head + ec.OnEvent(context.Background(), 
PayloadInvalidEvent{Envelope: payload}) + require.Nil(t, ec.unsafePayloads.Peek()) +} + +// buildSimpleCfgAndPayload creates a minimal rollup config and a valid payload (A1) on top of A0. +func buildSimpleCfgAndPayload(t *testing.T) (*rollup.Config, eth.L2BlockRef, eth.L2BlockRef, *eth.ExecutionPayloadEnvelope) { + t.Helper() + rng := mrand.New(mrand.NewSource(1234)) + refA := testutils.RandomBlockRef(rng) + + refA0 := eth.L2BlockRef{ + Hash: testutils.RandomHash(rng), + Number: 0, + ParentHash: common.Hash{}, + Time: refA.Time, + L1Origin: refA.ID(), + SequenceNumber: 0, + } + + cfg := &rollup.Config{ + Genesis: rollup.Genesis{ + L1: refA.ID(), + L2: refA0.ID(), + L2Time: refA0.Time, + SystemConfig: eth.SystemConfig{ + BatcherAddr: common.Address{42}, + Overhead: [32]byte{123}, + Scalar: [32]byte{42}, + GasLimit: 20_000_000, + }, + }, + BlockTime: 1, + SeqWindowSize: 2, + } + + refA1 := eth.L2BlockRef{ + Hash: testutils.RandomHash(rng), + Number: refA0.Number + 1, + ParentHash: refA0.Hash, + Time: refA0.Time + cfg.BlockTime, + L1Origin: refA.ID(), + SequenceNumber: 1, + } + + // Populate necessary L1 info fields + aL1Info := &testutils.MockBlockInfo{ + InfoParentHash: refA.ParentHash, + InfoNum: refA.Number, + InfoTime: refA.Time, + InfoHash: refA.Hash, + InfoBaseFee: big.NewInt(1), + InfoBlobBaseFee: big.NewInt(1), + InfoReceiptRoot: gethtypes.EmptyRootHash, + InfoRoot: testutils.RandomHash(rng), + InfoGasUsed: rng.Uint64(), + } + a1L1Info, err := derive.L1InfoDepositBytes(cfg, params.SepoliaChainConfig, cfg.Genesis.SystemConfig, refA1.SequenceNumber, aL1Info, refA1.Time) + require.NoError(t, err) + + payloadA1 := ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{ + ParentHash: refA1.ParentHash, + BlockNumber: eth.Uint64Quantity(refA1.Number), + Timestamp: eth.Uint64Quantity(refA1.Time), + BlockHash: refA1.Hash, + Transactions: []eth.Data{a1L1Info}, + }} + return cfg, refA0, refA1, payloadA1 +} + +func TestOnUnsafePayload_EnqueueEmit(t *testing.T) { 
+ cfg, _, _, payloadA1 := buildSimpleCfgAndPayload(t) + + emitter := &testutils.MockEmitter{} + ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{}, &testutils.MockL1Source{}, emitter) + + emitter.ExpectOnce(PayloadInvalidEvent{}) + emitter.ExpectOnce(ForkchoiceUpdateEvent{}) + + ec.AddUnsafePayload(context.Background(), payloadA1) + + got := ec.unsafePayloads.Peek() + require.NotNil(t, got) + require.Equal(t, payloadA1, got) +} + +func TestOnForkchoiceUpdate_ProcessRetryAndPop(t *testing.T) { + cfg, refA0, refA1, payloadA1 := buildSimpleCfgAndPayload(t) + + emitter := &testutils.MockEmitter{} + mockEngine := &testutils.MockEngine{} + cl := NewEngineController(context.Background(), mockEngine, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{SyncMode: sync.CLSync}, &testutils.MockL1Source{}, emitter) + + // queue payload A1 + emitter.ExpectOnceType("UnsafeUpdateEvent") + emitter.ExpectOnceType("PayloadInvalidEvent") + emitter.ExpectOnceType("ForkchoiceUpdateEvent") + emitter.ExpectOnceType("ForkchoiceUpdateEvent") + cl.AddUnsafePayload(context.Background(), payloadA1) + + // applicable forkchoice -> process once + mockEngine.ExpectGetPayload(eth.PayloadID{}, payloadA1, nil) + mockEngine.ExpectNewPayload(payloadA1.ExecutionPayload, nil, ð.PayloadStatusV1{Status: eth.ExecutionValid}, nil) + mockEngine.ExpectForkchoiceUpdate(ð.ForkchoiceState{HeadBlockHash: refA1.Hash, SafeBlockHash: common.Hash{}, FinalizedBlockHash: common.Hash{}}, nil, ð.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionValid}}, nil) + cl.OnEvent(context.Background(), ForkchoiceUpdateEvent{UnsafeL2Head: refA0, SafeL2Head: refA0, FinalizedL2Head: refA0}) + require.NotNil(t, cl.unsafePayloads.Peek(), "should not pop yet") + + // same forkchoice -> retry + cl.OnEvent(context.Background(), ForkchoiceUpdateEvent{UnsafeL2Head: refA0, SafeL2Head: refA0, FinalizedL2Head: refA0}) + require.NotNil(t, 
cl.unsafePayloads.Peek(), "still pending") + + // after applied (unsafe head == A1) -> pop + cl.OnEvent(context.Background(), ForkchoiceUpdateEvent{UnsafeL2Head: refA1, SafeL2Head: refA0, FinalizedL2Head: refA0}) + require.Nil(t, cl.unsafePayloads.Peek()) +} + +func TestLowestQueuedUnsafeBlock(t *testing.T) { + cfg, _, _, payloadA1 := buildSimpleCfgAndPayload(t) + + emitter := &testutils.MockEmitter{} + ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{SyncMode: sync.CLSync}, &testutils.MockL1Source{}, emitter) + + // empty -> zero + require.Equal(t, eth.L2BlockRef{}, ec.LowestQueuedUnsafeBlock()) + + // queue -> returns derived ref + _ = ec.unsafePayloads.Push(payloadA1) + want, err := derive.PayloadToBlockRef(cfg, payloadA1.ExecutionPayload) + require.NoError(t, err) + require.Equal(t, want, ec.LowestQueuedUnsafeBlock()) +} + +func TestLowestQueuedUnsafeBlock_OnDeriveErrorReturnsZero(t *testing.T) { + // missing L1-info in txs will cause derive error + emitter := &testutils.MockEmitter{} + ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, &rollup.Config{}, &sync.Config{SyncMode: sync.CLSync}, &testutils.MockL1Source{}, emitter) + + bad := ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{BlockNumber: 1, BlockHash: common.Hash{0xaa}}} + _ = ec.unsafePayloads.Push(bad) + require.Equal(t, eth.L2BlockRef{}, ec.LowestQueuedUnsafeBlock()) +} + +func TestInvalidPayloadForNonHead_NoDrop(t *testing.T) { + emitter := &testutils.MockEmitter{} + ec := NewEngineController(context.Background(), nil, testlog.Logger(t, 0), metrics.NoopMetrics, &rollup.Config{}, &sync.Config{SyncMode: sync.CLSync}, &testutils.MockL1Source{}, emitter) + + // Head payload (lower block number) + head := ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{ + BlockNumber: 1, + BlockHash: common.Hash{0x01}, + }} + // Non-head payload (higher block number) + other := 
ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{ + BlockNumber: 2, + BlockHash: common.Hash{0x02}, + }} + + emitter.ExpectOnce(PayloadInvalidEvent{}) + emitter.ExpectOnce(ForkchoiceUpdateEvent{}) + ec.AddUnsafePayload(context.Background(), head) + + emitter.ExpectOnce(PayloadInvalidEvent{}) + emitter.ExpectOnce(ForkchoiceUpdateEvent{}) + ec.AddUnsafePayload(context.Background(), other) + + // Invalidate non-head should not drop head + ec.OnEvent(context.Background(), PayloadInvalidEvent{Envelope: other}) + require.Equal(t, 2, ec.unsafePayloads.Len()) + require.Equal(t, head, ec.unsafePayloads.Peek()) +} + +// note: nil-envelope behavior is not tested to match current implementation diff --git a/op-node/rollup/engine/engine_reset.go b/op-node/rollup/engine/engine_reset.go deleted file mode 100644 index 9fcfe25f57c81..0000000000000 --- a/op-node/rollup/engine/engine_reset.go +++ /dev/null @@ -1,73 +0,0 @@ -package engine - -import ( - "context" - "fmt" - - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-node/rollup/sync" - "github.com/ethereum-optimism/optimism/op-service/event" -) - -// ResetEngineRequestEvent requests the EngineResetDeriver to walk -// the L2 chain backwards until it finds a plausible unsafe head, -// and find an L2 safe block that is guaranteed to still be from the L1 chain. -// This event is not used in interop. 
-type ResetEngineRequestEvent struct { -} - -func (ev ResetEngineRequestEvent) String() string { - return "reset-engine-request" -} - -type EngineResetDeriver struct { - ctx context.Context - log log.Logger - cfg *rollup.Config - l1 sync.L1Chain - l2 sync.L2Chain - syncCfg *sync.Config - - emitter event.Emitter -} - -func NewEngineResetDeriver(ctx context.Context, log log.Logger, cfg *rollup.Config, - l1 sync.L1Chain, l2 sync.L2Chain, syncCfg *sync.Config) *EngineResetDeriver { - return &EngineResetDeriver{ - ctx: ctx, - log: log, - cfg: cfg, - l1: l1, - l2: l2, - syncCfg: syncCfg, - } -} - -func (d *EngineResetDeriver) AttachEmitter(em event.Emitter) { - d.emitter = em -} - -func (d *EngineResetDeriver) OnEvent(ctx context.Context, ev event.Event) bool { - switch ev.(type) { - case ResetEngineRequestEvent: - result, err := sync.FindL2Heads(d.ctx, d.cfg, d.l1, d.l2, d.log, d.syncCfg) - if err != nil { - d.emitter.Emit(ctx, rollup.ResetEvent{ - Err: fmt.Errorf("failed to find the L2 Heads to start from: %w", err), - }) - return true - } - d.emitter.Emit(ctx, rollup.ForceResetEvent{ - LocalUnsafe: result.Unsafe, - CrossUnsafe: result.Unsafe, - LocalSafe: result.Safe, - CrossSafe: result.Safe, - Finalized: result.Finalized, - }) - default: - return false - } - return true -} diff --git a/op-node/rollup/engine/engine_update.go b/op-node/rollup/engine/engine_update.go deleted file mode 100644 index 7ea984388cb87..0000000000000 --- a/op-node/rollup/engine/engine_update.go +++ /dev/null @@ -1,122 +0,0 @@ -package engine - -import ( - "context" - "errors" - "fmt" - - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/rpc" -) - -// isDepositTx checks an opaqueTx to determine if it is a Deposit Transaction -// It has to return an error in the case the transaction is empty -func isDepositTx(opaqueTx eth.Data) (bool, error) { - if len(opaqueTx) == 0 { - return false, errors.New("empty 
transaction") - } - return opaqueTx[0] == types.DepositTxType, nil -} - -// lastDeposit finds the index of last deposit at the start of the transactions. -// It walks the transactions from the start until it finds a non-deposit tx. -// An error is returned if any looked at transaction cannot be decoded -func lastDeposit(txns []eth.Data) (int, error) { - var lastDeposit int - for i, tx := range txns { - deposit, err := isDepositTx(tx) - if err != nil { - return 0, fmt.Errorf("invalid transaction at idx %d", i) - } - if deposit { - lastDeposit = i - } else { - break - } - } - return lastDeposit, nil -} - -func sanityCheckPayload(payload *eth.ExecutionPayload) error { - // Sanity check payload before inserting it - if len(payload.Transactions) == 0 { - return errors.New("no transactions in returned payload") - } - if payload.Transactions[0][0] != types.DepositTxType { - return fmt.Errorf("first transaction was not deposit tx. Got %v", payload.Transactions[0][0]) - } - // Ensure that the deposits are first - lastDeposit, err := lastDeposit(payload.Transactions) - if err != nil { - return fmt.Errorf("failed to find last deposit: %w", err) - } - // Ensure no deposits after last deposit - for i := lastDeposit + 1; i < len(payload.Transactions); i++ { - tx := payload.Transactions[i] - deposit, err := isDepositTx(tx) - if err != nil { - return fmt.Errorf("failed to decode transaction idx %d: %w", i, err) - } - if deposit { - return fmt.Errorf("deposit tx (%d) after other tx in l2 block with prev deposit at idx %d", i, lastDeposit) - } - } - return nil -} - -var ErrEngineSyncing = errors.New("engine is syncing") - -type BlockInsertionErrType uint - -const ( - // BlockInsertOK indicates that the payload was successfully executed and appended to the canonical chain. - BlockInsertOK BlockInsertionErrType = iota - // BlockInsertTemporaryErr indicates that the insertion failed but may succeed at a later time without changes to the payload. 
- BlockInsertTemporaryErr - // BlockInsertPrestateErr indicates that the pre-state to insert the payload could not be prepared, e.g. due to missing chain data. - BlockInsertPrestateErr - // BlockInsertPayloadErr indicates that the payload was invalid and cannot become canonical. - BlockInsertPayloadErr -) - -// startPayload starts an execution payload building process in the provided Engine, with the given attributes. -// The severity of the error is distinguished to determine whether the same payload attributes may be re-attempted later. -func startPayload(ctx context.Context, eng ExecEngine, fc eth.ForkchoiceState, attrs *eth.PayloadAttributes) (id eth.PayloadID, errType BlockInsertionErrType, err error) { - fcRes, err := eng.ForkchoiceUpdate(ctx, &fc, attrs) - if err != nil { - var rpcErr rpc.Error - if errors.As(err, &rpcErr) { - switch code := eth.ErrorCode(rpcErr.ErrorCode()); code { - case eth.InvalidForkchoiceState: - return eth.PayloadID{}, BlockInsertPrestateErr, fmt.Errorf("pre-block-creation forkchoice update was inconsistent with engine, need reset to resolve: %w", err) - case eth.InvalidPayloadAttributes: - return eth.PayloadID{}, BlockInsertPayloadErr, fmt.Errorf("payload attributes are not valid, cannot build block: %w", err) - default: - if code.IsEngineError() { - return eth.PayloadID{}, BlockInsertPrestateErr, fmt.Errorf("unexpected engine error code in forkchoice-updated response: %w", err) - } else { - return eth.PayloadID{}, BlockInsertTemporaryErr, fmt.Errorf("unexpected generic error code in forkchoice-updated response: %w", err) - } - } - } else { - return eth.PayloadID{}, BlockInsertTemporaryErr, fmt.Errorf("failed to create new block via forkchoice: %w", err) - } - } - - switch fcRes.PayloadStatus.Status { - // TODO: snap sync - specify explicit different error type if node is syncing - case eth.ExecutionInvalid, eth.ExecutionInvalidBlockHash: - return eth.PayloadID{}, BlockInsertPayloadErr, eth.ForkchoiceUpdateErr(fcRes.PayloadStatus) - 
case eth.ExecutionValid: - id := fcRes.PayloadID - if id == nil { - return eth.PayloadID{}, BlockInsertTemporaryErr, errors.New("nil id in forkchoice result when expecting a valid ID") - } - return *id, BlockInsertOK, nil - case eth.ExecutionSyncing: - return eth.PayloadID{}, BlockInsertTemporaryErr, ErrEngineSyncing - default: - return eth.PayloadID{}, BlockInsertTemporaryErr, eth.ForkchoiceUpdateErr(fcRes.PayloadStatus) - } -} diff --git a/op-node/rollup/engine/events.go b/op-node/rollup/engine/events.go index ece5a4c5ecb4d..5b4711cbef040 100644 --- a/op-node/rollup/engine/events.go +++ b/op-node/rollup/engine/events.go @@ -1,18 +1,10 @@ package engine import ( - "context" - "errors" - "fmt" - "time" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/event" ) // ReplaceBlockSource is a magic value for the "Source" attribute, @@ -25,23 +17,7 @@ var ReplaceBlockSource = eth.L1BlockRef{ Time: 0, } -type Metrics interface { - CountSequencedTxsInBlock(txns int, deposits int) - - RecordSequencerBuildingDiffTime(duration time.Duration) - RecordSequencerSealingTime(duration time.Duration) -} - -// ForkchoiceRequestEvent signals to the engine that it should emit an artificial -// forkchoice-update event, to signal the latest forkchoice to other derivers. -// This helps decouple derivers from the actual engine state, -// while also not making the derivers wait for a forkchoice update at random. 
-type ForkchoiceRequestEvent struct { -} - -func (ev ForkchoiceRequestEvent) String() string { - return "forkchoice-request" -} +// no local metrics interface; engine depends directly on op-node/metrics.Metricer type ForkchoiceUpdateEvent struct { UnsafeL2Head, SafeL2Head, FinalizedL2Head eth.L2BlockRef @@ -51,19 +27,6 @@ func (ev ForkchoiceUpdateEvent) String() string { return "forkchoice-update" } -// PromoteUnsafeEvent signals that the given block may now become a canonical unsafe block. -// This is pre-forkchoice update; the change may not be reflected yet in the EL. -// Note that the legacy pre-event-refactor code-path (processing P2P blocks) does fire this, -// but manually, duplicate with the newer events processing code-path. -// See EngineController.InsertUnsafePayload. -type PromoteUnsafeEvent struct { - Ref eth.L2BlockRef -} - -func (ev PromoteUnsafeEvent) String() string { - return "promote-unsafe" -} - // UnsafeUpdateEvent signals that the given block is now considered safe. // This is pre-forkchoice update; the change may not be reflected yet in the EL. type UnsafeUpdateEvent struct { @@ -83,16 +46,6 @@ func (ev PromoteCrossUnsafeEvent) String() string { return "promote-cross-unsafe" } -// CrossUnsafeUpdateEvent signals that the given block is now considered cross-unsafe. -type CrossUnsafeUpdateEvent struct { - CrossUnsafe eth.L2BlockRef - LocalUnsafe eth.L2BlockRef -} - -func (ev CrossUnsafeUpdateEvent) String() string { - return "cross-unsafe-update" -} - type PendingSafeUpdateEvent struct { PendingSafe eth.L2BlockRef Unsafe eth.L2BlockRef // tip, added to the signal, to determine if there are existing blocks to consolidate @@ -102,36 +55,6 @@ func (ev PendingSafeUpdateEvent) String() string { return "pending-safe-update" } -// PromotePendingSafeEvent signals that a block can be marked as pending-safe, and/or safe. 
-type PromotePendingSafeEvent struct { - Ref eth.L2BlockRef - Concluding bool // Concludes the pending phase, so can be promoted to (local) safe - Source eth.L1BlockRef -} - -func (ev PromotePendingSafeEvent) String() string { - return "promote-pending-safe" -} - -// PromoteLocalSafeEvent signals that a block can be promoted to local-safe. -type PromoteLocalSafeEvent struct { - Ref eth.L2BlockRef - Source eth.L1BlockRef -} - -func (ev PromoteLocalSafeEvent) String() string { - return "promote-local-safe" -} - -type CrossSafeUpdateEvent struct { - CrossSafe eth.L2BlockRef - LocalSafe eth.L2BlockRef -} - -func (ev CrossSafeUpdateEvent) String() string { - return "cross-safe-update" -} - // LocalSafeUpdateEvent signals that a block is now considered to be local-safe. type LocalSafeUpdateEvent struct { Ref eth.L2BlockRef @@ -142,18 +65,8 @@ func (ev LocalSafeUpdateEvent) String() string { return "local-safe-update" } -// PromoteSafeEvent signals that a block can be promoted to cross-safe. -type PromoteSafeEvent struct { - Ref eth.L2BlockRef - Source eth.L1BlockRef -} - -func (ev PromoteSafeEvent) String() string { - return "promote-safe" -} - // SafeDerivedEvent signals that a block was determined to be safe, and derived from the given L1 block. -// This is signaled upon successful processing of PromoteSafeEvent. 
+// This is signaled upon procedural call of PromoteSafe method type SafeDerivedEvent struct { Safe eth.L2BlockRef Source eth.L1BlockRef @@ -163,89 +76,6 @@ func (ev SafeDerivedEvent) String() string { return "safe-derived" } -type PendingSafeRequestEvent struct { -} - -func (ev PendingSafeRequestEvent) String() string { - return "pending-safe-request" -} - -type ProcessUnsafePayloadEvent struct { - Envelope *eth.ExecutionPayloadEnvelope -} - -func (ev ProcessUnsafePayloadEvent) String() string { - return "process-unsafe-payload" -} - -type TryBackupUnsafeReorgEvent struct { -} - -func (ev TryBackupUnsafeReorgEvent) String() string { - return "try-backup-unsafe-reorg" -} - -type TryUpdateEngineEvent struct { - // These fields will be zero-value (BuildStarted,InsertStarted=time.Time{}, Envelope=nil) if - // this event is emitted outside of engineDeriver.onPayloadSuccess - BuildStarted time.Time - InsertStarted time.Time - Envelope *eth.ExecutionPayloadEnvelope -} - -func (ev TryUpdateEngineEvent) String() string { - return "try-update-engine" -} - -// Checks for the existence of the Envelope field, which is only -// added by the PayloadSuccessEvent -func (ev TryUpdateEngineEvent) triggeredByPayloadSuccess() bool { - return ev.Envelope != nil -} - -// Returns key/value pairs that can be logged and are useful for plotting -// block build/insert time as a way to measure performance. 
-func (ev TryUpdateEngineEvent) getBlockProcessingMetrics() []interface{} { - fcuFinish := time.Now() - payload := ev.Envelope.ExecutionPayload - - logValues := []interface{}{ - "hash", payload.BlockHash, - "number", uint64(payload.BlockNumber), - "state_root", payload.StateRoot, - "timestamp", uint64(payload.Timestamp), - "parent", payload.ParentHash, - "prev_randao", payload.PrevRandao, - "fee_recipient", payload.FeeRecipient, - "txs", len(payload.Transactions), - } - - var totalTime time.Duration - var mgasps float64 - if !ev.BuildStarted.IsZero() { - totalTime = fcuFinish.Sub(ev.BuildStarted) - logValues = append(logValues, - "build_time", common.PrettyDuration(ev.InsertStarted.Sub(ev.BuildStarted)), - "insert_time", common.PrettyDuration(fcuFinish.Sub(ev.InsertStarted)), - ) - } else if !ev.InsertStarted.IsZero() { - totalTime = fcuFinish.Sub(ev.InsertStarted) - } - - // Avoid divide-by-zero for mgasps - if totalTime > 0 { - mgasps = float64(payload.GasUsed) * 1000 / float64(totalTime) - } - - logValues = append(logValues, - "total_time", common.PrettyDuration(totalTime), - "mgas", float64(payload.GasUsed)/1000000, - "mgasps", mgasps, - ) - - return logValues -} - type EngineResetConfirmedEvent struct { LocalUnsafe eth.L2BlockRef CrossUnsafe eth.L2BlockRef @@ -258,15 +88,6 @@ func (ev EngineResetConfirmedEvent) String() string { return "engine-reset-confirmed" } -// PromoteFinalizedEvent signals that a block can be marked as finalized. -type PromoteFinalizedEvent struct { - Ref eth.L2BlockRef -} - -func (ev PromoteFinalizedEvent) String() string { - return "promote-finalized" -} - // FinalizedUpdateEvent signals that a block has been marked as finalized. type FinalizedUpdateEvent struct { Ref eth.L2BlockRef @@ -276,16 +97,6 @@ func (ev FinalizedUpdateEvent) String() string { return "finalized-update" } -// CrossUpdateRequestEvent triggers update events to be emitted, repeating the current state. 
-type CrossUpdateRequestEvent struct { - CrossUnsafe bool - CrossSafe bool -} - -func (ev CrossUpdateRequestEvent) String() string { - return "cross-update-request" -} - // InteropInvalidateBlockEvent is emitted when a block needs to be invalidated, and a replacement is needed. type InteropInvalidateBlockEvent struct { Invalidated eth.BlockRef @@ -306,265 +117,6 @@ func (ev InteropReplacedBlockEvent) String() string { return "interop-replaced-block" } -type ELSyncStartedEvent struct{} - -func (ev ELSyncStartedEvent) String() string { - return "el-sync-started" -} - -type EngDeriver struct { - metrics Metrics - - log log.Logger - cfg *rollup.Config - ec *EngineController - ctx context.Context - emitter event.Emitter -} - -var _ event.Deriver = (*EngDeriver)(nil) - -func NewEngDeriver(log log.Logger, ctx context.Context, cfg *rollup.Config, - metrics Metrics, ec *EngineController, -) *EngDeriver { - return &EngDeriver{ - log: log, - cfg: cfg, - ec: ec, - ctx: ctx, - metrics: metrics, - } -} - -func (d *EngDeriver) AttachEmitter(em event.Emitter) { - d.emitter = em -} - -func (d *EngDeriver) OnEvent(ctx context.Context, ev event.Event) bool { - d.ec.mu.Lock() - defer d.ec.mu.Unlock() - switch x := ev.(type) { - case TryBackupUnsafeReorgEvent: - // If we don't need to call FCU to restore unsafeHead using backupUnsafe, keep going b/c - // this was a no-op(except correcting invalid state when backupUnsafe is empty but TryBackupUnsafeReorg called). - fcuCalled, err := d.ec.TryBackupUnsafeReorg(d.ctx) - // Dealing with legacy here: it used to skip over the error-handling if fcuCalled was false. - // But that combination is not actually a code-path in TryBackupUnsafeReorg. - // We should drop fcuCalled, and make the function emit events directly, - // once there are no more synchronous callers. 
- if !fcuCalled && err != nil { - d.log.Crit("unexpected TryBackupUnsafeReorg error after no FCU call", "err", err) - } - if err != nil { - // If we needed to perform a network call, then we should yield even if we did not encounter an error. - if errors.Is(err, derive.ErrReset) { - d.emitter.Emit(ctx, rollup.ResetEvent{Err: err}) - } else if errors.Is(err, derive.ErrTemporary) { - d.emitter.Emit(ctx, rollup.EngineTemporaryErrorEvent{Err: err}) - } else { - d.emitter.Emit(ctx, rollup.CriticalErrorEvent{ - Err: fmt.Errorf("unexpected TryBackupUnsafeReorg error type: %w", err), - }) - } - } - case TryUpdateEngineEvent: - // If we don't need to call FCU, keep going b/c this was a no-op. If we needed to - // perform a network call, then we should yield even if we did not encounter an error. - if err := d.ec.TryUpdateEngine(d.ctx); err != nil && !errors.Is(err, ErrNoFCUNeeded) { - if errors.Is(err, derive.ErrReset) { - d.emitter.Emit(ctx, rollup.ResetEvent{Err: err}) - } else if errors.Is(err, derive.ErrTemporary) { - d.emitter.Emit(ctx, rollup.EngineTemporaryErrorEvent{Err: err}) - } else { - d.emitter.Emit(ctx, rollup.CriticalErrorEvent{ - Err: fmt.Errorf("unexpected TryUpdateEngine error type: %w", err), - }) - } - } else if x.triggeredByPayloadSuccess() { - logValues := x.getBlockProcessingMetrics() - d.log.Info("Inserted new L2 unsafe block", logValues...) - } - case ProcessUnsafePayloadEvent: - ref, err := derive.PayloadToBlockRef(d.cfg, x.Envelope.ExecutionPayload) - if err != nil { - d.log.Error("failed to decode L2 block ref from payload", "err", err) - return true - } - // Avoid re-processing the same unsafe payload if it has already been processed. Because a FCU event emits the ProcessUnsafePayloadEvent - // it is possible to have multiple queueed up ProcessUnsafePayloadEvent for the same L2 block. 
This becomes an issue when processing - // a large number of unsafe payloads at once (like when iterating through the payload queue after the safe head has advanced). - if ref.BlockRef().ID() == d.ec.UnsafeL2Head().BlockRef().ID() { - return true - } - if err := d.ec.InsertUnsafePayload(d.ctx, x.Envelope, ref); err != nil { - d.log.Info("failed to insert payload", "ref", ref, - "txs", len(x.Envelope.ExecutionPayload.Transactions), "err", err) - // yes, duplicate error-handling. After all derivers are interacting with the engine - // through events, we can drop the engine-controller interface: - // unify the events handler with the engine-controller, - // remove a lot of code, and not do this error translation. - if errors.Is(err, derive.ErrReset) { - d.emitter.Emit(ctx, rollup.ResetEvent{Err: err}) - } else if errors.Is(err, derive.ErrTemporary) { - d.emitter.Emit(ctx, rollup.EngineTemporaryErrorEvent{Err: err}) - } else { - d.emitter.Emit(ctx, rollup.CriticalErrorEvent{ - Err: fmt.Errorf("unexpected InsertUnsafePayload error type: %w", err), - }) - } - } else { - d.log.Info("successfully processed payload", "ref", ref, "txs", len(x.Envelope.ExecutionPayload.Transactions)) - } - case ForkchoiceRequestEvent: - d.emitter.Emit(ctx, ForkchoiceUpdateEvent{ - UnsafeL2Head: d.ec.UnsafeL2Head(), - SafeL2Head: d.ec.SafeL2Head(), - FinalizedL2Head: d.ec.Finalized(), - }) - case rollup.ForceResetEvent: - ForceEngineReset(d.ec, x) - - // Time to apply the changes to the underlying engine - d.emitter.Emit(ctx, TryUpdateEngineEvent{}) - - v := EngineResetConfirmedEvent{ - LocalUnsafe: d.ec.UnsafeL2Head(), - CrossUnsafe: d.ec.CrossUnsafeL2Head(), - LocalSafe: d.ec.LocalSafeL2Head(), - CrossSafe: d.ec.SafeL2Head(), - Finalized: d.ec.Finalized(), - } - // We do not emit the original event values, since those might not be set (optional attributes). 
- d.emitter.Emit(ctx, v) - d.log.Info("Reset of Engine is completed", - "local_unsafe", v.LocalUnsafe, - "cross_unsafe", v.CrossUnsafe, - "local_safe", v.LocalSafe, - "cross_safe", v.CrossSafe, - "finalized", v.Finalized, - ) - case PromoteUnsafeEvent: - // Backup unsafeHead when new block is not built on original unsafe head. - if d.ec.unsafeHead.Number >= x.Ref.Number { - d.ec.SetBackupUnsafeL2Head(d.ec.unsafeHead, false) - } - d.ec.SetUnsafeHead(x.Ref) - d.emitter.Emit(ctx, UnsafeUpdateEvent(x)) - case UnsafeUpdateEvent: - // pre-interop everything that is local-unsafe is also immediately cross-unsafe. - if !d.cfg.IsInterop(x.Ref.Time) { - d.emitter.Emit(ctx, PromoteCrossUnsafeEvent(x)) - } - // Try to apply the forkchoice changes - d.emitter.Emit(ctx, TryUpdateEngineEvent{}) - case PromoteCrossUnsafeEvent: - d.ec.SetCrossUnsafeHead(x.Ref) - d.emitter.Emit(ctx, CrossUnsafeUpdateEvent{ - CrossUnsafe: x.Ref, - LocalUnsafe: d.ec.UnsafeL2Head(), - }) - case PendingSafeRequestEvent: - d.emitter.Emit(ctx, PendingSafeUpdateEvent{ - PendingSafe: d.ec.PendingSafeL2Head(), - Unsafe: d.ec.UnsafeL2Head(), - }) - case PromotePendingSafeEvent: - // Only promote if not already stale. - // Resets/overwrites happen through engine-resets, not through promotion. 
- if x.Ref.Number > d.ec.PendingSafeL2Head().Number { - d.log.Debug("Updating pending safe", "pending_safe", x.Ref, "local_safe", d.ec.LocalSafeL2Head(), "unsafe", d.ec.UnsafeL2Head(), "concluding", x.Concluding) - d.ec.SetPendingSafeL2Head(x.Ref) - d.emitter.Emit(ctx, PendingSafeUpdateEvent{ - PendingSafe: d.ec.PendingSafeL2Head(), - Unsafe: d.ec.UnsafeL2Head(), - }) - } - if x.Concluding && x.Ref.Number > d.ec.LocalSafeL2Head().Number { - d.emitter.Emit(ctx, PromoteLocalSafeEvent{ - Ref: x.Ref, - Source: x.Source, - }) - } - case PromoteLocalSafeEvent: - d.log.Debug("Updating local safe", "local_safe", x.Ref, "safe", d.ec.SafeL2Head(), "unsafe", d.ec.UnsafeL2Head()) - d.ec.SetLocalSafeHead(x.Ref) - d.emitter.Emit(ctx, LocalSafeUpdateEvent(x)) - case LocalSafeUpdateEvent: - // pre-interop everything that is local-safe is also immediately cross-safe. - if !d.cfg.IsInterop(x.Ref.Time) { - d.emitter.Emit(ctx, PromoteSafeEvent(x)) - } - case PromoteSafeEvent: - d.log.Debug("Updating safe", "safe", x.Ref, "unsafe", d.ec.UnsafeL2Head()) - d.ec.SetSafeHead(x.Ref) - // Finalizer can pick up this safe cross-block now - d.emitter.Emit(ctx, SafeDerivedEvent{Safe: x.Ref, Source: x.Source}) - d.emitter.Emit(ctx, CrossSafeUpdateEvent{ - CrossSafe: d.ec.SafeL2Head(), - LocalSafe: d.ec.LocalSafeL2Head(), - }) - if x.Ref.Number > d.ec.crossUnsafeHead.Number { - d.log.Debug("Cross Unsafe Head is stale, updating to match cross safe", "cross_unsafe", d.ec.crossUnsafeHead, "cross_safe", x.Ref) - d.ec.SetCrossUnsafeHead(x.Ref) - d.emitter.Emit(ctx, CrossUnsafeUpdateEvent{ - CrossUnsafe: x.Ref, - LocalUnsafe: d.ec.UnsafeL2Head(), - }) - } - // Try to apply the forkchoice changes - d.emitter.Emit(ctx, TryUpdateEngineEvent{}) - case PromoteFinalizedEvent: - if x.Ref.Number < d.ec.Finalized().Number { - d.log.Error("Cannot rewind finality,", "ref", x.Ref, "finalized", d.ec.Finalized()) - return true - } - if x.Ref.Number > d.ec.SafeL2Head().Number { - d.log.Error("Block must be safe before 
it can be finalized", "ref", x.Ref, "safe", d.ec.SafeL2Head()) - return true - } - d.ec.SetFinalizedHead(x.Ref) - d.emitter.Emit(ctx, FinalizedUpdateEvent(x)) - // Try to apply the forkchoice changes - d.emitter.Emit(ctx, TryUpdateEngineEvent{}) - case CrossUpdateRequestEvent: - if x.CrossUnsafe { - d.emitter.Emit(ctx, CrossUnsafeUpdateEvent{ - CrossUnsafe: d.ec.CrossUnsafeL2Head(), - LocalUnsafe: d.ec.UnsafeL2Head(), - }) - } - if x.CrossSafe { - d.emitter.Emit(ctx, CrossSafeUpdateEvent{ - CrossSafe: d.ec.SafeL2Head(), - LocalSafe: d.ec.LocalSafeL2Head(), - }) - } - case InteropInvalidateBlockEvent: - d.emitter.Emit(ctx, BuildStartEvent{Attributes: x.Attributes}) - case BuildStartEvent: - d.onBuildStart(ctx, x) - case BuildStartedEvent: - d.onBuildStarted(ctx, x) - case BuildSealEvent: - d.onBuildSeal(ctx, x) - case BuildSealedEvent: - d.onBuildSealed(ctx, x) - case BuildInvalidEvent: - d.onBuildInvalid(ctx, x) - case BuildCancelEvent: - d.onBuildCancel(ctx, x) - case PayloadProcessEvent: - d.onPayloadProcess(ctx, x) - case PayloadSuccessEvent: - d.onPayloadSuccess(ctx, x) - case PayloadInvalidEvent: - d.onPayloadInvalid(ctx, x) - default: - return false - } - return true -} - type ResetEngineControl interface { SetUnsafeHead(eth.L2BlockRef) SetCrossUnsafeHead(ref eth.L2BlockRef) @@ -575,21 +127,21 @@ type ResetEngineControl interface { SetPendingSafeL2Head(eth.L2BlockRef) } -func ForceEngineReset(ec ResetEngineControl, x rollup.ForceResetEvent) { - ec.SetUnsafeHead(x.LocalUnsafe) +func ForceEngineReset(ec ResetEngineControl, localUnsafe, crossUnsafe, localSafe, crossSafe, finalized eth.L2BlockRef) { + ec.SetUnsafeHead(localUnsafe) // cross-safe is fine to revert back, it does not affect engine logic, just sync-status - ec.SetCrossUnsafeHead(x.CrossUnsafe) + ec.SetCrossUnsafeHead(crossUnsafe) // derivation continues at local-safe point - ec.SetLocalSafeHead(x.LocalSafe) - ec.SetPendingSafeL2Head(x.LocalSafe) + ec.SetLocalSafeHead(localSafe) + 
ec.SetPendingSafeL2Head(localSafe) // "safe" in RPC terms is cross-safe - ec.SetSafeHead(x.CrossSafe) + ec.SetSafeHead(crossSafe) // finalized head - ec.SetFinalizedHead(x.Finalized) + ec.SetFinalizedHead(finalized) ec.SetBackupUnsafeL2Head(eth.L2BlockRef{}, false) } diff --git a/op-node/rollup/engine/iface.go b/op-node/rollup/engine/iface.go deleted file mode 100644 index 0989b125df795..0000000000000 --- a/op-node/rollup/engine/iface.go +++ /dev/null @@ -1,32 +0,0 @@ -package engine - -import ( - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -// EngineState provides a read-only interface of the forkchoice state properties of the L2 Engine. -type EngineState interface { - Finalized() eth.L2BlockRef - UnsafeL2Head() eth.L2BlockRef - SafeL2Head() eth.L2BlockRef -} - -type Engine interface { - ExecEngine - derive.L2Source -} - -type LocalEngineState interface { - EngineState - - PendingSafeL2Head() eth.L2BlockRef - BackupUnsafeL2Head() eth.L2BlockRef -} - -type LocalEngineControl interface { - LocalEngineState - ResetEngineControl -} - -var _ LocalEngineControl = (*EngineController)(nil) diff --git a/op-node/rollup/engine/payload_invalid.go b/op-node/rollup/engine/payload_invalid.go index 16e193fb59c55..350ddfaefb2d3 100644 --- a/op-node/rollup/engine/payload_invalid.go +++ b/op-node/rollup/engine/payload_invalid.go @@ -1,8 +1,6 @@ package engine import ( - "context" - "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -14,8 +12,3 @@ type PayloadInvalidEvent struct { func (ev PayloadInvalidEvent) String() string { return "payload-invalid" } - -func (eq *EngDeriver) onPayloadInvalid(ctx context.Context, ev PayloadInvalidEvent) { - eq.log.Warn("Payload was invalid", "block", ev.Envelope.ExecutionPayload.ID(), - "err", ev.Err, "timestamp", uint64(ev.Envelope.ExecutionPayload.Timestamp)) -} diff --git a/op-node/rollup/engine/payload_process.go b/op-node/rollup/engine/payload_process.go 
index 12ee4d66866a5..b80968167d702 100644 --- a/op-node/rollup/engine/payload_process.go +++ b/op-node/rollup/engine/payload_process.go @@ -24,15 +24,15 @@ func (ev PayloadProcessEvent) String() string { return "payload-process" } -func (eq *EngDeriver) onPayloadProcess(ctx context.Context, ev PayloadProcessEvent) { - rpcCtx, cancel := context.WithTimeout(eq.ctx, payloadProcessTimeout) +func (e *EngineController) onPayloadProcess(ctx context.Context, ev PayloadProcessEvent) { + rpcCtx, cancel := context.WithTimeout(e.ctx, payloadProcessTimeout) defer cancel() insertStart := time.Now() - status, err := eq.ec.engine.NewPayload(rpcCtx, + status, err := e.engine.NewPayload(rpcCtx, ev.Envelope.ExecutionPayload, ev.Envelope.ParentBeaconBlockRoot) if err != nil { - eq.emitter.Emit(ctx, rollup.EngineTemporaryErrorEvent{ + e.emitter.Emit(ctx, rollup.EngineTemporaryErrorEvent{ Err: fmt.Errorf("failed to insert execution payload: %w", err), }) return @@ -41,18 +41,18 @@ func (eq *EngDeriver) onPayloadProcess(ctx context.Context, ev PayloadProcessEve case eth.ExecutionInvalid, eth.ExecutionInvalidBlockHash: // Depending on execution engine, not all block-validity checks run immediately on build-start // at the time of the forkchoiceUpdated engine-API call, nor during getPayload. 
- if ev.DerivedFrom != (eth.L1BlockRef{}) && eq.cfg.IsHolocene(ev.DerivedFrom.Time) { - eq.emitDepositsOnlyPayloadAttributesRequest(ctx, ev.Ref.ParentID(), ev.DerivedFrom) + if ev.DerivedFrom != (eth.L1BlockRef{}) && e.rollupCfg.IsHolocene(ev.DerivedFrom.Time) { + e.emitDepositsOnlyPayloadAttributesRequest(ctx, ev.Ref.ParentID(), ev.DerivedFrom) return } - eq.emitter.Emit(ctx, PayloadInvalidEvent{ + e.emitter.Emit(ctx, PayloadInvalidEvent{ Envelope: ev.Envelope, Err: eth.NewPayloadErr(ev.Envelope.ExecutionPayload, status), }) return case eth.ExecutionValid: - eq.emitter.Emit(ctx, PayloadSuccessEvent{ + e.emitter.Emit(ctx, PayloadSuccessEvent{ Concluding: ev.Concluding, DerivedFrom: ev.DerivedFrom, BuildStarted: ev.BuildStarted, @@ -62,7 +62,7 @@ func (eq *EngDeriver) onPayloadProcess(ctx context.Context, ev PayloadProcessEve }) return default: - eq.emitter.Emit(ctx, rollup.EngineTemporaryErrorEvent{ + e.emitter.Emit(ctx, rollup.EngineTemporaryErrorEvent{ Err: eth.NewPayloadErr(ev.Envelope.ExecutionPayload, status), }) return diff --git a/op-node/rollup/engine/payload_success.go b/op-node/rollup/engine/payload_success.go index 09cb0b2e71bac..6cb6d356254d9 100644 --- a/op-node/rollup/engine/payload_success.go +++ b/op-node/rollup/engine/payload_success.go @@ -4,7 +4,6 @@ import ( "context" "time" - "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -24,43 +23,33 @@ func (ev PayloadSuccessEvent) String() string { return "payload-success" } -func (eq *EngDeriver) onPayloadSuccess(ctx context.Context, ev PayloadSuccessEvent) { +func (e *EngineController) onPayloadSuccess(ctx context.Context, ev PayloadSuccessEvent) { if ev.DerivedFrom == ReplaceBlockSource { - eq.log.Warn("Successfully built replacement block, resetting chain to continue now", "replacement", ev.Ref) + e.log.Warn("Successfully built replacement block, resetting chain to continue now", "replacement", ev.Ref) // Change the engine state to make 
the replacement block the cross-safe head of the chain, // And continue syncing from there. - eq.emitter.Emit(ctx, rollup.ForceResetEvent{ - LocalUnsafe: ev.Ref, - CrossUnsafe: ev.Ref, - LocalSafe: ev.Ref, - CrossSafe: ev.Ref, - Finalized: eq.ec.Finalized(), - }) - eq.emitter.Emit(ctx, InteropReplacedBlockEvent{ + e.forceReset(ctx, ev.Ref, ev.Ref, ev.Ref, ev.Ref, e.Finalized()) + e.emitter.Emit(ctx, InteropReplacedBlockEvent{ Envelope: ev.Envelope, Ref: ev.Ref.BlockRef(), }) // Apply it to the execution engine - eq.emitter.Emit(ctx, TryUpdateEngineEvent{}) + e.tryUpdateEngine(ctx) // Not a regular reset, since we don't wind back to any L2 block. // We start specifically from the replacement block. return } - eq.emitter.Emit(ctx, PromoteUnsafeEvent{Ref: ev.Ref}) - + // TryUpdateUnsafe, TryUpdatePendingSafe, TryUpdateLocalSafe, tryUpdateEngine must be sequentially invoked + e.tryUpdateUnsafe(ctx, ev.Ref) // If derived from L1, then it can be considered (pending) safe if ev.DerivedFrom != (eth.L1BlockRef{}) { - eq.emitter.Emit(ctx, PromotePendingSafeEvent{ - Ref: ev.Ref, - Concluding: ev.Concluding, - Source: ev.DerivedFrom, - }) + e.tryUpdatePendingSafe(ctx, ev.Ref, ev.Concluding, ev.DerivedFrom) + e.tryUpdateLocalSafe(ctx, ev.Ref, ev.Concluding, ev.DerivedFrom) + } + // Now if possible synchronously call FCU + err := e.tryUpdateEngineInternal(ctx) + if err != nil { + e.log.Error("Failed to update engine", "error", err) } - - eq.emitter.Emit(ctx, TryUpdateEngineEvent{ - BuildStarted: ev.BuildStarted, - InsertStarted: ev.InsertStarted, - Envelope: ev.Envelope, - }) } diff --git a/op-node/rollup/clsync/payloads_queue.go b/op-node/rollup/engine/payloads_queue.go similarity index 76% rename from op-node/rollup/clsync/payloads_queue.go rename to op-node/rollup/engine/payloads_queue.go index 7061e00a99ac6..fe7ccb29fd564 100644 --- a/op-node/rollup/clsync/payloads_queue.go +++ b/op-node/rollup/engine/payloads_queue.go @@ -1,4 +1,4 @@ -package clsync +package engine import ( 
"container/heap" @@ -126,12 +126,16 @@ func (upq *PayloadsQueue) Push(e *eth.ExecutionPayloadEnvelope) error { envelope: e, size: size, }) + upq.blockHashes[e.ExecutionPayload.BlockHash] = struct{}{} upq.currentSize += size for upq.currentSize > upq.MaxSize { env := upq.Pop() upq.log.Info("Dropping payload from payload queue because the payload queue is too large", "id", env.ExecutionPayload.ID()) + // if we popped the same payload, return error + if env.ExecutionPayload.BlockHash == e.ExecutionPayload.BlockHash { + return fmt.Errorf("cannot add payload %s, since it has the oldest block number in queue and queue size limit is reached", e.ExecutionPayload.ID()) + } } - upq.blockHashes[e.ExecutionPayload.BlockHash] = struct{}{} return nil } @@ -157,3 +161,41 @@ func (upq *PayloadsQueue) Pop() *eth.ExecutionPayloadEnvelope { delete(upq.blockHashes, ps.envelope.ExecutionPayload.BlockHash) return ps.envelope } + +func (pq *PayloadsQueue) DropInapplicableUnsafePayloads(event ForkchoiceUpdateEvent) { + for { + if pq.Len() == 0 { + return + } + + nextEnvelope := pq.Peek() + nextPayload := nextEnvelope.ExecutionPayload + + if nextPayload.BlockHash == event.UnsafeL2Head.Hash { + pq.log.Debug("successfully processed payload, removing it from the payloads queue now") + pq.Pop() + continue + } + + if uint64(nextPayload.BlockNumber) <= event.SafeL2Head.Number { + pq.log.Info("skipping unsafe payload, since it is older than safe head", "safe", event.SafeL2Head.ID(), "unsafe", event.UnsafeL2Head.ID(), "unsafe_payload", nextPayload.ID()) + pq.Pop() + continue + } + + if uint64(nextPayload.BlockNumber) <= event.UnsafeL2Head.Number { + pq.log.Info("skipping unsafe payload, since it is older than unsafe head", "safe", event.SafeL2Head.ID(), "unsafe", event.UnsafeL2Head.ID(), "unsafe_payload", nextPayload.ID()) + pq.Pop() + continue + } + + // Ensure that the unsafe payload builds upon the current unsafe head + if uint64(nextPayload.BlockNumber) == event.UnsafeL2Head.Number+1 && 
nextPayload.ParentHash != event.UnsafeL2Head.Hash { + pq.log.Info("skipping unsafe payload, since it does not build onto the existing unsafe chain", "safe", event.SafeL2Head.ID(), "unsafe", event.UnsafeL2Head.ID(), "unsafe_payload", nextPayload.ID()) + pq.Pop() + continue + } + + break + } +} diff --git a/op-node/rollup/engine/payloads_queue_test.go b/op-node/rollup/engine/payloads_queue_test.go new file mode 100644 index 0000000000000..8686c9f4e94fd --- /dev/null +++ b/op-node/rollup/engine/payloads_queue_test.go @@ -0,0 +1,321 @@ +package engine + +import ( + "container/heap" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" +) + +func TestPayloadsByNumber(t *testing.T) { + p := payloadsByNumber{} + mk := func(i uint64) payloadAndSize { + return payloadAndSize{ + envelope: ð.ExecutionPayloadEnvelope{ + ExecutionPayload: ð.ExecutionPayload{ + BlockNumber: eth.Uint64Quantity(i), + }, + }, + } + } + // add payload A, check it was added + a := mk(123) + heap.Push(&p, a) + require.Equal(t, p.Len(), 1) + require.Equal(t, p[0], a) + + // add payload B, check it was added in top-priority spot + b := mk(100) + heap.Push(&p, b) + require.Equal(t, p.Len(), 2) + require.Equal(t, p[0], b) + + // add payload C, check it did not get first like B, since block num is higher + c := mk(150) + heap.Push(&p, c) + require.Equal(t, p.Len(), 3) + require.Equal(t, p[0], b) // still b + + // pop b + heap.Pop(&p) + require.Equal(t, p.Len(), 2) + require.Equal(t, p[0], a) + + // pop a + heap.Pop(&p) + require.Equal(t, p.Len(), 1) + require.Equal(t, p[0], c) + + // pop c + heap.Pop(&p) + require.Equal(t, p.Len(), 0) + + // duplicate entry + heap.Push(&p, b) + require.Equal(t, p.Len(), 1) + heap.Push(&p, b) + require.Equal(t, p.Len(), 2) + heap.Pop(&p) + require.Equal(t, p.Len(), 1) +} + 
+func TestPayloadMemSize(t *testing.T) { + require.Equal(t, payloadMemFixedCost, payloadMemSize(nil), "nil is same fixed cost") + require.Equal(t, payloadMemFixedCost, payloadMemSize(ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{}}), "empty payload fixed cost") + require.Equal(t, payloadMemFixedCost+payloadTxMemOverhead, payloadMemSize(ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{Transactions: []eth.Data{nil}}}), "nil tx counts") + require.Equal(t, payloadMemFixedCost+payloadTxMemOverhead, payloadMemSize(ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{Transactions: []eth.Data{make([]byte, 0)}}}), "empty tx counts") + require.Equal(t, payloadMemFixedCost+4*payloadTxMemOverhead+42+1337+0+1, + payloadMemSize(ð.ExecutionPayloadEnvelope{ExecutionPayload: ð.ExecutionPayload{Transactions: []eth.Data{ + make([]byte, 42), + make([]byte, 1337), + make([]byte, 0), + make([]byte, 1), + }}}), "mixed txs") +} + +func envelope(payload *eth.ExecutionPayload) *eth.ExecutionPayloadEnvelope { + return ð.ExecutionPayloadEnvelope{ExecutionPayload: payload} +} + +func TestPayloadsQueue(t *testing.T) { + pq := NewPayloadsQueue(testlog.Logger(t, log.LvlInfo), payloadMemFixedCost*3, payloadMemSize) + require.Equal(t, 0, pq.Len()) + require.Nil(t, pq.Peek()) + require.Nil(t, pq.Pop()) + + a := envelope(ð.ExecutionPayload{BlockNumber: 3, BlockHash: common.Hash{3}}) + b := envelope(ð.ExecutionPayload{BlockNumber: 4, BlockHash: common.Hash{4}}) + c := envelope(ð.ExecutionPayload{BlockNumber: 5, BlockHash: common.Hash{5}}) + d := envelope(ð.ExecutionPayload{BlockNumber: 6, BlockHash: common.Hash{6}}) + bAlt := envelope(ð.ExecutionPayload{BlockNumber: 4, BlockHash: common.Hash{0xff}}) + bDup := envelope(ð.ExecutionPayload{BlockNumber: 4, BlockHash: common.Hash{4}}) + + require.NoError(t, pq.Push(b)) + require.Equal(t, pq.Len(), 1) + require.Equal(t, pq.Peek(), b) + + require.Error(t, pq.Push(nil), "cannot add nil payloads") + + 
require.NoError(t, pq.Push(c)) + require.Equal(t, pq.Len(), 2) + require.Equal(t, pq.MemSize(), 2*payloadMemFixedCost) + require.Equal(t, pq.Peek(), b, "expecting b to still be the lowest number payload") + + require.NoError(t, pq.Push(a)) + require.Equal(t, pq.Len(), 3) + require.Equal(t, pq.MemSize(), 3*payloadMemFixedCost) + require.Equal(t, pq.Peek(), a, "expecting a to be new lowest number") + + require.Equal(t, pq.Pop(), a) + require.Equal(t, pq.Len(), 2, "expecting to pop the lowest") + + require.Equal(t, pq.Peek(), b, "expecting b to be lowest, compared to c") + + require.Equal(t, pq.Pop(), b) + require.Equal(t, pq.Len(), 1) + require.Equal(t, pq.MemSize(), payloadMemFixedCost) + + require.Equal(t, pq.Pop(), c) + require.Equal(t, pq.Len(), 0, "expecting no items to remain") + + e := envelope(ð.ExecutionPayload{BlockNumber: 5, Transactions: []eth.Data{make([]byte, payloadMemFixedCost*3+1)}}) + require.Error(t, pq.Push(e), "cannot add payloads that are too large") + + require.NoError(t, pq.Push(b)) + require.Equal(t, pq.Len(), 1, "expecting b") + require.Equal(t, pq.Peek(), b) + require.NoError(t, pq.Push(c)) + require.Equal(t, pq.Len(), 2, "expecting b, c") + require.Equal(t, pq.Peek(), b) + require.NoError(t, pq.Push(a)) + require.Equal(t, pq.Len(), 3, "expecting a, b, c") + require.Equal(t, pq.Peek(), a) + + // No duplicates allowed + require.Error(t, pq.Push(bDup)) + // But reorg data allowed + require.NoError(t, pq.Push(bAlt)) + + require.NoError(t, pq.Push(d)) + require.Equal(t, pq.Len(), 3) + require.Equal(t, pq.Peek(), b, "expecting b, c, d") + require.NotContainsf(t, pq.pq[:], a, "a should be dropped after 3 items already exist under max size constraint") +} + +func TestPayloadsQueue_ReaddAfterPopAllowed(t *testing.T) { + pq := NewPayloadsQueue(testlog.Logger(t, log.LvlInfo), payloadMemFixedCost*10, payloadMemSize) + b := envelope(ð.ExecutionPayload{BlockNumber: 4, BlockHash: common.Hash{4}}) + require.NoError(t, pq.Push(b)) + require.Equal(t, b, 
pq.Pop()) + // re-add same hash after pop should be allowed + require.NoError(t, pq.Push(b)) +} + +func TestDropInapplicable_PopsMultipleInapplicable(t *testing.T) { + logger := testlog.Logger(t, log.LvlInfo) + pq := NewPayloadsQueue(logger, payloadMemFixedCost*10, payloadMemSize) + + // queue: processed (=unsafe head), old<=safe, old<=unsafe, then applicable next + processed := envelope(ð.ExecutionPayload{BlockNumber: 10, BlockHash: common.Hash{0x10}}) + oldSafe := envelope(ð.ExecutionPayload{BlockNumber: 8, BlockHash: common.Hash{0x08}}) + oldUnsafe := envelope(ð.ExecutionPayload{BlockNumber: 9, BlockHash: common.Hash{0x09}}) + next := envelope(ð.ExecutionPayload{BlockNumber: 11, ParentHash: common.Hash{0x10}, BlockHash: common.Hash{0x11}}) + + require.NoError(t, pq.Push(processed)) + require.NoError(t, pq.Push(oldUnsafe)) + require.NoError(t, pq.Push(oldSafe)) + require.NoError(t, pq.Push(next)) + + ev := ForkchoiceUpdateEvent{ + UnsafeL2Head: eth.L2BlockRef{Hash: common.Hash{0x10}, Number: 10}, + SafeL2Head: eth.L2BlockRef{Hash: common.Hash{0xaa}, Number: 9}, + FinalizedL2Head: eth.L2BlockRef{}, + } + + pq.DropInapplicableUnsafePayloads(ev) + require.Equal(t, 1, pq.Len()) + require.Equal(t, next, pq.Peek()) +} + +func mkRef(number uint64, hash common.Hash) eth.L2BlockRef { + return eth.L2BlockRef{ + Hash: hash, + Number: number, + } +} + +func TestDropInapplicable_RemovesAlreadyProcessed(t *testing.T) { + logger := testlog.Logger(t, log.LvlInfo) + pq := NewPayloadsQueue(logger, payloadMemFixedCost*10, payloadMemSize) + + headHash := common.Hash{0xaa} + headNum := uint64(10) + processed := envelope(ð.ExecutionPayload{BlockNumber: eth.Uint64Quantity(headNum), BlockHash: headHash}) + require.NoError(t, pq.Push(processed)) + + ev := ForkchoiceUpdateEvent{ + UnsafeL2Head: mkRef(headNum, headHash), + SafeL2Head: mkRef(0, common.Hash{}), + FinalizedL2Head: mkRef(0, common.Hash{}), + } + + pq.DropInapplicableUnsafePayloads(ev) + require.Equal(t, 0, pq.Len()) +} + +func 
TestDropInapplicable_DropOlderThanSafe(t *testing.T) { + logger := testlog.Logger(t, log.LvlInfo) + pq := NewPayloadsQueue(logger, payloadMemFixedCost*10, payloadMemSize) + + payload := envelope(ð.ExecutionPayload{BlockNumber: eth.Uint64Quantity(8), BlockHash: common.Hash{0x01}}) + require.NoError(t, pq.Push(payload)) + + ev := ForkchoiceUpdateEvent{ + UnsafeL2Head: mkRef(10, common.Hash{0xaa}), + SafeL2Head: mkRef(8, common.Hash{0xbb}), + FinalizedL2Head: mkRef(0, common.Hash{}), + } + + pq.DropInapplicableUnsafePayloads(ev) + require.Equal(t, 0, pq.Len()) +} + +func TestDropInapplicable_DropOlderThanUnsafe(t *testing.T) { + logger := testlog.Logger(t, log.LvlInfo) + pq := NewPayloadsQueue(logger, payloadMemFixedCost*10, payloadMemSize) + + // Block is newer than safe head but not newer than unsafe head + payload := envelope(ð.ExecutionPayload{BlockNumber: eth.Uint64Quantity(10), BlockHash: common.Hash{0x02}}) + require.NoError(t, pq.Push(payload)) + + ev := ForkchoiceUpdateEvent{ + UnsafeL2Head: mkRef(10, common.Hash{0xaa}), + SafeL2Head: mkRef(9, common.Hash{0xbb}), + FinalizedL2Head: mkRef(0, common.Hash{}), + } + + pq.DropInapplicableUnsafePayloads(ev) + require.Equal(t, 0, pq.Len()) +} + +func TestDropInapplicable_DropNextHeightMismatch(t *testing.T) { + logger := testlog.Logger(t, log.LvlInfo) + pq := NewPayloadsQueue(logger, payloadMemFixedCost*10, payloadMemSize) + + headHash := common.Hash{0xaa} + // Next height but wrong parent + payload := envelope(ð.ExecutionPayload{BlockNumber: eth.Uint64Quantity(11), BlockHash: common.Hash{0x03}, ParentHash: common.Hash{0xff}}) + require.NoError(t, pq.Push(payload)) + + ev := ForkchoiceUpdateEvent{ + UnsafeL2Head: mkRef(10, headHash), + SafeL2Head: mkRef(9, common.Hash{0xbb}), + FinalizedL2Head: mkRef(0, common.Hash{}), + } + + pq.DropInapplicableUnsafePayloads(ev) + require.Equal(t, 0, pq.Len()) +} + +func TestDropInapplicable_NonAdjacentMismatchReturns(t *testing.T) { + logger := testlog.Logger(t, log.LvlInfo) + pq 
:= NewPayloadsQueue(logger, payloadMemFixedCost*10, payloadMemSize) + + headHash := common.Hash{0xaa} + // Non-adjacent height and wrong parent => should return without popping + payload := envelope(ð.ExecutionPayload{BlockNumber: eth.Uint64Quantity(12), BlockHash: common.Hash{0x04}, ParentHash: common.Hash{0xff}}) + require.NoError(t, pq.Push(payload)) + + ev := ForkchoiceUpdateEvent{ + UnsafeL2Head: mkRef(10, headHash), + SafeL2Head: mkRef(9, common.Hash{0xbb}), + FinalizedL2Head: mkRef(0, common.Hash{}), + } + + pq.DropInapplicableUnsafePayloads(ev) + require.Equal(t, 1, pq.Len()) + require.Equal(t, payload, pq.Peek()) +} + +func TestDropInapplicable_ApplicablePayloadKept(t *testing.T) { + logger := testlog.Logger(t, log.LvlInfo) + pq := NewPayloadsQueue(logger, payloadMemFixedCost*10, payloadMemSize) + + headHash := common.Hash{0xaa} + // Correct parent and next height => should keep and break + payload := envelope(ð.ExecutionPayload{BlockNumber: eth.Uint64Quantity(11), BlockHash: common.Hash{0x05}, ParentHash: headHash}) + require.NoError(t, pq.Push(payload)) + + ev := ForkchoiceUpdateEvent{ + UnsafeL2Head: mkRef(10, headHash), + SafeL2Head: mkRef(9, common.Hash{0xbb}), + FinalizedL2Head: mkRef(0, common.Hash{}), + } + + pq.DropInapplicableUnsafePayloads(ev) + require.Equal(t, 1, pq.Len()) + require.Equal(t, payload, pq.Peek()) +} + +// TestPayloadsQueue_Pop_SameElementFullQueue tests that we correctly Pop the same element, if it is to be popped, when the payloads queue is full. 
+func TestPayloadsQueue_Pop_SameElementFullQueue(t *testing.T) { + logger := testlog.Logger(t, log.LvlInfo) + + // pq is PayloadsQueue with MaxSize = payloadMemFixedCost, so space for a single payload with no txs + pq := NewPayloadsQueue(logger, payloadMemFixedCost, payloadMemSize) + + payload_13 := envelope(ð.ExecutionPayload{BlockNumber: eth.Uint64Quantity(13), BlockHash: common.Hash{0x06}}) + require.NoError(t, pq.Push(payload_13)) + + payload_12 := envelope(ð.ExecutionPayload{BlockNumber: eth.Uint64Quantity(12), BlockHash: common.Hash{0x05}}) + require.Error(t, pq.Push(payload_12)) + + require.Equal(t, 1, pq.Len()) + _, ok := pq.blockHashes[payload_12.ExecutionPayload.BlockHash] + require.False(t, ok) +} diff --git a/op-node/rollup/event.go b/op-node/rollup/event.go index efd5f40cc0347..4e268240fd422 100644 --- a/op-node/rollup/event.go +++ b/op-node/rollup/event.go @@ -1,7 +1,6 @@ package rollup import ( - "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/event" ) @@ -40,17 +39,5 @@ func (ev ResetEvent) String() string { return "reset-event" } -// ForceResetEvent forces a reset to a specific local-unsafe/local-safe/finalized starting point. -// Resets may override local-unsafe, to reset the very end of the chain. -// Resets may override local-safe, since post-interop we need the local-safe block derivation to continue. -// Pre-interop both local and cross values should be set the same. 
-type ForceResetEvent struct { - LocalUnsafe, CrossUnsafe, LocalSafe, CrossSafe, Finalized eth.L2BlockRef -} - -func (ev ForceResetEvent) String() string { - return "force-reset" -} - // CriticalErrorEvent is an alias for event.CriticalErrorEvent type CriticalErrorEvent = event.CriticalErrorEvent diff --git a/op-node/rollup/finality/altda.go b/op-node/rollup/finality/altda.go index 1c4edd2d02d1f..ae32725c70ef3 100644 --- a/op-node/rollup/finality/altda.go +++ b/op-node/rollup/finality/altda.go @@ -29,15 +29,15 @@ type AltDAFinalizer struct { func NewAltDAFinalizer(ctx context.Context, log log.Logger, cfg *rollup.Config, l1Fetcher FinalizerL1Interface, - backend AltDABackend) *AltDAFinalizer { + backend AltDABackend, ec EngineController) *AltDAFinalizer { - inner := NewFinalizer(ctx, log, cfg, l1Fetcher) + inner := NewFinalizer(ctx, log, cfg, l1Fetcher, ec) // In alt-da mode, the finalization signal is proxied through the AltDA manager. // Finality signal will come from the DA contract or L1 finality whichever is last. // The AltDA module will then call the inner.Finalize function when applicable. 
backend.OnFinalizedHeadSignal(func(ref eth.L1BlockRef) { - inner.OnEvent(ctx, FinalizeL1Event{FinalizedL1: ref}) + inner.OnL1Finalized(ref) }) return &AltDAFinalizer{ @@ -47,11 +47,11 @@ func NewAltDAFinalizer(ctx context.Context, log log.Logger, cfg *rollup.Config, } func (fi *AltDAFinalizer) OnEvent(ctx context.Context, ev event.Event) bool { - switch x := ev.(type) { - case FinalizeL1Event: - fi.backend.Finalize(x.FinalizedL1) - return true - default: - return fi.Finalizer.OnEvent(ctx, ev) - } + // TODO(#16917) Remove Event System Refactor Comments + // FinalizeL1Event is removed and OnL1Finalized is synchronously called at L1Handler + return fi.Finalizer.OnEvent(ctx, ev) +} + +func (fi *AltDAFinalizer) OnL1Finalized(l1Origin eth.L1BlockRef) { + fi.backend.Finalize(l1Origin) } diff --git a/op-node/rollup/finality/altda_test.go b/op-node/rollup/finality/altda_test.go index f2396df9472bf..a3c1a3843867d 100644 --- a/op-node/rollup/finality/altda_test.go +++ b/op-node/rollup/finality/altda_test.go @@ -35,6 +35,16 @@ func (b *fakeAltDABackend) OnFinalizedHeadSignal(f altda.HeadSignalFn) { var _ AltDABackend = (*fakeAltDABackend)(nil) +type fakeEngineController struct { + finalizedL2 eth.L2BlockRef +} + +var _ EngineController = (*fakeEngineController)(nil) + +func (f *fakeEngineController) PromoteFinalized(_ context.Context, ref eth.L2BlockRef) { + f.finalizedL2 = ref +} + func TestAltDAFinalityData(t *testing.T) { logger := testlog.Logger(t, log.LevelInfo) l1F := &testutils.MockL1Source{} @@ -97,7 +107,8 @@ func TestAltDAFinalityData(t *testing.T) { } emitter := &testutils.MockEmitter{} - fi := NewAltDAFinalizer(context.Background(), logger, cfg, l1F, altDABackend) + ec := new(fakeEngineController) + fi := NewAltDAFinalizer(context.Background(), logger, cfg, l1F, altDABackend, ec) fi.AttachEmitter(emitter) require.NotNil(t, altDABackend.forwardTo, "altda backend must have access to underlying standard finalizer") @@ -110,7 +121,7 @@ func TestAltDAFinalityData(t 
*testing.T) { // and post processing. for i := uint64(0); i < 200; i++ { if i == 10 { // finalize a L1 commitment - fi.OnEvent(context.Background(), FinalizeL1Event{FinalizedL1: l1parent}) + fi.OnL1Finalized(l1parent) emitter.AssertExpectations(t) // no events emitted upon L1 finality require.Equal(t, l1parent, commitmentInclusionFinalized, "altda backend received L1 signal") } @@ -167,20 +178,12 @@ func TestAltDAFinalityData(t *testing.T) { // of the safe block matches that of the finalized L1 block. l1F.ExpectL1BlockRefByNumber(commitmentInclusionFinalized.Number, commitmentInclusionFinalized, nil) l1F.ExpectL1BlockRefByNumber(commitmentInclusionFinalized.Number, commitmentInclusionFinalized, nil) - var finalizedL2 eth.L2BlockRef - emitter.ExpectOnceRun(func(ev event.Event) { - if x, ok := ev.(engine.PromoteFinalizedEvent); ok { - finalizedL2 = x.Ref - } else { - t.Fatalf("expected L2 finalization, but got: %s", ev) - } - }) fi.OnEvent(context.Background(), TryFinalizeEvent{}) l1F.AssertExpectations(t) emitter.AssertExpectations(t) - require.Equal(t, commitmentInclusionFinalized.Number, finalizedL2.L1Origin.Number+1) + require.Equal(t, commitmentInclusionFinalized.Number, ec.finalizedL2.L1Origin.Number+1) // Confirm finalization, so there will be no repeats of the PromoteFinalizedEvent - fi.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{FinalizedL2Head: finalizedL2}) + fi.OnEvent(context.Background(), engine.ForkchoiceUpdateEvent{FinalizedL2Head: ec.finalizedL2}) emitter.AssertExpectations(t) } } diff --git a/op-node/rollup/finality/finalizer.go b/op-node/rollup/finality/finalizer.go index e46354204ac16..4d520696d9440 100644 --- a/op-node/rollup/finality/finalizer.go +++ b/op-node/rollup/finality/finalizer.go @@ -66,6 +66,10 @@ type FinalizerL1Interface interface { L1BlockRefByNumber(context.Context, uint64) (eth.L1BlockRef, error) } +type EngineController interface { + PromoteFinalized(context.Context, eth.L2BlockRef) +} + type Finalizer struct { mu 
sync.Mutex @@ -77,6 +81,8 @@ type Finalizer struct { emitter event.Emitter + engineController EngineController + // finalizedL1 is the currently perceived finalized L1 block. // This may be ahead of the current traversed origin when syncing. finalizedL1 eth.L1BlockRef @@ -96,13 +102,14 @@ type Finalizer struct { l1Fetcher FinalizerL1Interface } -func NewFinalizer(ctx context.Context, log log.Logger, cfg *rollup.Config, l1Fetcher FinalizerL1Interface) *Finalizer { +func NewFinalizer(ctx context.Context, log log.Logger, cfg *rollup.Config, l1Fetcher FinalizerL1Interface, ec EngineController) *Finalizer { lookback := calcFinalityLookback(cfg) return &Finalizer{ ctx: ctx, cfg: cfg, log: log, finalizedL1: eth.L1BlockRef{}, + engineController: ec, triedFinalizeAt: 0, finalityData: make([]FinalityData, 0, lookback), finalityLookback: lookback, @@ -123,14 +130,6 @@ func (fi *Finalizer) FinalizedL1() (out eth.L1BlockRef) { return } -type FinalizeL1Event struct { - FinalizedL1 eth.L1BlockRef -} - -func (ev FinalizeL1Event) String() string { - return "finalized-l1" -} - type TryFinalizeEvent struct { } @@ -139,9 +138,9 @@ func (ev TryFinalizeEvent) String() string { } func (fi *Finalizer) OnEvent(ctx context.Context, ev event.Event) bool { + // TODO(#16917) Remove Event System Refactor Comments + // FinalizeL1Event is removed and OnL1Finalized is synchronously called at L1Handler switch x := ev.(type) { - case FinalizeL1Event: - fi.onL1Finalized(x.FinalizedL1) case engine.SafeDerivedEvent: fi.onDerivedSafeBlock(x.Safe, x.Source) case derive.DeriverIdleEvent: @@ -159,7 +158,7 @@ func (fi *Finalizer) OnEvent(ctx context.Context, ev event.Event) bool { } // onL1Finalized applies a L1 finality signal -func (fi *Finalizer) onL1Finalized(l1Origin eth.L1BlockRef) { +func (fi *Finalizer) OnL1Finalized(l1Origin eth.L1BlockRef) { fi.mu.Lock() defer fi.mu.Unlock() prevFinalizedL1 := fi.finalizedL1 @@ -255,7 +254,7 @@ func (fi *Finalizer) tryFinalize() { }) return } - 
fi.emitter.Emit(fi.ctx, engine.PromoteFinalizedEvent{Ref: finalizedL2}) + fi.engineController.PromoteFinalized(ctx, finalizedL2) } } diff --git a/op-node/rollup/finality/finalizer_test.go b/op-node/rollup/finality/finalizer_test.go index c86c7d4d4bbe6..4ab1cf492c19d 100644 --- a/op-node/rollup/finality/finalizer_test.go +++ b/op-node/rollup/finality/finalizer_test.go @@ -193,7 +193,8 @@ func TestEngineQueue_Finalize(t *testing.T) { l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) emitter := &testutils.MockEmitter{} - fi := NewFinalizer(context.Background(), logger, &rollup.Config{}, l1F) + ec := new(fakeEngineController) + fi := NewFinalizer(context.Background(), logger, &rollup.Config{}, l1F, ec) fi.AttachEmitter(emitter) // now say C1 was included in D and became the new safe head @@ -209,13 +210,12 @@ func TestEngineQueue_Finalize(t *testing.T) { // Let's finalize D from which we fully derived C1, but not D0 // This will trigger an attempt of L2 finalization. emitter.ExpectOnce(TryFinalizeEvent{}) - fi.OnEvent(ctx, FinalizeL1Event{FinalizedL1: refD}) - emitter.AssertExpectations(t) + fi.OnL1Finalized(refD) // C1 was included in finalized D, and should now be finalized - emitter.ExpectOnce(engine.PromoteFinalizedEvent{Ref: refC1}) fi.OnEvent(ctx, TryFinalizeEvent{}) emitter.AssertExpectations(t) + require.Equal(t, refC1, ec.finalizedL2) }) // Finality signal is received, but couldn't immediately be checked @@ -228,7 +228,8 @@ func TestEngineQueue_Finalize(t *testing.T) { l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) // to check what was derived from (same in this case) emitter := &testutils.MockEmitter{} - fi := NewFinalizer(context.Background(), logger, &rollup.Config{}, l1F) + ec := new(fakeEngineController) + fi := NewFinalizer(context.Background(), logger, &rollup.Config{}, l1F, ec) fi.AttachEmitter(emitter) // now say C1 was included in D and became the new safe head @@ -243,8 +244,7 @@ func TestEngineQueue_Finalize(t *testing.T) { // let's 
finalize D from which we fully derived C1, but not D0 emitter.ExpectOnce(TryFinalizeEvent{}) - fi.OnEvent(ctx, FinalizeL1Event{FinalizedL1: refD}) - emitter.AssertExpectations(t) + fi.OnL1Finalized(refD) // C1 was included in finalized D, but finality could not be verified yet, due to temporary test error emitter.ExpectOnceType("L1TemporaryErrorEvent") fi.OnEvent(ctx, TryFinalizeEvent{}) @@ -256,9 +256,8 @@ func TestEngineQueue_Finalize(t *testing.T) { emitter.AssertExpectations(t) // C1 was included in finalized D, and should now be finalized, as check can succeed when revisited - emitter.ExpectOnce(engine.PromoteFinalizedEvent{Ref: refC1}) fi.OnEvent(ctx, TryFinalizeEvent{}) - emitter.AssertExpectations(t) + require.Equal(t, refC1, ec.finalizedL2) }) // Test that finality progression can repeat a few times. @@ -268,7 +267,8 @@ func TestEngineQueue_Finalize(t *testing.T) { defer l1F.AssertExpectations(t) emitter := &testutils.MockEmitter{} - fi := NewFinalizer(context.Background(), logger, &rollup.Config{}, l1F) + ec := new(fakeEngineController) + fi := NewFinalizer(context.Background(), logger, &rollup.Config{}, l1F, ec) fi.AttachEmitter(emitter) fi.OnEvent(ctx, engine.SafeDerivedEvent{Safe: refC1, Source: refD}) @@ -281,27 +281,25 @@ func TestEngineQueue_Finalize(t *testing.T) { // L1 finality signal will trigger L2 finality attempt emitter.ExpectOnce(TryFinalizeEvent{}) - fi.OnEvent(ctx, FinalizeL1Event{FinalizedL1: refD}) - emitter.AssertExpectations(t) + fi.OnL1Finalized(refD) // C1 was included in D, and should be finalized now - emitter.ExpectOnce(engine.PromoteFinalizedEvent{Ref: refC1}) l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) fi.OnEvent(ctx, TryFinalizeEvent{}) + require.Equal(t, refC1, ec.finalizedL2) emitter.AssertExpectations(t) l1F.AssertExpectations(t) // Another L1 finality event, trigger L2 finality attempt again emitter.ExpectOnce(TryFinalizeEvent{}) - fi.OnEvent(ctx, 
FinalizeL1Event{FinalizedL1: refE}) - emitter.AssertExpectations(t) + fi.OnL1Finalized(refE) // D0 was included in E, and should be finalized now - emitter.ExpectOnce(engine.PromoteFinalizedEvent{Ref: refD0}) l1F.ExpectL1BlockRefByNumber(refE.Number, refE, nil) l1F.ExpectL1BlockRefByNumber(refE.Number, refE, nil) fi.OnEvent(ctx, TryFinalizeEvent{}) + require.Equal(t, refD0, ec.finalizedL2) emitter.AssertExpectations(t) l1F.AssertExpectations(t) @@ -332,14 +330,13 @@ func TestEngineQueue_Finalize(t *testing.T) { // Now L1 block H is actually finalized, and we can proceed with another attempt emitter.ExpectOnce(TryFinalizeEvent{}) - fi.OnEvent(ctx, FinalizeL1Event{FinalizedL1: refH}) - emitter.AssertExpectations(t) + fi.OnL1Finalized(refH) // F1 should be finalized now, since it was included in H - emitter.ExpectOnce(engine.PromoteFinalizedEvent{Ref: refF1}) l1F.ExpectL1BlockRefByNumber(refH.Number, refH, nil) l1F.ExpectL1BlockRefByNumber(refH.Number, refH, nil) fi.OnEvent(ctx, TryFinalizeEvent{}) + require.Equal(t, refF1, ec.finalizedL2) emitter.AssertExpectations(t) l1F.AssertExpectations(t) }) @@ -354,7 +351,8 @@ func TestEngineQueue_Finalize(t *testing.T) { l1F.ExpectL1BlockRefByNumber(refC.Number, refC, nil) // check what we derived the L2 block from emitter := &testutils.MockEmitter{} - fi := NewFinalizer(context.Background(), logger, &rollup.Config{}, l1F) + ec := new(fakeEngineController) + fi := NewFinalizer(context.Background(), logger, &rollup.Config{}, l1F, ec) fi.AttachEmitter(emitter) // now say B1 was included in C and became the new safe head @@ -369,12 +367,11 @@ func TestEngineQueue_Finalize(t *testing.T) { // let's finalize D, from which we fully derived B1, but not C0 (referenced L1 origin in L2 block != inclusion of L2 block in L1 chain) emitter.ExpectOnce(TryFinalizeEvent{}) - fi.OnEvent(ctx, FinalizeL1Event{FinalizedL1: refD}) - emitter.AssertExpectations(t) + fi.OnL1Finalized(refD) // B1 was included in finalized D, and should now be finalized 
- emitter.ExpectOnce(engine.PromoteFinalizedEvent{Ref: refB1}) fi.OnEvent(ctx, TryFinalizeEvent{}) + require.Equal(t, refB1, ec.finalizedL2) emitter.AssertExpectations(t) }) @@ -391,7 +388,8 @@ func TestEngineQueue_Finalize(t *testing.T) { l1F.ExpectL1BlockRefByNumber(refE.Number, refE, nil) // post-reorg emitter := &testutils.MockEmitter{} - fi := NewFinalizer(context.Background(), logger, &rollup.Config{}, l1F) + ec := new(fakeEngineController) + fi := NewFinalizer(context.Background(), logger, &rollup.Config{}, l1F, ec) fi.AttachEmitter(emitter) // now say B1 was included in C and became the new safe head @@ -431,8 +429,8 @@ func TestEngineQueue_Finalize(t *testing.T) { // It should be detected that C0Alt and C1Alt cannot actually be finalized, // even though they are older than the latest finality signal. emitter.ExpectOnce(TryFinalizeEvent{}) - fi.OnEvent(ctx, FinalizeL1Event{FinalizedL1: refF}) - emitter.AssertExpectations(t) + fi.OnL1Finalized(refF) + // cannot verify refC0Alt and refC1Alt, and refB1 is older and not checked emitter.ExpectOnceType("ResetEvent") fi.OnEvent(ctx, TryFinalizeEvent{}) @@ -470,8 +468,8 @@ func TestEngineQueue_Finalize(t *testing.T) { emitter.ExpectOnce(TryFinalizeEvent{}) fi.OnEvent(ctx, derive.DeriverIdleEvent{Origin: refE}) emitter.AssertExpectations(t) - emitter.ExpectOnce(engine.PromoteFinalizedEvent{Ref: refC0}) fi.OnEvent(ctx, TryFinalizeEvent{}) + require.Equal(t, refC0, ec.finalizedL2) emitter.AssertExpectations(t) }) @@ -485,9 +483,10 @@ func TestEngineQueue_Finalize(t *testing.T) { l1F.ExpectL1BlockRefByNumber(refD.Number, refD, nil) emitter := &testutils.MockEmitter{} + ec := new(fakeEngineController) fi := NewFinalizer(context.Background(), logger, &rollup.Config{ InteropTime: &refC1.Time, - }, l1F) + }, l1F, ec) fi.AttachEmitter(emitter) // now say C0 and C1 were included in D and became the new safe head @@ -497,12 +496,11 @@ func TestEngineQueue_Finalize(t *testing.T) { emitter.AssertExpectations(t) 
emitter.ExpectOnce(TryFinalizeEvent{}) - fi.OnEvent(ctx, FinalizeL1Event{FinalizedL1: refD}) - emitter.AssertExpectations(t) + fi.OnL1Finalized(refD) // C1 was Interop, C0 was not yet interop and can be finalized - emitter.ExpectOnce(engine.PromoteFinalizedEvent{Ref: refC0}) fi.OnEvent(ctx, TryFinalizeEvent{}) + require.Equal(t, refC0, ec.finalizedL2) emitter.AssertExpectations(t) }) } diff --git a/op-node/rollup/interop/indexing/attributes.go b/op-node/rollup/interop/indexing/attributes.go index 0dfa09aab7fab..28d5782ef3fb8 100644 --- a/op-node/rollup/interop/indexing/attributes.go +++ b/op-node/rollup/interop/indexing/attributes.go @@ -54,7 +54,7 @@ func AttributesToReplaceInvalidBlock(invalidatedBlock *eth.ExecutionPayloadEnvel // unfortunately, the engine API needs the inner value, not the extra-data. // So we translate it here. extraData := invalidatedBlock.ExecutionPayload.ExtraData - denominator, elasticity := eip1559.DecodeHoloceneExtraData(extraData) + denominator, elasticity, minBaseFee := eip1559.DecodeMinBaseFeeExtraData(extraData) eip1559Params := eth.Bytes8(eip1559.EncodeHolocene1559Params(denominator, elasticity)) attrs := ð.PayloadAttributes{ @@ -67,6 +67,7 @@ func AttributesToReplaceInvalidBlock(invalidatedBlock *eth.ExecutionPayloadEnvel NoTxPool: true, GasLimit: &gasLimit, EIP1559Params: &eip1559Params, + MinBaseFee: minBaseFee, } return attrs } diff --git a/op-node/rollup/interop/indexing/system.go b/op-node/rollup/interop/indexing/system.go index e316fa9ccd424..61a7d9c519465 100644 --- a/op-node/rollup/interop/indexing/system.go +++ b/op-node/rollup/interop/indexing/system.go @@ -45,6 +45,11 @@ type L1Source interface { L1BlockRefByHash(ctx context.Context, hash common.Hash) (eth.L1BlockRef, error) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1BlockRef, error) } +type EngineController interface { + ForceReset(ctx context.Context, localUnsafe, crossUnsafe, localSafe, crossSafe, finalized eth.L2BlockRef) + PromoteSafe(ctx 
context.Context, ref eth.L2BlockRef, source eth.L1BlockRef) + PromoteFinalized(ctx context.Context, ref eth.L2BlockRef) +} // IndexingMode makes the op-node managed by an op-supervisor, // by serving sync work and updating the canonical chain based on instructions. @@ -73,6 +78,8 @@ type IndexingMode struct { srv *rpc.Server jwtSecret eth.Bytes32 + + engineController EngineController } func NewIndexingMode(log log.Logger, cfg *rollup.Config, addr string, port int, jwtSecret eth.Bytes32, l1 L1Source, l2 L2Source, m opmetrics.RPCMetricer) *IndexingMode { @@ -111,6 +118,10 @@ func NewIndexingMode(log log.Logger, cfg *rollup.Config, addr string, port int, return out } +func (m *IndexingMode) SetEngineController(engineController EngineController) { + m.engineController = engineController +} + // TestDisableEventDeduplication is a test-only function that disables event deduplication. // It is necessary to make action tests work. func (m *IndexingMode) TestDisableEventDeduplication() { @@ -288,12 +299,7 @@ func (m *IndexingMode) UpdateCrossSafe(ctx context.Context, derived eth.BlockID, if err != nil { return fmt.Errorf("failed to get L1BlockRef: %w", err) } - m.emitter.Emit(m.ctx, engine.PromoteSafeEvent{ - Ref: l2Ref, - Source: l1Ref, - }) - // We return early: there is no point waiting for the cross-safe engine-update synchronously. - // All error-feedback comes to the supervisor by aborting derivation tasks with an error. + m.engineController.PromoteSafe(ctx, l2Ref, l1Ref) return nil } @@ -302,9 +308,7 @@ func (m *IndexingMode) UpdateFinalized(ctx context.Context, id eth.BlockID) erro if err != nil { return fmt.Errorf("failed to get L2BlockRef: %w", err) } - m.emitter.Emit(m.ctx, engine.PromoteFinalizedEvent{Ref: l2Ref}) - // We return early: there is no point waiting for the finalized engine-update synchronously. - // All error-feedback comes to the supervisor by aborting derivation tasks with an error. 
+ m.engineController.PromoteFinalized(ctx, l2Ref) return nil } @@ -450,13 +454,7 @@ func (m *IndexingMode) Reset(ctx context.Context, lUnsafe, xUnsafe, lSafe, xSafe return err } - m.emitter.Emit(ctx, rollup.ForceResetEvent{ - LocalUnsafe: lUnsafeRef, - CrossUnsafe: xUnsafeRef, - LocalSafe: lSafeRef, - CrossSafe: xSafeRef, - Finalized: finalizedRef, - }) + m.engineController.ForceReset(ctx, lUnsafeRef, xUnsafeRef, lSafeRef, xSafeRef, finalizedRef) return nil } diff --git a/op-node/rollup/sequencing/origin_selector.go b/op-node/rollup/sequencing/origin_selector.go index 938c16affe4d5..255522d46441e 100644 --- a/op-node/rollup/sequencing/origin_selector.go +++ b/op-node/rollup/sequencing/origin_selector.go @@ -54,12 +54,16 @@ func (los *L1OriginSelector) SetRecoverMode(enabled bool) { los.recoverMode.Store(enabled) } +func (los *L1OriginSelector) ResetOrigins() { + los.reset() +} + func (los *L1OriginSelector) OnEvent(ctx context.Context, ev event.Event) bool { switch x := ev.(type) { case engine.ForkchoiceUpdateEvent: los.onForkchoiceUpdate(x.UnsafeL2Head) - case rollup.ResetEvent, rollup.ForceResetEvent: - los.reset() + case rollup.ResetEvent: + los.ResetOrigins() default: return false } diff --git a/op-node/rollup/sequencing/sequencer.go b/op-node/rollup/sequencing/sequencer.go index 845ab2f93505b..4a5d358d8cafd 100644 --- a/op-node/rollup/sequencing/sequencer.go +++ b/op-node/rollup/sequencing/sequencer.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/attributes" "github.com/ethereum-optimism/optimism/op-node/rollup/conductor" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/engine" @@ -103,6 +104,8 @@ type Sequencer struct { emitter event.Emitter + eng attributes.EngineController + attrBuilder derive.AttributesBuilder l1OriginSelector L1OriginSelectorIface @@ -136,6 
+139,7 @@ func NewSequencer(driverCtx context.Context, log log.Logger, rollupCfg *rollup.C conductor conductor.SequencerConductor, asyncGossip AsyncGossiper, metrics Metrics, + eng attributes.EngineController, dacClient engine.DACClient, ) *Sequencer { return &Sequencer{ @@ -149,6 +153,7 @@ func NewSequencer(driverCtx context.Context, log log.Logger, rollupCfg *rollup.C attrBuilder: attributesBuilder, l1OriginSelector: l1OriginSelector, metrics: metrics, + eng: eng, timeNow: time.Now, toBlockRef: derive.PayloadToBlockRef, dacClient: dacClient, @@ -510,7 +515,7 @@ func (d *Sequencer) startBuildingBlock() { // If we do not have data to know what to build on, then request a forkchoice update if l2Head == (eth.L2BlockRef{}) { - d.emitter.Emit(d.ctx, engine.ForkchoiceRequestEvent{}) + d.eng.RequestForkchoiceUpdate(d.ctx) return } // If we have already started trying to build on top of this block, we can avoid starting over again. @@ -671,7 +676,7 @@ func (d *Sequencer) Init(ctx context.Context, active bool) error { d.asyncGossip.Start() // The `latestHead` should be updated, so we can handle start-sequencer requests - d.emitter.Emit(d.ctx, engine.ForkchoiceRequestEvent{}) + d.eng.RequestForkchoiceUpdate(d.ctx) if active { return d.forceStart() diff --git a/op-node/rollup/sequencing/sequencer_chaos_test.go b/op-node/rollup/sequencing/sequencer_chaos_test.go index c253d096e3bc7..5bccf0d3f8138 100644 --- a/op-node/rollup/sequencing/sequencer_chaos_test.go +++ b/op-node/rollup/sequencing/sequencer_chaos_test.go @@ -192,12 +192,6 @@ func (c *ChaoticEngine) OnEvent(ctx context.Context, ev event.Event) bool { case engine.BuildCancelEvent: c.currentPayloadInfo = eth.PayloadInfo{} c.currentAttributes = nil - case engine.ForkchoiceRequestEvent: - c.emitter.Emit(ctx, engine.ForkchoiceUpdateEvent{ - UnsafeL2Head: c.unsafe, - SafeL2Head: c.safe, - FinalizedL2Head: c.finalized, - }) case engine.PayloadProcessEvent: // Move forward time, to simulate time consumption 
c.clockRandomIncrement(0, time.Millisecond*500) @@ -226,7 +220,11 @@ func (c *ChaoticEngine) OnEvent(ctx context.Context, ev event.Event) bool { Ref: x.Ref, }) // With event delay, the engine would update and signal the new forkchoice. - c.emitter.Emit(ctx, engine.ForkchoiceRequestEvent{}) + c.emitter.Emit(ctx, engine.ForkchoiceUpdateEvent{ + UnsafeL2Head: c.unsafe, + SafeL2Head: c.safe, + FinalizedL2Head: c.finalized, + }) } default: return false @@ -324,6 +322,14 @@ func testSequencerChaosWithSeed(t *testing.T, seed int64) { require.NoError(t, seq.Init(context.Background(), true)) require.NoError(t, ex.Drain(), "initial forkchoice update etc. completes") + // TODO(#16917): direct call used now; no ForkchoiceRequestEvent expected + // Provide initial forkchoice so the sequencer has a prestate to build on + testEm.Emit(context.Background(), engine.ForkchoiceUpdateEvent{ + UnsafeL2Head: genesisRef, + SafeL2Head: genesisRef, + FinalizedL2Head: genesisRef, + }) + genesisTime := time.Unix(int64(deps.cfg.Genesis.L2Time), 0) i := 0 diff --git a/op-node/rollup/sequencing/sequencer_test.go b/op-node/rollup/sequencing/sequencer_test.go index 01f0759e07a84..2148ebd33e312 100644 --- a/op-node/rollup/sequencing/sequencer_test.go +++ b/op-node/rollup/sequencing/sequencer_test.go @@ -161,6 +161,18 @@ func (f *FakeAsyncGossip) Start() { var _ AsyncGossiper = (*FakeAsyncGossip)(nil) +type fakeEngController struct{} + +func (fakeEngController) RequestForkchoiceUpdate(ctx context.Context) {} + +func (fakeEngController) TryUpdatePendingSafe(ctx context.Context, ref eth.L2BlockRef, concluding bool, source eth.L1BlockRef) { +} + +func (fakeEngController) TryUpdateLocalSafe(ctx context.Context, ref eth.L2BlockRef, concluding bool, source eth.L1BlockRef) { +} + +func (fakeEngController) RequestPendingSafeUpdate(ctx context.Context) {} + // TestSequencer_StartStop runs through start/stop state back and forth to test state changes. 
func TestSequencer_StartStop(t *testing.T) { logger := testlog.Logger(t, log.LevelError) @@ -174,7 +186,7 @@ func TestSequencer_StartStop(t *testing.T) { deps.conductor.leader = true testCtx := context.Background() - emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) + // TODO(#16917): direct call used now; no ForkchoiceRequestEvent expected require.NoError(t, seq.Init(testCtx, false)) emitter.AssertExpectations(t) require.False(t, deps.conductor.closed, "conductor is ready") @@ -264,7 +276,7 @@ func TestSequencer_StaleBuild(t *testing.T) { deps.conductor.leader = true testCtx := context.Background() - emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) + // TODO(#16917): direct call used now; no ForkchoiceRequestEvent expected require.NoError(t, seq.Init(testCtx, false)) emitter.AssertExpectations(t) require.False(t, deps.conductor.closed, "conductor is ready") @@ -474,13 +486,13 @@ func TestSequencerBuild(t *testing.T) { testCtx := context.Background() // Init will request a forkchoice update - emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) + // TODO(#16917): direct call used now; no ForkchoiceRequestEvent expected require.NoError(t, seq.Init(testCtx, true)) emitter.AssertExpectations(t) require.True(t, seq.Active(), "started in active mode") // It will request a forkchoice update, it needs the head before being able to build on top of it - emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) + // TODO(#16917): direct call used now; no ForkchoiceRequestEvent expected seq.OnEvent(context.Background(), SequencerActionEvent{}) emitter.AssertExpectations(t) @@ -631,16 +643,12 @@ func TestSequencerL1TemporaryErrorEvent(t *testing.T) { seq.AttachEmitter(emitter) testCtx := context.Background() - // Init will request a forkchoice update - emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) + // Init require.NoError(t, seq.Init(testCtx, true)) - emitter.AssertExpectations(t) require.True(t, seq.Active(), "started in active mode") - // It will request a forkchoice 
update, it needs the head before being able to build on top of it - emitter.ExpectOnce(engine.ForkchoiceRequestEvent{}) + // It needs the head before being able to build on top of it seq.OnEvent(context.Background(), SequencerActionEvent{}) - emitter.AssertExpectations(t) // Now send the forkchoice data, for the sequencer to learn what to build on top of. head := eth.L2BlockRef{ @@ -708,6 +716,8 @@ func createSequencer(log log.Logger) (*Sequencer, *sequencerTestDeps) { FjordTime: new(uint64), GraniteTime: new(uint64), HoloceneTime: new(uint64), + IsthmusTime: new(uint64), + JovianTime: new(uint64), } deps := &sequencerTestDeps{ cfg: cfg, @@ -723,7 +733,7 @@ func createSequencer(log log.Logger) (*Sequencer, *sequencerTestDeps) { } seq := NewSequencer(context.Background(), log, cfg, deps.attribBuilder, deps.l1OriginSelector, deps.seqState, deps.conductor, - deps.asyncGossip, metrics.NoopMetrics, nil) + deps.asyncGossip, metrics.NoopMetrics, fakeEngController{}, nil) // We create mock payloads, with the epoch-id as tx[0], rather than proper L1Block-info deposit tx. seq.toBlockRef = func(rollupCfg *rollup.Config, payload *eth.ExecutionPayload) (eth.L2BlockRef, error) { return eth.L2BlockRef{ diff --git a/op-node/rollup/status/l1_tracker.go b/op-node/rollup/status/l1_tracker.go index 514babd9f2054..4106c9335e9f7 100644 --- a/op-node/rollup/status/l1_tracker.go +++ b/op-node/rollup/status/l1_tracker.go @@ -5,12 +5,10 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/event" ) // L1Tracker implements the L1Fetcher interface while proactively maintaining a reorg-aware cache -// of L1 block references by number. This handles the L1UnsafeEvent in order to populate the cache with -// the latest L1 block references. +// of L1 block references by number. Populate the cache with the latest L1 block references. 
type L1Tracker struct { derive.L1Fetcher cache *l1HeadBuffer @@ -23,15 +21,8 @@ func NewL1Tracker(inner derive.L1Fetcher) *L1Tracker { } } -func (st *L1Tracker) OnEvent(ctx context.Context, ev event.Event) bool { - switch x := ev.(type) { - case L1UnsafeEvent: - st.cache.Insert(x.L1Unsafe) - default: - return false - } - - return true +func (st *L1Tracker) OnL1Unsafe(l1Unsafe eth.BlockRef) { + st.cache.Insert(l1Unsafe) } func (l *L1Tracker) L1BlockRefByNumber(ctx context.Context, num uint64) (eth.L1BlockRef, error) { diff --git a/op-node/rollup/status/l1_tracker_test.go b/op-node/rollup/status/l1_tracker_test.go index 8e2ee3600affd..98e7a1ee0772f 100644 --- a/op-node/rollup/status/l1_tracker_test.go +++ b/op-node/rollup/status/l1_tracker_test.go @@ -14,10 +14,8 @@ func mockL1BlockRef(num uint64) eth.L1BlockRef { return eth.L1BlockRef{Number: num, Hash: common.Hash{byte(num)}, ParentHash: common.Hash{byte(num - 1)}} } -func newL1HeadEvent(l1Tracker *L1Tracker, head eth.L1BlockRef) { - l1Tracker.OnEvent(context.Background(), L1UnsafeEvent{ - L1Unsafe: head, - }) +func newL1Head(l1Tracker *L1Tracker, head eth.L1BlockRef) { + l1Tracker.OnL1Unsafe(head) } func TestCachingHeadReorg(t *testing.T) { @@ -35,21 +33,21 @@ func TestCachingHeadReorg(t *testing.T) { // from cache l1Head = mockL1BlockRef(100) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 100) require.NoError(t, err) require.Equal(t, l1Head, ret) // from cache l1Head = mockL1BlockRef(101) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101) require.NoError(t, err) require.Equal(t, l1Head, ret) // from cache l1Head = mockL1BlockRef(102) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 102) require.NoError(t, err) require.Equal(t, l1Head, ret) @@ -57,7 +55,7 @@ func TestCachingHeadReorg(t *testing.T) { // trigger a reorg of 
block 102 l1Head = mockL1BlockRef(102) l1Head.Hash = common.Hash{0xde, 0xad, 0xbe, 0xef} - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 102) require.NoError(t, err) require.Equal(t, l1Head, ret) @@ -83,28 +81,28 @@ func TestCachingHeadRewind(t *testing.T) { // from cache l1Head = mockL1BlockRef(100) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 100) require.NoError(t, err) require.Equal(t, l1Head, ret) // from cache l1Head = mockL1BlockRef(101) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101) require.NoError(t, err) require.Equal(t, l1Head, ret) // from cache l1Head = mockL1BlockRef(102) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 102) require.NoError(t, err) require.Equal(t, l1Head, ret) // 101 is the new head, invalidating 102 l1Head = mockL1BlockRef(101) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101) require.NoError(t, err) require.Equal(t, l1Head, ret) @@ -138,21 +136,21 @@ func TestCachingChainShorteningReorg(t *testing.T) { // from cache l1Head = mockL1BlockRef(100) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 100) require.NoError(t, err) require.Equal(t, l1Head, ret) // from cache l1Head = mockL1BlockRef(101) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101) require.NoError(t, err) require.Equal(t, l1Head, ret) // from cache l1Head = mockL1BlockRef(102) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 102) require.NoError(t, err) require.Equal(t, l1Head, ret) @@ -160,7 +158,7 @@ func TestCachingChainShorteningReorg(t *testing.T) { // 
trigger a reorg of block 101, invalidating the following cache elements (102) l1Head = mockL1BlockRef(101) l1Head.Hash = common.Hash{0xde, 0xad, 0xbe, 0xef} - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101) require.NoError(t, err) require.Equal(t, l1Head, ret) @@ -180,21 +178,21 @@ func TestCachingDeepReorg(t *testing.T) { // from cache l1Head := mockL1BlockRef(100) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err := l1Tracker.L1BlockRefByNumber(ctx, 100) require.NoError(t, err) require.Equal(t, l1Head, ret) // from cache l1Head = mockL1BlockRef(101) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101) require.NoError(t, err) require.Equal(t, l1Head, ret) // from cache l1Head = mockL1BlockRef(102) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 102) require.NoError(t, err) require.Equal(t, l1Head, ret) @@ -203,7 +201,7 @@ func TestCachingDeepReorg(t *testing.T) { parentHash := common.Hash{0xde, 0xad, 0xbe, 0xef} l1Head = mockL1BlockRef(102) l1Head.ParentHash = parentHash - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 102) require.NoError(t, err) require.Equal(t, l1Head, ret) @@ -230,21 +228,21 @@ func TestCachingSkipAhead(t *testing.T) { // from cache l1Head := mockL1BlockRef(100) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err := l1Tracker.L1BlockRefByNumber(ctx, 100) require.NoError(t, err) require.Equal(t, l1Head, ret) // from cache l1Head = mockL1BlockRef(101) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 101) require.NoError(t, err) require.Equal(t, l1Head, ret) // head jumps ahead from 101->103, invalidating the entire cache l1Head = mockL1BlockRef(103) - newL1HeadEvent(l1Tracker, 
l1Head) + newL1Head(l1Tracker, l1Head) ret, err = l1Tracker.L1BlockRefByNumber(ctx, 103) require.NoError(t, err) require.Equal(t, mockL1BlockRef(103), ret) @@ -266,7 +264,7 @@ func TestCacheSizeEviction(t *testing.T) { // insert 1000 elements into the cache for idx := 1000; idx < 2000; idx++ { l1Head := mockL1BlockRef(uint64(idx)) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) } // request each element from cache @@ -278,7 +276,7 @@ func TestCacheSizeEviction(t *testing.T) { // insert 1001st element, removing the first l1Head := mockL1BlockRef(2000) - newL1HeadEvent(l1Tracker, l1Head) + newL1Head(l1Tracker, l1Head) // request first element, which now requires a live fetch instead l1Fetcher.ExpectL1BlockRefByNumber(1000, mockL1BlockRef(1000), nil) diff --git a/op-node/rollup/status/status.go b/op-node/rollup/status/status.go index 3f7d0727a8ff9..b6419f26f2c6b 100644 --- a/op-node/rollup/status/status.go +++ b/op-node/rollup/status/status.go @@ -10,26 +10,12 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/engine" - "github.com/ethereum-optimism/optimism/op-node/rollup/finality" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/event" ) -type L1UnsafeEvent struct { - L1Unsafe eth.L1BlockRef -} - -func (ev L1UnsafeEvent) String() string { - return "l1-unsafe" -} - -type L1SafeEvent struct { - L1Safe eth.L1BlockRef -} - -func (ev L1SafeEvent) String() string { - return "l1-safe" -} +// Compile-time interface compliance check +var _ engine.CrossUpdateHandler = (*StatusTracker)(nil) type Metrics interface { RecordL1ReorgDepth(d uint64) @@ -59,6 +45,9 @@ func NewStatusTracker(log log.Logger, metrics Metrics) *StatusTracker { } func (st *StatusTracker) OnEvent(ctx context.Context, ev event.Event) bool { + // TODO(#16917) Remove Event System Refactor Comments + // 
L1UnsafeEvent, L1SafeEvent is removed and OnL1Unsafe is synchronously called at L1Handler + // FinalizeL1Event is removed and OnL1Finalized is synchronously called at L1Handler st.mu.Lock() defer st.mu.Unlock() @@ -74,49 +63,11 @@ func (st *StatusTracker) OnEvent(ctx context.Context, ev event.Event) bool { case engine.PendingSafeUpdateEvent: st.data.UnsafeL2 = x.Unsafe st.data.PendingSafeL2 = x.PendingSafe - case engine.CrossUnsafeUpdateEvent: - st.log.Debug("Cross unsafe head updated", "cross_unsafe", x.CrossUnsafe, "local_unsafe", x.LocalUnsafe) - st.data.CrossUnsafeL2 = x.CrossUnsafe - st.data.UnsafeL2 = x.LocalUnsafe case engine.LocalSafeUpdateEvent: st.log.Debug("Local safe head updated", "local_safe", x.Ref) st.data.LocalSafeL2 = x.Ref - case engine.CrossSafeUpdateEvent: - st.log.Debug("Cross safe head updated", "cross_safe", x.CrossSafe, "local_safe", x.LocalSafe) - st.data.SafeL2 = x.CrossSafe - st.data.LocalSafeL2 = x.LocalSafe case derive.DeriverL1StatusEvent: st.data.CurrentL1 = x.Origin - case L1UnsafeEvent: - st.metrics.RecordL1Ref("l1_head", x.L1Unsafe) - // We don't need to do anything if the head hasn't changed. - if st.data.HeadL1 == (eth.L1BlockRef{}) { - st.log.Info("Received first L1 head signal", "l1_head", x.L1Unsafe) - } else if st.data.HeadL1.Hash == x.L1Unsafe.Hash { - st.log.Trace("Received L1 head signal that is the same as the current head", "l1_head", x.L1Unsafe) - } else if st.data.HeadL1.Hash == x.L1Unsafe.ParentHash { - // We got a new L1 block whose parent hash is the same as the current L1 head. Means we're - // dealing with a linear extension (new block is the immediate child of the old one). - st.log.Debug("L1 head moved forward", "l1_head", x.L1Unsafe) - } else { - if st.data.HeadL1.Number >= x.L1Unsafe.Number { - st.metrics.RecordL1ReorgDepth(st.data.HeadL1.Number - x.L1Unsafe.Number) - } - // New L1 block is not the same as the current head or a single step linear extension. 
- // This could either be a long L1 extension, or a reorg, or we simply missed a head update. - st.log.Warn("L1 head signal indicates a possible L1 re-org", - "old_l1_head", st.data.HeadL1, "new_l1_head_parent", x.L1Unsafe.ParentHash, "new_l1_head", x.L1Unsafe) - } - st.data.HeadL1 = x.L1Unsafe - case L1SafeEvent: - st.log.Info("New L1 safe block", "l1_safe", x.L1Safe) - st.metrics.RecordL1Ref("l1_safe", x.L1Safe) - st.data.SafeL1 = x.L1Safe - case finality.FinalizeL1Event: - st.log.Info("New L1 finalized block", "l1_finalized", x.FinalizedL1) - st.metrics.RecordL1Ref("l1_finalized", x.FinalizedL1) - st.data.FinalizedL1 = x.FinalizedL1 - st.data.CurrentL1Finalized = x.FinalizedL1 case rollup.ResetEvent: st.data.UnsafeL2 = eth.L2BlockRef{} st.data.SafeL2 = eth.L2BlockRef{} @@ -132,6 +83,11 @@ func (st *StatusTracker) OnEvent(ctx context.Context, ev event.Event) bool { return false } + st.UpdateSyncStatus() + return true +} + +func (st *StatusTracker) UpdateSyncStatus() { // If anything changes, then copy the state to the published SyncStatus // @dev: If this becomes a performance bottleneck during sync (because mem copies onto heap, and 1KB comparisons), // we can rate-limit updates of the published data. @@ -140,7 +96,45 @@ func (st *StatusTracker) OnEvent(ctx context.Context, ev event.Event) bool { published = st.data st.published.Store(&published) } - return true +} + +func (st *StatusTracker) OnL1Unsafe(x eth.L1BlockRef) { + st.metrics.RecordL1Ref("l1_head", x) + // We don't need to do anything if the head hasn't changed. + if st.data.HeadL1 == (eth.L1BlockRef{}) { + st.log.Info("Received first L1 head signal", "l1_head", x) + } else if st.data.HeadL1.Hash == x.Hash { + st.log.Trace("Received L1 head signal that is the same as the current head", "l1_head", x) + } else if st.data.HeadL1.Hash == x.ParentHash { + // We got a new L1 block whose parent hash is the same as the current L1 head. 
Means we're + // dealing with a linear extension (new block is the immediate child of the old one). + st.log.Debug("L1 head moved forward", "l1_head", x) + } else { + if st.data.HeadL1.Number >= x.Number { + st.metrics.RecordL1ReorgDepth(st.data.HeadL1.Number - x.Number) + } + // New L1 block is not the same as the current head or a single step linear extension. + // This could either be a long L1 extension, or a reorg, or we simply missed a head update. + st.log.Warn("L1 head signal indicates a possible L1 re-org", + "old_l1_head", st.data.HeadL1, "new_l1_head_parent", x.ParentHash, "new_l1_head", x) + } + st.data.HeadL1 = x + st.UpdateSyncStatus() +} + +func (st *StatusTracker) OnL1Safe(x eth.L1BlockRef) { + st.log.Info("New L1 safe block", "l1_safe", x) + st.metrics.RecordL1Ref("l1_safe", x) + st.data.SafeL1 = x + st.UpdateSyncStatus() +} + +func (st *StatusTracker) OnL1Finalized(x eth.L1BlockRef) { + st.log.Info("New L1 finalized block", "l1_finalized", x) + st.metrics.RecordL1Ref("l1_finalized", x) + st.data.FinalizedL1 = x + st.data.CurrentL1Finalized = x + st.UpdateSyncStatus() } // SyncStatus is thread safe, and reads the latest view of L1 and L2 block labels @@ -152,3 +146,25 @@ func (st *StatusTracker) SyncStatus() *eth.SyncStatus { func (st *StatusTracker) L1Head() eth.L1BlockRef { return st.SyncStatus().HeadL1 } + +func (st *StatusTracker) OnCrossUnsafeUpdate(ctx context.Context, crossUnsafe eth.L2BlockRef, localUnsafe eth.L2BlockRef) { + st.mu.Lock() + defer st.mu.Unlock() + + st.log.Debug("Cross unsafe head updated", "cross_unsafe", crossUnsafe, "local_unsafe", localUnsafe) + st.data.CrossUnsafeL2 = crossUnsafe + st.data.UnsafeL2 = localUnsafe + + st.UpdateSyncStatus() +} + +func (st *StatusTracker) OnCrossSafeUpdate(ctx context.Context, crossSafe eth.L2BlockRef, localSafe eth.L2BlockRef) { + st.mu.Lock() + defer st.mu.Unlock() + + st.log.Debug("Cross safe head updated", "cross_safe", crossSafe, "local_safe", localSafe) + st.data.SafeL2 = crossSafe + 
st.data.LocalSafeL2 = localSafe + + st.UpdateSyncStatus() +} diff --git a/op-node/rollup/toggles.go b/op-node/rollup/toggles.go new file mode 100644 index 0000000000000..d49f1c1836244 --- /dev/null +++ b/op-node/rollup/toggles.go @@ -0,0 +1,16 @@ +package rollup + +// This file contains ephemeral feature toggles which should be removed +// after the fork scope is locked. + +func (c *Config) IsMinBaseFee(time uint64) bool { + return c.IsJovian(time) // Replace with return false to disable +} + +func (c *Config) IsDAFootprintBlockLimit(time uint64) bool { + return c.IsJovian(time) // Replace with return false to disable +} + +func (c *Config) IsOperatorFeeFix(time uint64) bool { + return c.IsJovian(time) // Replace with return false to disable +} diff --git a/op-node/rollup/types.go b/op-node/rollup/types.go index 3930e207ec7c1..4fa0724201147 100644 --- a/op-node/rollup/types.go +++ b/op-node/rollup/types.go @@ -11,6 +11,7 @@ import ( altda "github.com/ethereum-optimism/optimism/op-alt-da" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" @@ -123,14 +124,14 @@ type Config struct { // Active if IsthmusTime != nil && L2 block timestamp >= *IsthmusTime, inactive otherwise. IsthmusTime *uint64 `json:"isthmus_time,omitempty"` - // InteropTime sets the activation time for an experimental feature-set, activated like a hardfork. - // Active if InteropTime != nil && L2 block timestamp >= *InteropTime, inactive otherwise. - InteropTime *uint64 `json:"interop_time,omitempty"` - // JovianTime sets the activation time of the Jovian network upgrade. // Active if JovianTime != nil && L2 block timestamp >= *JovianTime, inactive otherwise. JovianTime *uint64 `json:"jovian_time,omitempty"` + // InteropTime sets the activation time for an experimental feature-set, activated like a hardfork. 
+ // Active if InteropTime != nil && L2 block timestamp >= *InteropTime, inactive otherwise. + InteropTime *uint64 `json:"interop_time,omitempty"` + // Note: below addresses are part of the block-derivation process, // and required to be the same network-wide to stay in consensus. @@ -190,14 +191,14 @@ func (cfg *Config) IsL2BlobTimeSet() bool { } // ValidateL1Config checks L1 config variables for errors. -func (cfg *Config) ValidateL1Config(ctx context.Context, client L1Client) error { +func (cfg *Config) ValidateL1Config(ctx context.Context, logger log.Logger, client L1Client) error { // Validate the L1 Client Chain ID if err := cfg.CheckL1ChainID(ctx, client); err != nil { return err } // Validate the Rollup L1 Genesis Blockhash - if err := cfg.CheckL1GenesisBlockHash(ctx, client); err != nil { + if err := cfg.CheckL1GenesisBlockHash(ctx, logger, client); err != nil { return err } @@ -258,9 +259,14 @@ func (cfg *Config) CheckL1ChainID(ctx context.Context, client L1Client) error { } // CheckL1GenesisBlockHash checks that the configured L1 genesis block hash is valid for the given client. 
-func (cfg *Config) CheckL1GenesisBlockHash(ctx context.Context, client L1Client) error { +func (cfg *Config) CheckL1GenesisBlockHash(ctx context.Context, logger log.Logger, client L1Client) error { l1GenesisBlockRef, err := client.L1BlockRefByNumber(ctx, cfg.Genesis.L1.Number) if err != nil { + if errors.Is(eth.MaybeAsNotFoundErr(err), ethereum.NotFound) { + // Genesis block isn't available to check, so just accept it and hope for the best + logger.Warn("L1 genesis block not found, skipping validity check") + return nil + } return fmt.Errorf("failed to get L1 genesis blockhash: %w", err) } if l1GenesisBlockRef.Hash != cfg.Genesis.L1.Hash { @@ -450,54 +456,59 @@ func (c *Config) L1Signer() types.Signer { return types.LatestSignerForChainID(c.L1ChainID) } +func (c *Config) IsForkActive(fork ForkName, timestamp uint64) bool { + activationTime := c.ActivationTimeFor(fork) + return activationTime != nil && timestamp >= *activationTime +} + // IsRegolith returns true if the Regolith hardfork is active at or past the given timestamp. func (c *Config) IsRegolith(timestamp uint64) bool { - return c.RegolithTime != nil && timestamp >= *c.RegolithTime + return c.IsForkActive(Regolith, timestamp) } // IsCanyon returns true if the Canyon hardfork is active at or past the given timestamp. func (c *Config) IsCanyon(timestamp uint64) bool { - return c.CanyonTime != nil && timestamp >= *c.CanyonTime + return c.IsForkActive(Canyon, timestamp) } // IsDelta returns true if the Delta hardfork is active at or past the given timestamp. func (c *Config) IsDelta(timestamp uint64) bool { - return c.DeltaTime != nil && timestamp >= *c.DeltaTime + return c.IsForkActive(Delta, timestamp) } // IsEcotone returns true if the Ecotone hardfork is active at or past the given timestamp. 
func (c *Config) IsEcotone(timestamp uint64) bool { - return c.EcotoneTime != nil && timestamp >= *c.EcotoneTime + return c.IsForkActive(Ecotone, timestamp) } // IsFjord returns true if the Fjord hardfork is active at or past the given timestamp. func (c *Config) IsFjord(timestamp uint64) bool { - return c.FjordTime != nil && timestamp >= *c.FjordTime + return c.IsForkActive(Fjord, timestamp) } // IsGranite returns true if the Granite hardfork is active at or past the given timestamp. func (c *Config) IsGranite(timestamp uint64) bool { - return c.GraniteTime != nil && timestamp >= *c.GraniteTime + return c.IsForkActive(Granite, timestamp) } // IsHolocene returns true if the Holocene hardfork is active at or past the given timestamp. func (c *Config) IsHolocene(timestamp uint64) bool { - return c.HoloceneTime != nil && timestamp >= *c.HoloceneTime + return c.IsForkActive(Holocene, timestamp) } // IsIsthmus returns true if the Isthmus hardfork is active at or past the given timestamp. func (c *Config) IsIsthmus(timestamp uint64) bool { - return c.IsthmusTime != nil && timestamp >= *c.IsthmusTime + return c.IsForkActive(Isthmus, timestamp) } // IsJovian returns true if the Jovian hardfork is active at or past the given timestamp. func (c *Config) IsJovian(timestamp uint64) bool { - return c.JovianTime != nil && timestamp >= *c.JovianTime + return c.IsForkActive(Jovian, timestamp) } // IsInterop returns true if the Interop hardfork is active at or past the given timestamp. 
func (c *Config) IsInterop(timestamp uint64) bool { - return c.InteropTime != nil && timestamp >= *c.InteropTime + return c.IsForkActive(Interop, timestamp) } func (c *Config) IsRegolithActivationBlock(l2BlockTime uint64) bool { @@ -572,14 +583,43 @@ func (c *Config) IsInteropActivationBlock(l2BlockTime uint64) bool { !c.IsInterop(l2BlockTime-c.BlockTime) } +func (c *Config) ActivationTimeFor(fork ForkName) *uint64 { + switch fork { + case Interop: + return c.InteropTime + case Jovian: + return c.JovianTime + case Isthmus: + return c.IsthmusTime + case Holocene: + return c.HoloceneTime + case Granite: + return c.GraniteTime + case Fjord: + return c.FjordTime + case Ecotone: + return c.EcotoneTime + case Delta: + return c.DeltaTime + case Canyon: + return c.CanyonTime + case Regolith: + return c.RegolithTime + default: + panic(fmt.Sprintf("unknown fork: %v", fork)) + } +} + // IsActivationBlock returns the fork which activates at the block with time newTime if the previous // block's time is oldTime. It return an empty ForkName if no fork activation takes place between // those timestamps. It can be used for both, L1 and L2 blocks. -// TODO(12490): Currently only supports Holocene. Will be modularized in a follow-up. 
func (c *Config) IsActivationBlock(oldTime, newTime uint64) ForkName { if c.IsInterop(newTime) && !c.IsInterop(oldTime) { return Interop } + if c.IsJovian(newTime) && !c.IsJovian(oldTime) { + return Jovian + } if c.IsIsthmus(newTime) && !c.IsIsthmus(oldTime) { return Isthmus } diff --git a/op-node/rollup/types_test.go b/op-node/rollup/types_test.go index aad48c1757ebe..bb53ef9cd9e34 100644 --- a/op-node/rollup/types_test.go +++ b/op-node/rollup/types_test.go @@ -3,12 +3,15 @@ package rollup import ( "context" "encoding/json" + "errors" "fmt" "math/big" "math/rand" "testing" "time" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -64,6 +67,7 @@ func TestConfigJSON(t *testing.T) { type mockL1Client struct { chainID *big.Int Hash common.Hash + err error } func (m *mockL1Client) ChainID(context.Context) (*big.Int, error) { @@ -71,6 +75,9 @@ func (m *mockL1Client) ChainID(context.Context) (*big.Int, error) { } func (m *mockL1Client) L1BlockRefByNumber(ctx context.Context, number uint64) (eth.L1BlockRef, error) { + if m.err != nil { + return eth.L1BlockRef{}, m.err + } return eth.L1BlockRef{ Hash: m.Hash, Number: 100, @@ -83,7 +90,7 @@ func TestValidateL1Config(t *testing.T) { config.Genesis.L1.Number = 100 config.Genesis.L1.Hash = [32]byte{0x01} mockClient := mockL1Client{chainID: big.NewInt(100), Hash: common.Hash{0x01}} - err := config.ValidateL1Config(context.TODO(), &mockClient) + err := config.ValidateL1Config(context.TODO(), testlog.Logger(t, log.LvlInfo), &mockClient) assert.NoError(t, err) } @@ -93,10 +100,11 @@ func TestValidateL1ConfigInvalidChainIdFails(t *testing.T) { config.Genesis.L1.Number = 100 config.Genesis.L1.Hash = [32]byte{0x01} mockClient := mockL1Client{chainID: big.NewInt(100), Hash: common.Hash{0x01}} - err := config.ValidateL1Config(context.TODO(), &mockClient) + logger := testlog.Logger(t, log.LvlInfo) + err := 
config.ValidateL1Config(context.TODO(), logger, &mockClient) assert.Error(t, err) config.L1ChainID = big.NewInt(99) - err = config.ValidateL1Config(context.TODO(), &mockClient) + err = config.ValidateL1Config(context.TODO(), logger, &mockClient) assert.Error(t, err) } @@ -106,10 +114,11 @@ func TestValidateL1ConfigInvalidGenesisHashFails(t *testing.T) { config.Genesis.L1.Number = 100 config.Genesis.L1.Hash = [32]byte{0x00} mockClient := mockL1Client{chainID: big.NewInt(100), Hash: common.Hash{0x01}} - err := config.ValidateL1Config(context.TODO(), &mockClient) + logger := testlog.Logger(t, log.LvlInfo) + err := config.ValidateL1Config(context.TODO(), logger, &mockClient) assert.Error(t, err) config.Genesis.L1.Hash = [32]byte{0x02} - err = config.ValidateL1Config(context.TODO(), &mockClient) + err = config.ValidateL1Config(context.TODO(), logger, &mockClient) assert.Error(t, err) } @@ -125,18 +134,23 @@ func TestCheckL1ChainID(t *testing.T) { } func TestCheckL1BlockRefByNumber(t *testing.T) { + logger := testlog.Logger(t, log.LvlInfo) config := randConfig() config.Genesis.L1.Number = 100 config.Genesis.L1.Hash = [32]byte{0x01} mockClient := mockL1Client{chainID: big.NewInt(100), Hash: common.Hash{0x01}} - err := config.CheckL1GenesisBlockHash(context.TODO(), &mockClient) + err := config.CheckL1GenesisBlockHash(context.Background(), logger, &mockClient) assert.NoError(t, err) mockClient.Hash = common.Hash{0x02} - err = config.CheckL1GenesisBlockHash(context.TODO(), &mockClient) + err = config.CheckL1GenesisBlockHash(context.Background(), logger, &mockClient) assert.Error(t, err) mockClient.Hash = common.Hash{0x00} - err = config.CheckL1GenesisBlockHash(context.TODO(), &mockClient) + err = config.CheckL1GenesisBlockHash(context.Background(), logger, &mockClient) assert.Error(t, err) + + mockClient.err = errors.New("block not found") + err = config.CheckL1GenesisBlockHash(context.Background(), logger, &mockClient) + assert.NoError(t, err) } // 
TestRandomConfigDescription tests that the description works for different variations of a random rollup config. diff --git a/op-node/service.go b/op-node/service.go index 1eec481158cce..944cba634e514 100644 --- a/op-node/service.go +++ b/op-node/service.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "math/big" "os" "strings" @@ -12,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" altda "github.com/ethereum-optimism/optimism/op-alt-da" "github.com/ethereum-optimism/optimism/op-node/chaincfg" @@ -23,7 +25,9 @@ import ( "github.com/ethereum-optimism/optimism/op-node/rollup/engine" "github.com/ethereum-optimism/optimism/op-node/rollup/interop" "github.com/ethereum-optimism/optimism/op-node/rollup/sync" + "github.com/ethereum-optimism/optimism/op-service/eth" opflags "github.com/ethereum-optimism/optimism/op-service/flags" + "github.com/ethereum-optimism/optimism/op-service/jsonutil" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" "github.com/ethereum-optimism/optimism/op-service/oppprof" "github.com/ethereum-optimism/optimism/op-service/rpc" @@ -42,6 +46,11 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*config.Config, error) { return nil, err } + l1ChainConfig, err := NewL1ChainConfig(rollupConfig.L1ChainID, ctx, log) + if err != nil { + return nil, err + } + depSet, err := NewDependencySetFromCLI(ctx) if err != nil { return nil, err @@ -92,6 +101,7 @@ func NewConfig(ctx *cli.Context, log log.Logger) (*config.Config, error) { cfg := &config.Config{ L1: l1Endpoint, L2: l2Endpoint, + L1ChainConfig: l1ChainConfig, Rollup: *rollupConfig, DependencySet: depSet, Driver: *driverConfig, @@ -281,12 +291,60 @@ func applyOverrides(ctx *cli.Context, rollupConfig *rollup.Config) { isthmus := ctx.Uint64(opflags.IsthmusOverrideFlagName) rollupConfig.IsthmusTime = &isthmus } + if ctx.IsSet(opflags.JovianOverrideFlagName) { + jovian := 
ctx.Uint64(opflags.JovianOverrideFlagName) + rollupConfig.JovianTime = &jovian + } if ctx.IsSet(opflags.InteropOverrideFlagName) { interop := ctx.Uint64(opflags.InteropOverrideFlagName) rollupConfig.InteropTime = &interop } } +func NewL1ChainConfig(chainId *big.Int, ctx *cli.Context, log log.Logger) (*params.ChainConfig, error) { + if chainId == nil { + panic("l1 chain id is nil") + } + + if cfg := eth.L1ChainConfigByChainID(eth.ChainIDFromBig(chainId)); cfg != nil { + return cfg, nil + } + + // if the chain id is not known, we fallback to the CLI config + cf, err := NewL1ChainConfigFromCLI(log, ctx) + if err != nil { + return nil, err + } + if cf.ChainID.Cmp(chainId) != 0 { + return nil, fmt.Errorf("l1 chain config chain ID mismatch: %v != %v", cf.ChainID, chainId) + } + if cf.BlobScheduleConfig == nil { + return nil, fmt.Errorf("L1 chain config does not have a blob schedule config") + } + return cf, nil +} + +func NewL1ChainConfigFromCLI(log log.Logger, ctx *cli.Context) (*params.ChainConfig, error) { + l1ChainConfigPath := ctx.String(flags.L1ChainConfig.Name) + file, err := os.Open(l1ChainConfigPath) + if err != nil { + return nil, fmt.Errorf("failed to read chain spec: %w", err) + } + defer file.Close() + + // Attempt to decode directly as a ChainConfig + var chainConfig params.ChainConfig + dec := json.NewDecoder(file) + dec.DisallowUnknownFields() + if err := dec.Decode(&chainConfig); err == nil { + return &chainConfig, nil + } + + // If that fails, try to load the config from the .config property. 
+ // This should work if the provided file is a genesis file / chainspec + return jsonutil.LoadJSONFieldStrict[params.ChainConfig](l1ChainConfigPath, "config") +} + func NewDependencySetFromCLI(ctx *cli.Context) (depset.DependencySet, error) { if !ctx.IsSet(flags.InteropDependencySet.Name) { return nil, nil diff --git a/op-node/service_l1_chain_config_test.go b/op-node/service_l1_chain_config_test.go new file mode 100644 index 0000000000000..27e8d66832418 --- /dev/null +++ b/op-node/service_l1_chain_config_test.go @@ -0,0 +1,121 @@ +package opnode + +import ( + "encoding/json" + "math/big" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + "github.com/urfave/cli/v2" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + + nodeflags "github.com/ethereum-optimism/optimism/op-node/flags" +) + +func TestNewL1ChainConfig_KnownChains(t *testing.T) { + logger := log.New() + app := cli.NewApp() + ctx := cli.NewContext(app, nil, nil) + + t.Run("mainnet", func(t *testing.T) { + cfg, err := NewL1ChainConfig(new(big.Int).Set(params.MainnetChainConfig.ChainID), ctx, logger) + require.NoError(t, err) + require.Equal(t, params.MainnetChainConfig, cfg) + }) + + t.Run("sepolia", func(t *testing.T) { + cfg, err := NewL1ChainConfig(new(big.Int).Set(params.SepoliaChainConfig.ChainID), ctx, logger) + require.NoError(t, err) + require.Equal(t, params.SepoliaChainConfig, cfg) + }) +} + +func TestNewL1ChainConfig_CustomDirectAndEmbeddedAndNil(t *testing.T) { + logger := log.New() + + testChainID := big.NewInt(424242) + + // Build a minimal custom ChainConfig + custom := ¶ms.ChainConfig{ + ChainID: testChainID, + BlobScheduleConfig: ¶ms.BlobScheduleConfig{}, + } + + customFaulty := ¶ms.ChainConfig{ + ChainID: testChainID, + BlobScheduleConfig: nil, + } + + // Prepare temp dir + dir := t.TempDir() + + encode := func(path string, cfg any) { + f, err := os.Create(path) + require.NoError(t, err) + enc := json.NewEncoder(f) + err = 
enc.Encode(cfg) + require.NoError(t, err) + require.NoError(t, f.Close()) + } + + // Direct JSON file containing a ChainConfig + directPath := filepath.Join(dir, "chainconfig.json") + encode(directPath, custom) + + directFaultyPath := filepath.Join(dir, "chainconfig_faulty.json") + encode(directFaultyPath, customFaulty) + + // Embedded JSON file that contains { "config": } + embeddedPath := filepath.Join(dir, "genesis_like.json") + type wrapper struct { + Config *params.ChainConfig `json:"config"` + } + encode(embeddedPath, wrapper{Config: custom}) + + // Helper to run the CLI with a given file path + runWithPath := func(path string) (*params.ChainConfig, error) { + app := cli.NewApp() + app.Flags = []cli.Flag{nodeflags.L1ChainConfig} + var out *params.ChainConfig + app.Action = func(ctx *cli.Context) error { + cfg, err := NewL1ChainConfig(testChainID, ctx, logger) + out = cfg + return err + } + // run with arg: --rollup.l1-chain-config + err := app.Run([]string{"op-node", "--" + nodeflags.L1ChainConfig.Name, path}) + return out, err + } + + t.Run("custom-direct", func(t *testing.T) { + cfg, err := runWithPath(directPath) + require.NoError(t, err) + require.NotNil(t, cfg) + require.Equal(t, custom.ChainID, cfg.ChainID) + }) + + t.Run("custom-embedded", func(t *testing.T) { + cfg, err := runWithPath(embeddedPath) + require.NoError(t, err) + require.NotNil(t, cfg) + require.Equal(t, custom.ChainID, cfg.ChainID) + }) + + t.Run("nil-chainid-panics", func(t *testing.T) { + app := cli.NewApp() + ctx := cli.NewContext(app, nil, nil) + require.Panics(t, func() { + _, _ = NewL1ChainConfig(nil, ctx, logger) + }) + }) + + t.Run("nil-blob-schedule-config-returns-error", func(t *testing.T) { + cfg, err := runWithPath(directFaultyPath) + require.Nil(t, cfg) + require.Error(t, err) + }) +} diff --git a/op-program/chainconfig/chaincfg.go b/op-program/chainconfig/chaincfg.go index a08fff8c287b9..41c8be28e7e70 100644 --- a/op-program/chainconfig/chaincfg.go +++ 
b/op-program/chainconfig/chaincfg.go @@ -25,7 +25,7 @@ var ( // OPSepoliaChainConfig loads the op-sepolia chain config. This is intended for tests that need an arbitrary, valid chain config. func OPSepoliaChainConfig() *params.ChainConfig { - return mustLoadChainConfig("op-sepolia") + return mustLoadL2ChainConfig("op-sepolia") } //go:embed configs/*json @@ -78,17 +78,17 @@ func rollupConfigByChainID(chainID eth.ChainID, customChainFS embed.FS) (*rollup return &customRollupConfig, customRollupConfig.ParseRollupConfig(file) } -// ChainConfigByChainID locates the genesis chain config from either the superchain-registry or the embed. +// L2ChainConfigByChainID locates the genesis chain config from either the superchain-registry or the embed. // Returns ErrMissingChainConfig if the chain config is not found. -func ChainConfigByChainID(chainID eth.ChainID) (*params.ChainConfig, error) { +func L2ChainConfigByChainID(chainID eth.ChainID) (*params.ChainConfig, error) { config, err := superutil.LoadOPStackChainConfigFromChainID(eth.EvilChainIDToUInt64(chainID)) if err == nil { return config, err } - return chainConfigByChainID(chainID, customChainConfigFS) + return l2ChainConfigByChainID(chainID, customChainConfigFS) } -func chainConfigByChainID(chainID eth.ChainID, customChainFS embed.FS) (*params.ChainConfig, error) { +func l2ChainConfigByChainID(chainID eth.ChainID, customChainFS embed.FS) (*params.ChainConfig, error) { // Load from custom chain configs from embed FS data, err := customChainFS.ReadFile(fmt.Sprintf("configs/%v-genesis-l2.json", chainID)) if errors.Is(err, os.ErrNotExist) { @@ -104,12 +104,36 @@ func chainConfigByChainID(chainID eth.ChainID, customChainFS embed.FS) (*params. 
return genesis.Config, nil } -func mustLoadChainConfig(name string) *params.ChainConfig { +func L1ChainConfigByChainID(chainID eth.ChainID) (*params.ChainConfig, error) { + if cfg := eth.L1ChainConfigByChainID(chainID); cfg != nil { + return cfg, nil + } + // if the l1 chain id is not known, we fallback to the custom chain config + return l1ChainConfigByChainID(chainID, customChainConfigFS) +} + +func l1ChainConfigByChainID(chainID eth.ChainID, customChainFS embed.FS) (*params.ChainConfig, error) { + // Load from custom chain configs from embed FS + data, err := customChainFS.ReadFile(fmt.Sprintf("configs/%v-genesis-l1.json", chainID)) + if errors.Is(err, os.ErrNotExist) { + return nil, fmt.Errorf("%w: no chain config available for chain ID: %v", ErrMissingChainConfig, chainID) + } else if err != nil { + return nil, fmt.Errorf("failed to get chain config for chain ID %v: %w", chainID, err) + } + var genesis core.Genesis + err = json.Unmarshal(data, &genesis) + if err != nil { + return nil, fmt.Errorf("failed to parse chain config for chain ID %v: %w", chainID, err) + } + return genesis.Config, nil +} + +func mustLoadL2ChainConfig(name string) *params.ChainConfig { chainCfg := chaincfg.ChainByName(name) if chainCfg == nil { panic(fmt.Errorf("%w: unknown chain config %q", errChainNotFound, name)) } - cfg, err := ChainConfigByChainID(eth.ChainIDFromUInt64(chainCfg.ChainID)) + cfg, err := L2ChainConfigByChainID(eth.ChainIDFromUInt64(chainCfg.ChainID)) if err != nil { panic(fmt.Errorf("failed to load rollup config: %q: %w", name, err)) } @@ -159,18 +183,23 @@ func checkConfigFilenames(customChainFS embed.FS, configPath string) error { return fmt.Errorf("failed to check custom configs directory: %w", err) } var rollupChainIDs []eth.ChainID - var genesisChainIDs []eth.ChainID + var l2genesisChainIDs []eth.ChainID for _, entry := range entries { entryName := entry.Name() switch { case "placeholder.json" == entryName: case "depsets.json" == entryName: + case 
strings.HasSuffix(entryName, "-genesis-l1.json"): + _, err := eth.ParseDecimalChainID(strings.TrimSuffix(entry.Name(), "-genesis-l1.json")) + if err != nil { + return fmt.Errorf("incorrectly named genesis-l1 config (%s). expected -genesis-l1.json: %w", entryName, err) + } case strings.HasSuffix(entryName, "-genesis-l2.json"): id, err := eth.ParseDecimalChainID(strings.TrimSuffix(entry.Name(), "-genesis-l2.json")) if err != nil { return fmt.Errorf("incorrectly named genesis-l2 config (%s). expected -genesis-l2.json: %w", entryName, err) } - genesisChainIDs = append(genesisChainIDs, id) + l2genesisChainIDs = append(l2genesisChainIDs, id) case strings.HasSuffix(entryName, "-rollup.json"): id, err := eth.ParseDecimalChainID(strings.TrimSuffix(entry.Name(), "-rollup.json")) if err != nil { @@ -181,8 +210,9 @@ func checkConfigFilenames(customChainFS embed.FS, configPath string) error { return fmt.Errorf("invalid config file name: %s, Make sure that the only files in the custom config directory are placeholder.json, depsets.json, -genesis-l2.json or -rollup.json", entryName) } } - if !slices.Equal(rollupChainIDs, genesisChainIDs) { - return fmt.Errorf("mismatched chain IDs in custom configs: rollup chain IDs %v, genesis chain IDs %v. Make sure that the rollup and genesis configs have the same set of chain IDs prefixes", rollupChainIDs, genesisChainIDs) + if !slices.Equal(rollupChainIDs, l2genesisChainIDs) { + return fmt.Errorf("mismatched chain IDs in custom configs: rollup chain IDs %v, l2 genesis chain IDs %v. 
Make sure that the rollup and l2 genesis configs have the same set of chain IDs prefixes", rollupChainIDs, l2genesisChainIDs) } + return nil } diff --git a/op-program/chainconfig/chaincfg_test.go b/op-program/chainconfig/chaincfg_test.go index c84909c408cef..6b4246fdd085a 100644 --- a/op-program/chainconfig/chaincfg_test.go +++ b/op-program/chainconfig/chaincfg_test.go @@ -27,16 +27,44 @@ func TestGetCustomRollupConfig_Missing(t *testing.T) { // TestGetCustomChainConfig tests loading the custom chain configs from test embed FS. func TestGetCustomChainConfig(t *testing.T) { - config, err := chainConfigByChainID(eth.ChainIDFromUInt64(901), test.TestCustomChainConfigFS) + config, err := l2ChainConfigByChainID(eth.ChainIDFromUInt64(901), test.TestCustomChainConfigFS) require.NoError(t, err) require.Equal(t, config.ChainID.Uint64(), uint64(901)) - _, err = chainConfigByChainID(eth.ChainIDFromUInt64(900), test.TestCustomChainConfigFS) + _, err = l2ChainConfigByChainID(eth.ChainIDFromUInt64(900), test.TestCustomChainConfigFS) require.Error(t, err) } func TestGetCustomChainConfig_Missing(t *testing.T) { - _, err := chainConfigByChainID(eth.ChainIDFromUInt64(11111), test.TestCustomChainConfigFS) + _, err := l2ChainConfigByChainID(eth.ChainIDFromUInt64(11111), test.TestCustomChainConfigFS) + require.ErrorIs(t, err, ErrMissingChainConfig) +} + +func TestGetCustomL1ChainConfig(t *testing.T) { + config, err := l1ChainConfigByChainID(eth.ChainIDFromUInt64(900), test.TestCustomChainConfigFS) + require.NoError(t, err) + require.Equal(t, config.ChainID.Uint64(), uint64(900)) +} + +func TestGetCustomL1ChainConfig_Missing(t *testing.T) { + _, err := l1ChainConfigByChainID(eth.ChainIDFromUInt64(11111), test.TestCustomChainConfigFS) + require.ErrorIs(t, err, ErrMissingChainConfig) +} + +func TestGetCustomL1ChainConfig_KnownChainID(t *testing.T) { + knownChainIds := []eth.ChainID{ + eth.ChainIDFromUInt64(1), // Mainnet + eth.ChainIDFromUInt64(11155111), // Sepolia + 
eth.ChainIDFromUInt64(17000), // Holesky + eth.ChainIDFromUInt64(560048), // Hoodi + } + for _, chainID := range knownChainIds { + cfg, err := L1ChainConfigByChainID(chainID) + require.NoError(t, err) + require.True(t, chainID.Cmp(eth.ChainIDFromBig(cfg.ChainID)) == 0) + } + unknownChainId := eth.ChainIDFromUInt64(11111) + _, err := L1ChainConfigByChainID(unknownChainId) require.ErrorIs(t, err, ErrMissingChainConfig) } @@ -79,6 +107,16 @@ func TestCheckConfigFilenames(t *testing.T) { require.NoError(t, err) } +func TestCheckConfigFilenames_WithoutCustomL1Genesis(t *testing.T) { + err := checkConfigFilenames(test.TestCustomChainConfigNoL1FS, "configs_no_l1") + require.NoError(t, err) +} + +func TestCheckConfigFilenames_MultipleL1Genesis(t *testing.T) { + err := checkConfigFilenames(test.TestCustomChainConfigMultipleL1FS, "configs_multiple_l1") + require.NoError(t, err) +} + func TestCheckConfigFilenames_Missing(t *testing.T) { err := checkConfigFilenames(test.TestCustomChainConfigEmptyFS, "configs_empty") require.NoError(t, err) diff --git a/op-program/chainconfig/test/configs/900-genesis-l1.json b/op-program/chainconfig/test/configs/900-genesis-l1.json new file mode 100644 index 0000000000000..55d82624362c7 --- /dev/null +++ b/op-program/chainconfig/test/configs/900-genesis-l1.json @@ -0,0 +1,32 @@ +{ + "config": { + "chainId": 900, + "homesteadBlock": 0, + "eip150Block": 0, + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "muirGlacierBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "arrowGlacierBlock": 0, + "grayGlacierBlock": 0, + "parisBlock": 0, + "shanghaiTime": 0, + "cancunTime": 0 + }, + "nonce": "0x0", + "timestamp": "0x0", + "extraData": "0x", + "gasLimit": "0x1c9c380", + "difficulty": "0x0", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "alloc": {}, + "number": 
"0x0", + "gasUsed": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000" +} diff --git a/op-program/chainconfig/test/configs_multiple_l1/899-genesis-l1.json b/op-program/chainconfig/test/configs_multiple_l1/899-genesis-l1.json new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/op-program/chainconfig/test/configs_multiple_l1/900-genesis-l1.json b/op-program/chainconfig/test/configs_multiple_l1/900-genesis-l1.json new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/op-program/chainconfig/test/configs_no_l1/901-genesis-l2.json b/op-program/chainconfig/test/configs_no_l1/901-genesis-l2.json new file mode 100644 index 0000000000000..0967ef424bce6 --- /dev/null +++ b/op-program/chainconfig/test/configs_no_l1/901-genesis-l2.json @@ -0,0 +1 @@ +{} diff --git a/op-program/chainconfig/test/configs_no_l1/901-rollup.json b/op-program/chainconfig/test/configs_no_l1/901-rollup.json new file mode 100644 index 0000000000000..0967ef424bce6 --- /dev/null +++ b/op-program/chainconfig/test/configs_no_l1/901-rollup.json @@ -0,0 +1 @@ +{} diff --git a/op-program/chainconfig/test/test.go b/op-program/chainconfig/test/test.go index 9d74fd9aff592..0e9ff6907a193 100644 --- a/op-program/chainconfig/test/test.go +++ b/op-program/chainconfig/test/test.go @@ -10,3 +10,9 @@ var TestCustomChainConfigEmptyFS embed.FS //go:embed configs_typo/*json var TestCustomChainConfigTypoFS embed.FS + +//go:embed configs_no_l1/*.json +var TestCustomChainConfigNoL1FS embed.FS + +//go:embed configs_multiple_l1/*.json +var TestCustomChainConfigMultipleL1FS embed.FS diff --git a/op-program/client/boot/boot.go b/op-program/client/boot/boot.go index 3520771f0bfe1..220a9b253b4c4 100644 --- a/op-program/client/boot/boot.go +++ b/op-program/client/boot/boot.go @@ -3,6 +3,7 @@ package boot import ( "encoding/binary" "encoding/json" + "fmt" "math" "github.com/ethereum-optimism/optimism/op-node/rollup" @@ -24,6 +25,7 @@ type BootInfo struct { 
L2ChainConfig *params.ChainConfig RollupConfig *rollup.Config + L1ChainConfig *params.ChainConfig } type BootstrapClient struct { @@ -41,6 +43,7 @@ func (br *BootstrapClient) BootInfo() *BootInfo { l2ClaimBlockNumber := binary.BigEndian.Uint64(br.r.Get(L2ClaimBlockNumberLocalIndex)) l2ChainID := eth.ChainIDFromUInt64(binary.BigEndian.Uint64(br.r.Get(L2ChainIDLocalIndex))) + var l1ChainConfig *params.ChainConfig var l2ChainConfig *params.ChainConfig var rollupConfig *rollup.Config if l2ChainID == CustomChainIDIndicator { @@ -54,13 +57,27 @@ func (br *BootstrapClient) BootInfo() *BootInfo { if err != nil { panic("failed to bootstrap rollup config") } + l1ChainConfig = new(params.ChainConfig) + err = json.Unmarshal(br.r.Get(L1ChainConfigLocalIndex), l1ChainConfig) + if err != nil { + panic("failed to bootstrap l1ChainConfig: " + fmt.Sprintf("%v", err)) + } + if l1ChainConfig.ChainID.Cmp(rollupConfig.L1ChainID) != 0 { + panic(fmt.Sprintf("l1ChainConfig chain ID does not match rollup config L1 chain ID: %v != %v", + l1ChainConfig.ChainID, rollupConfig.L1ChainID)) + } } else { var err error rollupConfig, err = chainconfig.RollupConfigByChainID(l2ChainID) if err != nil { panic(err) } - l2ChainConfig, err = chainconfig.ChainConfigByChainID(l2ChainID) + l1ChainID := eth.ChainIDFromBig(rollupConfig.L1ChainID) + l1ChainConfig, err = chainconfig.L1ChainConfigByChainID(l1ChainID) + if err != nil { + panic(err) + } + l2ChainConfig, err = chainconfig.L2ChainConfigByChainID(l2ChainID) if err != nil { panic(err) } @@ -74,5 +91,6 @@ func (br *BootstrapClient) BootInfo() *BootInfo { L2ChainID: l2ChainID, L2ChainConfig: l2ChainConfig, RollupConfig: rollupConfig, + L1ChainConfig: l1ChainConfig, } } diff --git a/op-program/client/boot/boot_interop.go b/op-program/client/boot/boot_interop.go index dcf8d44447a07..2cea211b83eff 100644 --- a/op-program/client/boot/boot_interop.go +++ b/op-program/client/boot/boot_interop.go @@ -15,7 +15,8 @@ import ( ) var ( - ErrUnknownChainID = 
errors.New("unknown chain id") + ErrUnknownChainID = errors.New("unknown chain id") + ErrL1ChainConfigMismatch = errors.New("l1 chain config chain ID mismatch") ) type BootInfoInterop struct { @@ -30,6 +31,7 @@ type BootInfoInterop struct { type ConfigSource interface { RollupConfig(chainID eth.ChainID) (*rollup.Config, error) ChainConfig(chainID eth.ChainID) (*params.ChainConfig, error) + L1ChainConfig(chainID eth.ChainID) (*params.ChainConfig, error) DependencySet(chainID eth.ChainID) (depset.DependencySet, error) } @@ -38,6 +40,7 @@ type OracleConfigSource struct { customConfigsLoaded bool + l1ChainConfig *params.ChainConfig l2ChainConfigs map[eth.ChainID]*params.ChainConfig rollupConfigs map[eth.ChainID]*rollup.Config depset depset.DependencySet @@ -66,7 +69,7 @@ func (c *OracleConfigSource) ChainConfig(chainID eth.ChainID) (*params.ChainConf if cfg, ok := c.l2ChainConfigs[chainID]; ok { return cfg, nil } - cfg, err := chainconfig.ChainConfigByChainID(chainID) + cfg, err := chainconfig.L2ChainConfigByChainID(chainID) if !c.customConfigsLoaded && errors.Is(err, chainconfig.ErrMissingChainConfig) { c.loadCustomConfigs() if cfg, ok := c.l2ChainConfigs[chainID]; !ok { @@ -99,6 +102,21 @@ func (c *OracleConfigSource) DependencySet(chainID eth.ChainID) (depset.Dependen return c.depset, nil } +func (c *OracleConfigSource) L1ChainConfig(chainID eth.ChainID) (*params.ChainConfig, error) { + if c.l1ChainConfig != nil { + if c.l1ChainConfig.ChainID.Cmp(chainID.ToBig()) != 0 { + panic(fmt.Errorf("%w: %v != %v", ErrL1ChainConfigMismatch, c.l1ChainConfig.ChainID, chainID)) + } + return c.l1ChainConfig, nil + } + cfg, err := chainconfig.L1ChainConfigByChainID(chainID) + if err != nil { + return nil, err + } + c.l1ChainConfig = cfg + return cfg, nil +} + func (c *OracleConfigSource) loadCustomConfigs() { var rollupConfigs []*rollup.Config err := json.Unmarshal(c.oracle.Get(RollupConfigLocalIndex), &rollupConfigs) @@ -125,6 +143,13 @@ func (c *OracleConfigSource) 
loadCustomConfigs() { } c.depset = &depset c.customConfigsLoaded = true + + var l1ChainConfig *params.ChainConfig + err = json.Unmarshal(c.oracle.Get(L1ChainConfigLocalIndex), &l1ChainConfig) + if err != nil { + panic("failed to bootstrap l1 chain configs: " + fmt.Sprintf("%v", err)) + } + c.l1ChainConfig = l1ChainConfig } func BootstrapInterop(r oracleClient) *BootInfoInterop { diff --git a/op-program/client/boot/boot_interop_test.go b/op-program/client/boot/boot_interop_test.go index 773ce6bda7b98..7db252520c56f 100644 --- a/op-program/client/boot/boot_interop_test.go +++ b/op-program/client/boot/boot_interop_test.go @@ -24,7 +24,7 @@ func TestInteropBootstrap_SimpleValues(t *testing.T) { Claim: common.Hash{0xcc}, GameTimestamp: 49829482, } - mockOracle := newMockInteropBootstrapOracle(expected, false) + mockOracle := newMockInteropBootstrapOracle(expected, false, params.SepoliaChainConfig) actual := BootstrapInterop(mockOracle) require.Equal(t, expected.L1Head, actual.L1Head) require.Equal(t, expected.AgreedPrestate, actual.AgreedPrestate) @@ -40,7 +40,7 @@ func TestInteropBootstrap_RollupConfigBuiltIn(t *testing.T) { Claim: common.Hash{0xcc}, GameTimestamp: 49829482, } - mockOracle := newMockInteropBootstrapOracle(expected, false) + mockOracle := newMockInteropBootstrapOracle(expected, false, params.SepoliaChainConfig) actual := BootstrapInterop(mockOracle) actualCfg, err := actual.Configs.RollupConfig(eth.ChainIDFromBig(expectedCfg.L2ChainID)) require.NoError(t, err) @@ -56,7 +56,7 @@ func TestInteropBootstrap_RollupConfigCustom(t *testing.T) { Claim: common.Hash{0xcc}, GameTimestamp: 49829482, } - mockOracle := newMockInteropBootstrapOracle(source, true) + mockOracle := newMockInteropBootstrapOracle(source, true, params.SepoliaChainConfig) mockOracle.rollupCfgs = []*rollup.Config{config1, config2} actual := BootstrapInterop(mockOracle) actualCfg, err := actual.Configs.RollupConfig(eth.ChainIDFromBig(config1.L2ChainID)) @@ -76,7 +76,7 @@ func 
TestInteropBootstrap_ChainConfigBuiltIn(t *testing.T) { Claim: common.Hash{0xcc}, GameTimestamp: 49829482, } - mockOracle := newMockInteropBootstrapOracle(expected, false) + mockOracle := newMockInteropBootstrapOracle(expected, false, params.SepoliaChainConfig) actual := BootstrapInterop(mockOracle) actualCfg, err := actual.Configs.ChainConfig(eth.ChainIDFromBig(expectedCfg.ChainID)) require.NoError(t, err) @@ -92,7 +92,7 @@ func TestInteropBootstrap_ChainConfigCustom(t *testing.T) { Claim: common.Hash{0xcc}, GameTimestamp: 49829482, } - mockOracle := newMockInteropBootstrapOracle(expected, true) + mockOracle := newMockInteropBootstrapOracle(expected, true, params.SepoliaChainConfig) mockOracle.chainCfgs = []*params.ChainConfig{config1, config2} mockOracle.depset, _ = depset.NewStaticConfigDependencySet(map[eth.ChainID]*depset.StaticConfigDependency{ eth.ChainIDFromBig(config1.ChainID): {}, @@ -107,6 +107,11 @@ func TestInteropBootstrap_ChainConfigCustom(t *testing.T) { actualCfg, err = actual.Configs.ChainConfig(eth.ChainIDFromBig(config2.ChainID)) require.NoError(t, err) require.Equal(t, config2, actualCfg) + + actualCfg, err = actual.Configs.L1ChainConfig(eth.ChainIDFromBig(params.SepoliaChainConfig.ChainID)) + require.NoError(t, err) + require.Equal(t, params.SepoliaChainConfig, actualCfg) + } func TestInteropBootstrap_DependencySetCustom(t *testing.T) { @@ -118,7 +123,7 @@ func TestInteropBootstrap_DependencySetCustom(t *testing.T) { Claim: common.Hash{0xcc}, GameTimestamp: 49829482, } - mockOracle := newMockInteropBootstrapOracle(expected, true) + mockOracle := newMockInteropBootstrapOracle(expected, true, params.SepoliaChainConfig) var err error mockOracle.depset, err = depset.NewStaticConfigDependencySet(map[eth.ChainID]*depset.StaticConfigDependency{ eth.ChainIDFromBig(config1.ChainID): {}, @@ -132,7 +137,7 @@ func TestInteropBootstrap_DependencySetCustom(t *testing.T) { require.Equal(t, mockOracle.depset, depset) } -func newMockInteropBootstrapOracle(b 
*BootInfoInterop, custom bool) *mockInteropBootstrapOracle { +func newMockInteropBootstrapOracle(b *BootInfoInterop, custom bool, l1ChainCfg *params.ChainConfig) *mockInteropBootstrapOracle { return &mockInteropBootstrapOracle{ mockBootstrapOracle: mockBootstrapOracle{ l1Head: b.L1Head, @@ -140,7 +145,8 @@ func newMockInteropBootstrapOracle(b *BootInfoInterop, custom bool) *mockInterop l2Claim: b.Claim, l2ClaimBlockNumber: b.GameTimestamp, }, - custom: custom, + custom: custom, + l1ChainCfg: l1ChainCfg, } } @@ -148,6 +154,7 @@ type mockInteropBootstrapOracle struct { mockBootstrapOracle rollupCfgs []*rollup.Config chainCfgs []*params.ChainConfig + l1ChainCfg *params.ChainConfig depset *depset.StaticConfigDependencySet custom bool } @@ -174,6 +181,12 @@ func (o *mockInteropBootstrapOracle) Get(key preimage.Key) []byte { } b, _ := json.Marshal(o.depset) return b + case L1ChainConfigLocalIndex.PreimageKey(): + if !o.custom { + panic(fmt.Sprintf("unexpected oracle request for preimage key %x", key.PreimageKey())) + } + b, _ := json.Marshal(o.l1ChainCfg) + return b default: return o.mockBootstrapOracle.Get(key) } diff --git a/op-program/client/boot/boot_test.go b/op-program/client/boot/boot_test.go index 3d2be5512d740..df052e2aae5d6 100644 --- a/op-program/client/boot/boot_test.go +++ b/op-program/client/boot/boot_test.go @@ -11,6 +11,7 @@ import ( "github.com/ethereum-optimism/optimism/op-program/chainconfig" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/params" "github.com/stretchr/testify/require" ) @@ -22,6 +23,7 @@ func TestBootstrapClient(t *testing.T) { L2Claim: common.HexToHash("0x3333"), L2ClaimBlockNumber: 1, L2ChainID: eth.ChainIDFromBig(rollupCfg.L2ChainID), + L1ChainConfig: params.SepoliaChainConfig, L2ChainConfig: chainconfig.OPSepoliaChainConfig(), RollupConfig: rollupCfg, } @@ -37,6 +39,7 @@ func TestBootstrapClient_CustomChain(t *testing.T) { L2Claim: 
common.HexToHash("0x3333"), L2ClaimBlockNumber: 1, L2ChainID: CustomChainIDIndicator, + L1ChainConfig: params.SepoliaChainConfig, L2ChainConfig: chainconfig.OPSepoliaChainConfig(), RollupConfig: chaincfg.OPSepolia(), } @@ -45,6 +48,21 @@ func TestBootstrapClient_CustomChain(t *testing.T) { require.EqualValues(t, bootInfo, readBootInfo) } +func TestBootstrapClient_CustomChain_L1ChainConfigMismatch(t *testing.T) { + bootInfo := &BootInfo{ + L1Head: common.HexToHash("0x1111"), + L2OutputRoot: common.HexToHash("0x2222"), + L2Claim: common.HexToHash("0x3333"), + L2ClaimBlockNumber: 1, + L2ChainID: CustomChainIDIndicator, + L1ChainConfig: params.MainnetChainConfig, + L2ChainConfig: chainconfig.OPSepoliaChainConfig(), + RollupConfig: chaincfg.OPSepolia(), + } + mockOracle := newMockPreinteropBootstrapOracle(bootInfo, true) + require.Panics(t, func() { NewBootstrapClient(mockOracle).BootInfo() }) +} + func TestBootstrapClient_UnknownChainPanics(t *testing.T) { bootInfo := &BootInfo{ L1Head: common.HexToHash("0x1111"), @@ -87,6 +105,12 @@ func (o *mockPreinteropBootstrapOracle) Get(key preimage.Key) []byte { } b, _ := json.Marshal(o.b.L2ChainConfig) return b + case L1ChainConfigLocalIndex.PreimageKey(): + if !o.custom { + panic(fmt.Sprintf("unexpected oracle request for preimage key %x", key.PreimageKey())) + } + b, _ := json.Marshal(o.b.L1ChainConfig) + return b case RollupConfigLocalIndex.PreimageKey(): if !o.custom { panic(fmt.Sprintf("unexpected oracle request for preimage key %x", key.PreimageKey())) diff --git a/op-program/client/boot/common.go b/op-program/client/boot/common.go index a81e97e26e186..d2c9ba859c738 100644 --- a/op-program/client/boot/common.go +++ b/op-program/client/boot/common.go @@ -13,6 +13,7 @@ const ( L2ChainConfigLocalIndex RollupConfigLocalIndex DependencySetLocalIndex + L1ChainConfigLocalIndex ) type oracleClient interface { diff --git a/op-program/client/cmd/godebug.go b/op-program/client/cmd/godebug.go new file mode 100644 index 
0000000000000..47bda43fa4a2e --- /dev/null +++ b/op-program/client/cmd/godebug.go @@ -0,0 +1,8 @@ +// Disable annotating anonymous memory mappings. Cannon doesn't support this syscall +// The directive (and functionality) only exists on go1.25 and above so this file is conditionally included. + +//go:build go1.25 + +//go:debug decoratemappings=0 + +package main diff --git a/op-program/client/driver/driver.go b/op-program/client/driver/driver.go index d68f03978ed1c..369d87036d86c 100644 --- a/op-program/client/driver/driver.go +++ b/op-program/client/driver/driver.go @@ -6,10 +6,12 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" altda "github.com/ethereum-optimism/optimism/op-alt-da" "github.com/ethereum-optimism/optimism/op-node/metrics" "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/attributes" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/engine" "github.com/ethereum-optimism/optimism/op-node/rollup/sync" @@ -33,36 +35,36 @@ type Driver struct { } func NewDriver(logger log.Logger, cfg *rollup.Config, depSet derive.DependencySet, l1Source derive.L1Fetcher, - l1BlobsSource derive.L1BlobsFetcher, l2Source engine.Engine, targetBlockNum uint64) *Driver { + l1BlobsSource derive.L1BlobsFetcher, l2Source engine.Engine, targetBlockNum uint64, l1ChainConfig *params.ChainConfig) *Driver { d := &Driver{ logger: logger, } - pipeline := derive.NewDerivationPipeline(logger, cfg, depSet, l1Source, l1BlobsSource, altda.Disabled, l2Source, metrics.NoopMetrics, false) + pipeline := derive.NewDerivationPipeline(logger, cfg, depSet, l1Source, l1BlobsSource, altda.Disabled, l2Source, metrics.NoopMetrics, false, l1ChainConfig) pipelineDeriver := derive.NewPipelineDeriver(context.Background(), pipeline) pipelineDeriver.AttachEmitter(d) - ec := 
engine.NewEngineController(l2Source, logger, metrics.NoopMetrics, cfg, &sync.Config{SyncMode: sync.CLSync}, d) - engineDeriv := engine.NewEngDeriver(logger, context.Background(), cfg, metrics.NoopMetrics, ec) - engineDeriv.AttachEmitter(d) syncCfg := &sync.Config{SyncMode: sync.CLSync} - engResetDeriv := engine.NewEngineResetDeriver(context.Background(), logger, cfg, l1Source, l2Source, syncCfg) - engResetDeriv.AttachEmitter(d) + ec := engine.NewEngineController(context.Background(), l2Source, logger, metrics.NoopMetrics, cfg, syncCfg, l1Source, d) + + attrHandler := attributes.NewAttributesHandler(logger, cfg, context.Background(), l2Source, ec) + ec.SetAttributesResetter(attrHandler) + ec.SetPipelineResetter(pipelineDeriver) prog := &ProgramDeriver{ - logger: logger, - Emitter: d, - closing: false, - result: eth.L2BlockRef{}, - targetBlockNum: targetBlockNum, + logger: logger, + Emitter: d, + engineController: ec, + closing: false, + result: eth.L2BlockRef{}, + targetBlockNum: targetBlockNum, } d.deriver = &event.DeriverMux{ prog, - engineDeriv, + ec, pipelineDeriver, - engResetDeriv, } d.end = prog diff --git a/op-program/client/driver/program.go b/op-program/client/driver/program.go index 9b9e7ad2dbf81..4f1e23b15038d 100644 --- a/op-program/client/driver/program.go +++ b/op-program/client/driver/program.go @@ -13,6 +13,10 @@ import ( "github.com/ethereum-optimism/optimism/op-service/event" ) +type EngineController interface { + RequestPendingSafeUpdate(context.Context) +} + // ProgramDeriver expresses how engine and derivation events are // translated and monitored to execute the pure L1 to L2 state transition. 
// @@ -22,6 +26,8 @@ type ProgramDeriver struct { Emitter event.Emitter + engineController EngineController + closing bool result eth.L2BlockRef resultError error @@ -42,11 +48,11 @@ func (d *ProgramDeriver) OnEvent(ctx context.Context, ev event.Event) bool { d.Emitter.Emit(ctx, derive.ConfirmPipelineResetEvent{}) // After initial reset we can request the pending-safe block, // where attributes will be generated on top of. - d.Emitter.Emit(ctx, engine.PendingSafeRequestEvent{}) + d.engineController.RequestPendingSafeUpdate(ctx) case engine.PendingSafeUpdateEvent: d.Emitter.Emit(ctx, derive.PipelineStepEvent{PendingSafe: x.PendingSafe}) case derive.DeriverMoreEvent: - d.Emitter.Emit(ctx, engine.PendingSafeRequestEvent{}) + d.engineController.RequestPendingSafeUpdate(ctx) case derive.DerivedAttributesEvent: // Allow new attributes to be generated. // We will process the current attributes synchronously, @@ -59,7 +65,7 @@ func (d *ProgramDeriver) OnEvent(ctx context.Context, ev event.Event) bool { case engine.InvalidPayloadAttributesEvent: // If a set of attributes was invalid, then we drop the attributes, // and continue with the next. - d.Emitter.Emit(ctx, engine.PendingSafeRequestEvent{}) + d.engineController.RequestPendingSafeUpdate(ctx) case engine.ForkchoiceUpdateEvent: // Track latest head. 
if x.SafeL2Head.Number >= d.result.Number { @@ -94,7 +100,7 @@ func (d *ProgramDeriver) OnEvent(ctx context.Context, ev event.Event) bool { // (Legacy case): While most temporary errors are due to requests for external data failing which can't happen, // they may also be returned due to other events like channels timing out so need to be handled d.logger.Warn("Temporary error in derivation", "err", x.Err) - d.Emitter.Emit(ctx, engine.PendingSafeRequestEvent{}) + d.engineController.RequestPendingSafeUpdate(ctx) case rollup.CriticalErrorEvent: d.closing = true d.resultError = x.Err diff --git a/op-program/client/driver/program_test.go b/op-program/client/driver/program_test.go index 0b8b55ef0b6cf..b9401512d6f0c 100644 --- a/op-program/client/driver/program_test.go +++ b/op-program/client/driver/program_test.go @@ -23,14 +23,21 @@ var ( errTestCrit = errors.New("crit test err") ) +type fakeEngineController struct{} + +var _ EngineController = fakeEngineController{} + +func (fakeEngineController) RequestPendingSafeUpdate(ctx context.Context) {} + func TestProgramDeriver(t *testing.T) { newProgram := func(t *testing.T, target uint64) (*ProgramDeriver, *testutils.MockEmitter) { m := &testutils.MockEmitter{} logger := testlog.Logger(t, log.LevelInfo) prog := &ProgramDeriver{ - logger: logger, - Emitter: m, - targetBlockNum: target, + logger: logger, + engineController: fakeEngineController{}, + Emitter: m, + targetBlockNum: target, } return prog, m } @@ -39,7 +46,6 @@ func TestProgramDeriver(t *testing.T) { t.Run("engine reset confirmed", func(t *testing.T) { p, m := newProgram(t, 1000) m.ExpectOnce(derive.ConfirmPipelineResetEvent{}) - m.ExpectOnce(engine.PendingSafeRequestEvent{}) p.OnEvent(context.Background(), engine.EngineResetConfirmedEvent{}) m.AssertExpectations(t) require.False(t, p.closing) @@ -60,7 +66,6 @@ func TestProgramDeriver(t *testing.T) { // step 3: if no attributes are generated, loop back to derive more. 
t.Run("deriver more", func(t *testing.T) { p, m := newProgram(t, 1000) - m.ExpectOnce(engine.PendingSafeRequestEvent{}) p.OnEvent(context.Background(), derive.DeriverMoreEvent{}) m.AssertExpectations(t) require.False(t, p.closing) @@ -80,7 +85,6 @@ func TestProgramDeriver(t *testing.T) { // step 5: if attributes were invalid, continue with derivation for new attributes. t.Run("invalid payload", func(t *testing.T) { p, m := newProgram(t, 1000) - m.ExpectOnce(engine.PendingSafeRequestEvent{}) p.OnEvent(context.Background(), engine.InvalidPayloadAttributesEvent{Attributes: &derive.AttributesWithParent{}}) m.AssertExpectations(t) require.False(t, p.closing) @@ -113,49 +117,42 @@ func TestProgramDeriver(t *testing.T) { }) // Do not stop processing when the deriver is idle, the engine may still be busy and create further events. t.Run("deriver idle", func(t *testing.T) { - p, m := newProgram(t, 1000) + p, _ := newProgram(t, 1000) p.OnEvent(context.Background(), derive.DeriverIdleEvent{}) - m.AssertExpectations(t) require.False(t, p.closing) require.NoError(t, p.resultError) }) // on inconsistent chain data: stop with error t.Run("reset event", func(t *testing.T) { - p, m := newProgram(t, 1000) + p, _ := newProgram(t, 1000) p.OnEvent(context.Background(), rollup.ResetEvent{Err: errTestReset}) - m.AssertExpectations(t) require.True(t, p.closing) require.Error(t, p.resultError) }) // on L1 temporary error: stop with error t.Run("L1 temporary error event", func(t *testing.T) { - p, m := newProgram(t, 1000) + p, _ := newProgram(t, 1000) p.OnEvent(context.Background(), rollup.L1TemporaryErrorEvent{Err: errTestTemp}) - m.AssertExpectations(t) require.True(t, p.closing) require.Error(t, p.resultError) }) // on engine temporary error: continue derivation (because legacy, not all connection related) t.Run("engine temp error event", func(t *testing.T) { - p, m := newProgram(t, 1000) - m.ExpectOnce(engine.PendingSafeRequestEvent{}) + p, _ := newProgram(t, 1000) 
p.OnEvent(context.Background(), rollup.EngineTemporaryErrorEvent{Err: errTestTemp}) - m.AssertExpectations(t) require.False(t, p.closing) require.NoError(t, p.resultError) }) // on critical error: stop t.Run("critical error event", func(t *testing.T) { - p, m := newProgram(t, 1000) + p, _ := newProgram(t, 1000) p.OnEvent(context.Background(), rollup.ResetEvent{Err: errTestCrit}) - m.AssertExpectations(t) require.True(t, p.closing) require.Error(t, p.resultError) }) t.Run("unknown event", func(t *testing.T) { - p, m := newProgram(t, 1000) + p, _ := newProgram(t, 1000) p.OnEvent(context.Background(), TestEvent{}) - m.AssertExpectations(t) require.False(t, p.closing) require.NoError(t, p.resultError) }) diff --git a/op-program/client/interop/interop.go b/op-program/client/interop/interop.go index 00f191be35371..8fa13458feae8 100644 --- a/op-program/client/interop/interop.go +++ b/op-program/client/interop/interop.go @@ -38,6 +38,7 @@ type taskExecutor interface { RunDerivation( logger log.Logger, rollupCfg *rollup.Config, + l1ChainConfig *params.ChainConfig, depSet depset.DependencySet, l2ChainConfig *params.ChainConfig, l1Head common.Hash, @@ -107,7 +108,7 @@ func stateTransition(logger log.Logger, bootInfo *boot.BootInfoInterop, l1Preima } else if transitionState.Step == ConsolidateStep { logger.Info("Running consolidate step") // sanity check - if len(transitionState.PendingProgress) >= ConsolidateStep { + if len(transitionState.PendingProgress) > ConsolidateStep { return common.Hash{}, fmt.Errorf("%w: pending progress length does not match the expected step", ErrInvalidPrestate) } expectedSuperRoot, err := RunConsolidation( @@ -153,7 +154,11 @@ func deriveOptimisticBlock(logger log.Logger, bootInfo *boot.BootInfoInterop, l1 } l2ChainConfig, err := bootInfo.Configs.ChainConfig(chainAgreedPrestate.ChainID) if err != nil { - return types.OptimisticBlock{}, fmt.Errorf("no chain config available for chain ID %v: %w", chainAgreedPrestate.ChainID, err) + return 
types.OptimisticBlock{}, fmt.Errorf("no l2 chain config available for chain ID %v: %w", chainAgreedPrestate.ChainID, err) + } + l1ChainConfig, err := bootInfo.Configs.L1ChainConfig(eth.ChainIDFromBig(rollupCfg.L1ChainID)) + if err != nil { + return types.OptimisticBlock{}, fmt.Errorf("no l1 chain config available for chain ID %v: %w", eth.ChainIDFromBig(rollupCfg.L1ChainID), err) } depSet, err := bootInfo.Configs.DependencySet(chainAgreedPrestate.ChainID) if err != nil { @@ -166,6 +171,7 @@ func deriveOptimisticBlock(logger log.Logger, bootInfo *boot.BootInfoInterop, l1 derivationResult, err := tasks.RunDerivation( logger, rollupCfg, + l1ChainConfig, depSet, l2ChainConfig, bootInfo.L1Head, @@ -194,6 +200,7 @@ type interopTaskExecutor struct { func (t *interopTaskExecutor) RunDerivation( logger log.Logger, rollupCfg *rollup.Config, + l1ChainConfig *params.ChainConfig, depSet depset.DependencySet, l2ChainConfig *params.ChainConfig, l1Head common.Hash, @@ -205,6 +212,7 @@ func (t *interopTaskExecutor) RunDerivation( return tasks.RunDerivation( logger, rollupCfg, + l1ChainConfig, depSet, l2ChainConfig, l1Head, diff --git a/op-program/client/interop/interop_test.go b/op-program/client/interop/interop_test.go index e61aa18128975..527e7d04c8c8e 100644 --- a/op-program/client/interop/interop_test.go +++ b/op-program/client/interop/interop_test.go @@ -35,6 +35,7 @@ import ( type chainSetupOpts struct { expiryWindow uint64 + chainCount int } func WithExpiryWindow(window uint64) func(*chainSetupOpts) { @@ -43,49 +44,65 @@ func WithExpiryWindow(window uint64) func(*chainSetupOpts) { } } +func WithChainCount(count int) func(*chainSetupOpts) { + return func(opts *chainSetupOpts) { + opts.chainCount = count + } +} + func setupTwoChains(opts ...func(*chainSetupOpts)) (*staticConfigSource, *eth.SuperV1, *stubTasks) { + opts = append(opts, WithChainCount(2)) + return setupChains(opts...) 
+} + +func setupChains(opts ...func(setupOpts *chainSetupOpts)) (*staticConfigSource, *eth.SuperV1, *stubTasks) { chainSetupOpts := &chainSetupOpts{} for _, opt := range opts { opt(chainSetupOpts) } - rollupCfg1 := *chaincfg.OPSepolia() - chainCfg1 := *chainconfig.OPSepoliaChainConfig() - - rollupCfg2 := *chaincfg.OPSepolia() - rollupCfg2.L2ChainID = new(big.Int).SetUint64(42) - chainCfg2 := *chainconfig.OPSepoliaChainConfig() - chainCfg2.ChainID = rollupCfg2.L2ChainID - - // activate interop at genesis for both - rollupCfg1.InteropTime = new(uint64) - rollupCfg2.InteropTime = new(uint64) + rollupCfgs := make([]*rollup.Config, 0, chainSetupOpts.chainCount) + + chainCfgs := make([]*params.ChainConfig, 0, chainSetupOpts.chainCount) + chainIDAndOutputs := make([]eth.ChainIDAndOutput, 0, chainSetupOpts.chainCount) + dependencies := make(map[eth.ChainID]*depset.StaticConfigDependency, chainSetupOpts.chainCount) + chainIDs := make([]eth.ChainID, 0, chainSetupOpts.chainCount) + + for i := 0; i < chainSetupOpts.chainCount; i++ { + rollupCfg := *chaincfg.OPSepolia() + rollupCfg.L2ChainID = big.NewInt(int64(i)) + // activate interop at genesis + rollupCfg.InteropTime = new(uint64) + chainCfg := *chainconfig.OPSepoliaChainConfig() + chainCfg.ChainID = rollupCfg.L2ChainID + rollupCfgs = append(rollupCfgs, &rollupCfg) + chainCfgs = append(chainCfgs, &chainCfg) + chainIDs = append(chainIDs, eth.ChainIDFromBig(rollupCfg.L2ChainID)) + + chainIDAndOutputs = append(chainIDAndOutputs, eth.ChainIDAndOutput{ + ChainID: eth.ChainIDFromBig(rollupCfg.L2ChainID), + Output: eth.OutputRoot(ð.OutputV0{BlockHash: common.Hash{byte(i)}}), + }) + dependencies[eth.ChainIDFromBig(rollupCfg.L2ChainID)] = &depset.StaticConfigDependency{} + } agreedSuperRoot := ð.SuperV1{ - Timestamp: rollupCfg1.Genesis.L2Time + 1234, - Chains: []eth.ChainIDAndOutput{ - {ChainID: eth.ChainIDFromBig(rollupCfg1.L2ChainID), Output: eth.OutputRoot(ð.OutputV0{BlockHash: common.Hash{0x11}})}, - {ChainID: 
eth.ChainIDFromBig(rollupCfg2.L2ChainID), Output: eth.OutputRoot(ð.OutputV0{BlockHash: common.Hash{0x22}})}, - }, + Timestamp: rollupCfgs[0].Genesis.L2Time + 1234, + Chains: chainIDAndOutputs, } var ds *depset.StaticConfigDependencySet if chainSetupOpts.expiryWindow > 0 { - ds, _ = depset.NewStaticConfigDependencySetWithMessageExpiryOverride(map[eth.ChainID]*depset.StaticConfigDependency{ - eth.ChainIDFromBig(rollupCfg1.L2ChainID): {}, - eth.ChainIDFromBig(rollupCfg2.L2ChainID): {}, - }, chainSetupOpts.expiryWindow) + ds, _ = depset.NewStaticConfigDependencySetWithMessageExpiryOverride(dependencies, chainSetupOpts.expiryWindow) } else { - ds, _ = depset.NewStaticConfigDependencySet(map[eth.ChainID]*depset.StaticConfigDependency{ - eth.ChainIDFromBig(rollupCfg1.L2ChainID): {}, - eth.ChainIDFromBig(rollupCfg2.L2ChainID): {}, - }) + ds, _ = depset.NewStaticConfigDependencySet(dependencies) } configSource := &staticConfigSource{ - rollupCfgs: []*rollup.Config{&rollupCfg1, &rollupCfg2}, - chainConfigs: []*params.ChainConfig{&chainCfg1, &chainCfg2}, - depset: ds, - chainIDs: []eth.ChainID{eth.ChainIDFromBig(rollupCfg1.L2ChainID), eth.ChainIDFromBig(rollupCfg2.L2ChainID)}, + rollupCfgs: rollupCfgs, + chainConfigs: chainCfgs, + l1ChainConfig: params.SepoliaChainConfig, + depset: ds, + chainIDs: chainIDs, } tasksStub := &stubTasks{ l2SafeHead: eth.L2BlockRef{Number: 918429823450218}, // Past the claimed block @@ -729,6 +746,64 @@ func TestHazardSet_ExpiredMessageShortCircuitsInclusionCheck(t *testing.T) { }) } +func TestMaximumNumberOfChains(t *testing.T) { + logger := testlog.Logger(t, log.LevelError) + chainCount := ConsolidateStep + configSource, agreedSuperRoot, tasksStub := setupChains(WithChainCount(chainCount)) + defer tasksStub.AssertExpectations(t) + rng := rand.New(rand.NewSource(123)) + + agreedHash := common.Hash(eth.SuperRoot(agreedSuperRoot)) + pendingProgress := make([]types.OptimisticBlock, 0, chainCount) + step := uint64(0) + l2PreimageOracle, _ := 
test.NewStubOracle(t) + l2PreimageOracle.TransitionStates[agreedHash] = &types.TransitionState{SuperRoot: agreedSuperRoot.Marshal()} + + // Generate an optimistic block for every chain + for _, cfg := range configSource.rollupCfgs { + block, rcpts := createBlock(rng, cfg, 100, nil) + l2PreimageOracle.Receipts[block.Hash()] = rcpts + tasksStub.blockHash = block.Hash() + output := createOutput(tasksStub.blockHash) + tasksStub.outputRoot = eth.OutputRoot(output) + newPendingProgress := append(pendingProgress, types.OptimisticBlock{BlockHash: tasksStub.blockHash, OutputRoot: tasksStub.outputRoot}) + expectedIntermediateRoot := &types.TransitionState{ + SuperRoot: agreedSuperRoot.Marshal(), + PendingProgress: newPendingProgress, + Step: step + 1, + } + + expectedClaim := expectedIntermediateRoot.Hash() + verifyResult(t, logger, tasksStub, configSource, l2PreimageOracle, agreedHash, agreedSuperRoot.Timestamp+100000, expectedClaim) + pendingProgress = newPendingProgress + agreedHash = expectedIntermediateRoot.Hash() + l2PreimageOracle.TransitionStates[agreedHash] = expectedIntermediateRoot + l2PreimageOracle.Outputs[common.Hash(tasksStub.outputRoot)] = output + l2PreimageOracle.Blocks[tasksStub.blockHash] = block + step++ + } + + // Populate initial agreed blocks + for i, chain := range agreedSuperRoot.Chains { + block, _ := createBlock(rng, configSource.rollupCfgs[i], 99, nil) + l2PreimageOracle.Outputs[common.Hash(chain.Output)] = createOutput(block.Hash()) + l2PreimageOracle.Blocks[block.Hash()] = block + } + // Run the consolidate step + finalOutputs := make([]eth.ChainIDAndOutput, 0, chainCount) + for i, block := range pendingProgress { + finalOutputs = append(finalOutputs, eth.ChainIDAndOutput{ + ChainID: configSource.chainIDs[i], + Output: block.OutputRoot, + }) + } + expectedClaim := common.Hash(eth.SuperRoot(ð.SuperV1{ + Timestamp: agreedSuperRoot.Timestamp + 1, + Chains: finalOutputs, + })) + verifyResult(t, logger, tasksStub, configSource, l2PreimageOracle, 
agreedHash, agreedSuperRoot.Timestamp+100000, expectedClaim) +} + type mockConsolidateDeps struct { mock.Mock *consolidateCheckDeps @@ -774,6 +849,7 @@ var _ taskExecutor = (*stubTasks)(nil) func (t *stubTasks) RunDerivation( _ log.Logger, _ *rollup.Config, + _ *params.ChainConfig, _ depset.DependencySet, _ *params.ChainConfig, _ common.Hash, @@ -835,10 +911,11 @@ func (t *stubTasks) ExpectBuildDepositOnlyBlock( } type staticConfigSource struct { - rollupCfgs []*rollup.Config - chainConfigs []*params.ChainConfig - depset *depset.StaticConfigDependencySet - chainIDs []eth.ChainID + rollupCfgs []*rollup.Config + chainConfigs []*params.ChainConfig + l1ChainConfig *params.ChainConfig + depset *depset.StaticConfigDependencySet + chainIDs []eth.ChainID } func (s *staticConfigSource) RollupConfig(chainID eth.ChainID) (*rollup.Config, error) { @@ -859,6 +936,10 @@ func (s *staticConfigSource) ChainConfig(chainID eth.ChainID) (*params.ChainConf panic(fmt.Sprintf("no chain config found for chain %d", chainID)) } +func (s *staticConfigSource) L1ChainConfig(l1ChainID eth.ChainID) (*params.ChainConfig, error) { + return s.l1ChainConfig, nil +} + func (s *staticConfigSource) DependencySet(chainID eth.ChainID) (depset.DependencySet, error) { return s.depset, nil } diff --git a/op-program/client/interop/oracle_test.go b/op-program/client/interop/oracle_test.go index abcfb10e336c0..6acb762150bd2 100644 --- a/op-program/client/interop/oracle_test.go +++ b/op-program/client/interop/oracle_test.go @@ -251,3 +251,6 @@ func (o *OracleHinterStub) HintBlockExecution(parentBlockHash common.Hash, attr func (o *OracleHinterStub) HintWithdrawalsRoot(blockHash common.Hash, chainID eth.ChainID) { } + +func (o *OracleHinterStub) HintBlockHashLookup(blockNumber uint64, headBlockHash common.Hash, l2ChainID eth.ChainID) { +} diff --git a/op-program/client/l2/db.go b/op-program/client/l2/db.go index 0c1f2c3714cac..1d3a8ac5253c5 100644 --- a/op-program/client/l2/db.go +++ b/op-program/client/l2/db.go 
@@ -74,6 +74,10 @@ func (o *OracleKeyValueStore) Close() error { // Remaining methods are unused when accessing the state for block processing so leaving unimplemented. +func (o *OracleKeyValueStore) SyncKeyValue() error { + panic("not supported") +} + func (o *OracleKeyValueStore) Has(key []byte) (bool, error) { panic("not supported") } diff --git a/op-program/client/l2/engine_test.go b/op-program/client/l2/engine_test.go index 183465ec39fff..fbb7769b90833 100644 --- a/op-program/client/l2/engine_test.go +++ b/op-program/client/l2/engine_test.go @@ -190,10 +190,11 @@ func createOracleEngine(t *testing.T, headBlockOnIsthmus bool) (*OracleEngine, * } func createL2Block(t *testing.T, number int, setWithdrawalsRoot bool) *types.Block { - tx, err := derive.L1InfoDeposit(chaincfg.OPSepolia(), eth.SystemConfig{}, uint64(1), eth.HeaderBlockInfo(&types.Header{ - Number: big.NewInt(32), - BaseFee: big.NewInt(7), - }), 0) + tx, err := derive.L1InfoDeposit(chaincfg.OPSepolia(), params.MergedTestChainConfig, + eth.SystemConfig{}, uint64(1), eth.HeaderBlockInfo(&types.Header{ + Number: big.NewInt(32), + BaseFee: big.NewInt(7), + }), 0) require.NoError(t, err) header := &types.Header{ Number: big.NewInt(int64(number)), diff --git a/op-program/client/l2/engineapi/block_processor.go b/op-program/client/l2/engineapi/block_processor.go index 5110ca453a703..fa2259ec39952 100644 --- a/op-program/client/l2/engineapi/block_processor.go +++ b/op-program/client/l2/engineapi/block_processor.go @@ -62,7 +62,9 @@ func NewBlockProcessorFromPayloadAttributes(provider BlockDataProvider, parent c d = provider.Config().BaseFeeChangeDenominator(header.Time) e = provider.Config().ElasticityMultiplier() } - header.Extra = eip1559.EncodeHoloceneExtraData(d, e) + if provider.Config().IsOptimismHolocene(header.Time) { + header.Extra = eip1559.EncodeOptimismExtraData(provider.Config(), header.Time, d, e, attrs.MinBaseFee) + } } return NewBlockProcessorFromHeader(provider, header) diff --git 
a/op-program/client/l2/engineapi/l2_engine_api.go b/op-program/client/l2/engineapi/l2_engine_api.go index 5ae6391f29165..d3a42079391c1 100644 --- a/op-program/client/l2/engineapi/l2_engine_api.go +++ b/op-program/client/l2/engineapi/l2_engine_api.go @@ -107,6 +107,9 @@ func computePayloadId(headBlockHash common.Hash, attrs *eth.PayloadAttributes) e if attrs.EIP1559Params != nil { hasher.Write(attrs.EIP1559Params[:]) } + if attrs.MinBaseFee != nil { + _ = binary.Write(hasher, binary.BigEndian, *attrs.MinBaseFee) + } var out engine.PayloadID copy(out[:], hasher.Sum(nil)[:8]) return out @@ -355,19 +358,18 @@ func (ea *L2EngineAPI) NewPayloadV3(ctx context.Context, params *eth.ExecutionPa return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("nil parentBeaconBlockRoot post-cancun")) } - if !ea.config().IsCancun(new(big.Int).SetUint64(uint64(params.BlockNumber)), uint64(params.Timestamp)) { + cfg := ea.config() + + if !cfg.IsCancun(new(big.Int).SetUint64(uint64(params.BlockNumber)), uint64(params.Timestamp)) { return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.UnsupportedFork.With(errors.New("newPayloadV3 called pre-cancun")) } - // Payload must have eip-1559 params in ExtraData after Holocene - if ea.config().IsHolocene(uint64(params.Timestamp)) { - if err := eip1559.ValidateHoloceneExtraData(params.ExtraData); err != nil { - return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.UnsupportedFork.With(errors.New("invalid holocene extraData post-holocene")) - } + if err := eip1559.ValidateOptimismExtraData(cfg, uint64(params.Timestamp), params.ExtraData); err != nil { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.UnsupportedFork.With(err) } // Payload must have WithdrawalsRoot after Isthmus - if ea.config().IsIsthmus(uint64(params.Timestamp)) { + if cfg.IsIsthmus(uint64(params.Timestamp)) { if params.WithdrawalsRoot == nil { return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, 
engine.UnsupportedFork.With(errors.New("nil withdrawalsRoot post-isthmus")) } diff --git a/op-program/client/l2/engineapi/l2_engine_api_test.go b/op-program/client/l2/engineapi/l2_engine_api_test.go index a3588300453cf..c717917f4f339 100644 --- a/op-program/client/l2/engineapi/l2_engine_api_test.go +++ b/op-program/client/l2/engineapi/l2_engine_api_test.go @@ -5,6 +5,7 @@ import ( "math/big" "testing" + "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum/go-ethereum/beacon/engine" @@ -17,7 +18,6 @@ import ( "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/params" "github.com/stretchr/testify/require" ) @@ -35,9 +35,7 @@ func TestNewPayloadV4(t *testing.T) { logger, _ := testlog.CaptureLogger(t, log.LvlInfo) for _, c := range cases { - genesis := createGenesis() - isthmusTime := c.isthmusTime - genesis.Config.IsthmusTime = &isthmusTime + genesis := createGenesisWithForkTimeOffset(c.isthmusTime) ethCfg := ðconfig.Config{ NetworkId: genesis.Config.ChainID.Uint64(), Genesis: genesis, @@ -50,6 +48,8 @@ func TestNewPayloadV4(t *testing.T) { genesisBlock := backend.GetHeaderByNumber(0) genesisHash := genesisBlock.Hash() eip1559Params := eth.Bytes8([]byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}) + minBaseFee := uint64(1e9) + minBaseFeePtr := &minBaseFee gasLimit := eth.Uint64Quantity(4712388) result, err := engineAPI.ForkchoiceUpdatedV3(context.Background(), ð.ForkchoiceState{ HeadBlockHash: genesisHash, @@ -64,6 +64,7 @@ func TestNewPayloadV4(t *testing.T) { NoTxPool: false, GasLimit: &gasLimit, EIP1559Params: &eip1559Params, + MinBaseFee: minBaseFeePtr, }) require.NoError(t, err) require.EqualValues(t, engine.VALID, result.PayloadStatus.Status) @@ -102,6 +103,9 @@ func TestCreatedBlocksAreCached(t *testing.T) { genesis 
:= backend.GetHeaderByNumber(0) genesisHash := genesis.Hash() eip1559Params := eth.Bytes8([]byte{0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}) + minBaseFee := uint64(1e9) + minBaseFeePtr := &minBaseFee + gasLimit := eth.Uint64Quantity(genesis.GasLimit) result, err := engineAPI.ForkchoiceUpdatedV3(context.Background(), ð.ForkchoiceState{ HeadBlockHash: genesisHash, SafeBlockHash: genesisHash, @@ -113,8 +117,9 @@ func TestCreatedBlocksAreCached(t *testing.T) { Withdrawals: &types.Withdrawals{}, ParentBeaconBlockRoot: &common.Hash{0x22}, NoTxPool: false, - GasLimit: (*eth.Uint64Quantity)(&genesis.GasLimit), + GasLimit: &gasLimit, EIP1559Params: &eip1559Params, + MinBaseFee: minBaseFeePtr, }) require.NoError(t, err) require.EqualValues(t, engine.VALID, result.PayloadStatus.Status) @@ -160,25 +165,53 @@ func newStubBackend(t *testing.T) *stubCachingBackend { } func createGenesis() *core.Genesis { - config := *params.MergedTestChainConfig - config.PragueTime = nil - var zero uint64 - // activate recent OP-stack forks - config.RegolithTime = &zero - config.CanyonTime = &zero - config.EcotoneTime = &zero - config.FjordTime = &zero - config.GraniteTime = &zero - config.HoloceneTime = &zero - config.IsthmusTime = &zero - - l2Genesis := &core.Genesis{ - Config: &config, - Difficulty: common.Big0, - ParentHash: common.Hash{}, - BaseFee: big.NewInt(7), - Alloc: map[common.Address]types.Account{}, - ExtraData: []byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8}, // for Holocene eip-1559 params + return createGenesisWithForkTimeOffset(0) +} + +func createGenesisWithForkTimeOffset(forkTimeOffset uint64) *core.Genesis { + deployConfig := &genesis.DeployConfig{ + L2InitializationConfig: genesis.L2InitializationConfig{ + DevDeployConfig: genesis.DevDeployConfig{ + FundDevAccounts: true, + }, + L2GenesisBlockDeployConfig: genesis.L2GenesisBlockDeployConfig{ + L2GenesisBlockGasLimit: 30_000_000, + L2GenesisBlockDifficulty: (*hexutil.Big)(big.NewInt(100)), + }, + L2CoreDeployConfig: 
genesis.L2CoreDeployConfig{ + L1ChainID: 900, + L2ChainID: 901, + L2BlockTime: 2, + }, + UpgradeScheduleDeployConfig: genesis.UpgradeScheduleDeployConfig{ + L1CancunTimeOffset: new(hexutil.Uint64), + }, + }, + } + + // Enable all forks up to the specified time + ts := hexutil.Uint64(0) + deployConfig.L2GenesisRegolithTimeOffset = &ts + deployConfig.L2GenesisCanyonTimeOffset = &ts + deployConfig.L2GenesisDeltaTimeOffset = &ts + deployConfig.L2GenesisEcotoneTimeOffset = &ts + deployConfig.L2GenesisFjordTimeOffset = &ts + deployConfig.L2GenesisGraniteTimeOffset = &ts + deployConfig.L2GenesisHoloceneTimeOffset = &ts + + // Set fork time for latest forks + offset := hexutil.Uint64(forkTimeOffset) + deployConfig.L2GenesisIsthmusTimeOffset = &offset + deployConfig.L2GenesisInteropTimeOffset = &offset + deployConfig.L2GenesisJovianTimeOffset = &offset + + l1Genesis, err := genesis.NewL1Genesis(deployConfig) + if err != nil { + panic(err) + } + l2Genesis, err := genesis.NewL2Genesis(deployConfig, eth.BlockRefFromHeader(l1Genesis.ToBlock().Header())) + if err != nil { + panic(err) } return l2Genesis diff --git a/op-program/client/l2/engineapi/precompiles.go b/op-program/client/l2/engineapi/precompiles.go index fa925d4bc38c0..bfc05c29005c8 100644 --- a/op-program/client/l2/engineapi/precompiles.go +++ b/op-program/client/l2/engineapi/precompiles.go @@ -187,6 +187,10 @@ func (c *ecrecoverOracle) Run(input []byte) ([]byte, error) { return result, nil } +func (c *ecrecoverOracle) Name() string { + return "ECRECOVER_ORACLE" +} + func allZero(b []byte) bool { for _, byte := range b { if byte != 0 { @@ -205,6 +209,10 @@ func (b *bn256PairingOracle) RequiredGas(input []byte) uint64 { return b.Orig.RequiredGas(input) } +func (b *bn256PairingOracle) Name() string { + return b.Orig.Name() +} + var ( // true32Byte is returned if the bn256 pairing check succeeds. 
true32Byte = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1} @@ -249,6 +257,10 @@ func (b *bn256PairingOracleGranite) Run(input []byte) ([]byte, error) { return b.bn256PairingOracle.Run(input) } +func (b *bn256PairingOracleGranite) Name() string { + return b.Orig.Name() +} + // kzgPointEvaluationOracle implements the EIP-4844 point evaluation precompile, // using the preimage-oracle to perform the evaluation. type kzgPointEvaluationOracle struct { @@ -261,6 +273,10 @@ func (b *kzgPointEvaluationOracle) RequiredGas(input []byte) uint64 { return b.Orig.RequiredGas(input) } +func (b *kzgPointEvaluationOracle) Name() string { + return b.Orig.Name() +} + const ( blobVerifyInputLength = 192 // Max input length for the point evaluation precompile. blobPrecompileReturnValue = "000000000000000000000000000000000000000000000000000000000000100073eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001" @@ -364,6 +380,10 @@ func (b *blsOperationOracle) Run(input []byte) ([]byte, error) { return result, nil } +func (b *blsOperationOracle) Name() string { + return b.Orig.Name() +} + type blsOperationOracleWithSizeLimit struct { blsOperationOracle sizeLimit uint64 diff --git a/op-program/client/l2/engineapi/precompiles_test.go b/op-program/client/l2/engineapi/precompiles_test.go index a102b78d1a6af..c4627e7764358 100644 --- a/op-program/client/l2/engineapi/precompiles_test.go +++ b/op-program/client/l2/engineapi/precompiles_test.go @@ -398,6 +398,10 @@ func (s *stubPrecompile) Run(_ []byte) ([]byte, error) { return stubResult, nil } +func (s *stubPrecompile) Name() string { + return "STUB" +} + type stubPrecompileOracle struct { result []byte failureResponse bool diff --git a/op-program/client/l2/engineapi/test/l2_engine_api_tests.go b/op-program/client/l2/engineapi/test/l2_engine_api_tests.go index 77168255e9629..062f70cd60070 100644 --- a/op-program/client/l2/engineapi/test/l2_engine_api_tests.go +++ 
b/op-program/client/l2/engineapi/test/l2_engine_api_tests.go @@ -39,7 +39,7 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi. api := newTestHelper(t, createBackend) genesis := api.backend.CurrentHeader() - txData, err := derive.L1InfoDeposit(rollupCfg, eth.SystemConfig{}, 1, eth.HeaderBlockInfo(genesis), 0) + txData, err := derive.L1InfoDeposit(rollupCfg, params.MergedTestChainConfig, eth.SystemConfig{}, 1, eth.HeaderBlockInfo(genesis), 0) api.assert.NoError(err) tx := types.NewTx(txData) block := api.addBlock(tx) @@ -57,7 +57,7 @@ func RunEngineAPITests(t *testing.T, createBackend func(t *testing.T) engineapi. api := newTestHelper(t, createBackend) genesis := api.backend.CurrentHeader() - txData, err := derive.L1InfoDeposit(rollupCfg, eth.SystemConfig{}, 1, eth.HeaderBlockInfo(genesis), 0) + txData, err := derive.L1InfoDeposit(rollupCfg, params.MergedTestChainConfig, eth.SystemConfig{}, 1, eth.HeaderBlockInfo(genesis), 0) api.assert.NoError(err) txData.Gas = uint64(gasLimit + 1) tx := types.NewTx(txData) diff --git a/op-program/client/l2/fast_canon.go b/op-program/client/l2/fast_canon.go index 053c7c1a88de6..dc4973af99a33 100644 --- a/op-program/client/l2/fast_canon.go +++ b/op-program/client/l2/fast_canon.go @@ -5,6 +5,7 @@ import ( "fmt" "math" + l2Types "github.com/ethereum-optimism/optimism/op-program/client/l2/types" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" @@ -33,6 +34,7 @@ type FastCanonicalBlockHeaderOracle struct { ctx *chainContext db ethdb.KeyValueStore cache *simplelru.LRU[uint64, *types.Header] + hinter l2Types.OracleHinter } func NewFastCanonicalBlockHeaderOracle( @@ -54,6 +56,7 @@ func NewFastCanonicalBlockHeaderOracle( fallback: fallback, ctx: ctx, db: db, + hinter: stateOracle.Hinter(), cache: cache, } } @@ -88,8 +91,8 @@ func (o *FastCanonicalBlockHeaderOracle) GetHeaderByNumber(n uint64) *types.Head for 
h.Number.Uint64() > n { headNumber := h.Number.Uint64() var currEarliestHistory uint64 - if params.HistoryServeWindow-1 < headNumber { - currEarliestHistory = headNumber - (params.HistoryServeWindow - 1) + if params.HistoryServeWindow < headNumber { + currEarliestHistory = headNumber - params.HistoryServeWindow } if currEarliestHistory <= n { block := o.getHistoricalBlockHash(h, n) @@ -109,6 +112,9 @@ func (o *FastCanonicalBlockHeaderOracle) GetHeaderByNumber(n uint64) *types.Head } func (o *FastCanonicalBlockHeaderOracle) getHistoricalBlockHash(head *types.Header, n uint64) *types.Block { + if o.hinter != nil { + o.hinter.HintBlockHashLookup(n, head.Hash(), eth.ChainIDFromBig(o.config.ChainID)) + } statedb, err := state.New(head.Root, state.NewDatabase(triedb.NewDatabase(rawdb.NewDatabase(o.db), nil), nil)) if err != nil { panic(fmt.Errorf("failed to get state at %v: %w", head.Hash(), err)) diff --git a/op-program/client/l2/fast_canon_test.go b/op-program/client/l2/fast_canon_test.go index 5bd98856a26bf..eb299f9a1fdf6 100644 --- a/op-program/client/l2/fast_canon_test.go +++ b/op-program/client/l2/fast_canon_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-program/client/l2/test" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testutils" "github.com/ethereum/go-ethereum/common" @@ -12,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/stretchr/testify/require" ) @@ -20,7 +22,13 @@ func TestFastCanonBlockHeaderOracle_GetHeaderByNumber(t *testing.T) { logger, _ := testlog.CaptureLogger(t, log.LvlInfo) miner, backend := test.NewMiner(t, logger, 0) - stateOracle := &test.KvStateOracle{T: t, Source: backend.TrieDB().Disk()} + chainID := 
eth.ChainIDFromBig(backend.Config().ChainID) + capturingHinter := &test.CapturingHinter{} + stateOracle := &test.KvStateOracle{ + T: t, + Source: backend.TrieDB().Disk(), + StubHinter: NewPreimageHinter(capturingHinter), + } miner.Mine(t, nil) miner.Mine(t, nil) miner.Mine(t, nil) @@ -50,12 +58,31 @@ func TestFastCanonBlockHeaderOracle_GetHeaderByNumber(t *testing.T) { h := canon.GetHeaderByNumber(3) require.Equal(t, backend.GetBlockByNumber(3).Hash(), h.Hash()) + require.Len(t, capturingHinter.Hints, 0) // No lookups required h = canon.GetHeaderByNumber(2) require.Equal(t, backend.GetBlockByNumber(2).Hash(), h.Hash()) + require.Len(t, capturingHinter.Hints, 1) + require.Equal(t, capturingHinter.Hints[0], BlockHashLookupHint{ + BlockNumber: 2, + HeadBlockHash: head.Hash(), + ChainID: chainID, + }) h = canon.GetHeaderByNumber(1) require.Equal(t, backend.GetBlockByNumber(1).Hash(), h.Hash()) + require.Len(t, capturingHinter.Hints, 2) + require.Equal(t, capturingHinter.Hints[1], BlockHashLookupHint{ + BlockNumber: 1, + HeadBlockHash: head.Hash(), + ChainID: chainID, + }) h = canon.GetHeaderByNumber(0) require.Equal(t, backend.GetBlockByNumber(0).Hash(), h.Hash()) + require.Len(t, capturingHinter.Hints, 3) + require.Equal(t, capturingHinter.Hints[2], BlockHashLookupHint{ + BlockNumber: 0, + HeadBlockHash: head.Hash(), + ChainID: chainID, + }) } func TestFastCanonBlockHeaderOracle_LargeWindow(t *testing.T) { @@ -64,7 +91,7 @@ func TestFastCanonBlockHeaderOracle_LargeWindow(t *testing.T) { logger, _ := testlog.CaptureLogger(t, log.LvlInfo) miner, backend := test.NewMiner(t, logger, 0) stateOracle := &test.KvStateOracle{T: t, Source: backend.TrieDB().Disk()} - numBlocks := 16384 // params.HistoryServeWindow * 2 + numBlocks := params.HistoryServeWindow*2 + 2 // 16384 for i := 0; i < numBlocks; i++ { miner.Mine(t, nil) } @@ -247,7 +274,12 @@ func TestFastCanonBlockHeaderOracle_SetCanonical(t *testing.T) { func runCanonicalCacheTest(t *testing.T, backend *core.BlockChain, 
blockNum uint64, expectedNumRequests int) { head := backend.CurrentHeader() tracker := newTrackingBlockByHash(backend.GetBlockByHash) - stateOracle := &test.KvStateOracle{T: t, Source: backend.TrieDB().Disk()} + capturingHinter := &test.CapturingHinter{} + stateOracle := &test.KvStateOracle{ + T: t, + Source: backend.TrieDB().Disk(), + StubHinter: NewPreimageHinter(capturingHinter), + } // Create invalid fallback to assert that it's never used. fatalBlockByHash := func(hash common.Hash) *types.Block { t.Fatalf("Unexpected fallback for block: %v", hash) @@ -261,12 +293,15 @@ func runCanonicalCacheTest(t *testing.T, backend *core.BlockChain, blockNum uint h := canon.GetHeaderByNumber(blockNum) require.Equal(t, expect, h.Hash()) require.Equalf(t, expectedNumRequests, tracker.numRequests, "Unexpected number of requests for block: %v (%d)", expect, blockNum) + require.Len(t, capturingHinter.Hints, expectedNumRequests) // query again and assert that it's cached tracker.numRequests = 0 + capturingHinter.Hints = nil h = canon.GetHeaderByNumber(blockNum) require.Equal(t, expect, h.Hash()) require.Equalf(t, 1, tracker.numRequests, "Unexpected number of requests for block: %v (%d)", expect, blockNum) + require.Len(t, capturingHinter.Hints, 1) } type trackingBlockByHash struct { diff --git a/op-program/client/l2/hints.go b/op-program/client/l2/hints.go index defd8ec829e6e..b691b0bfee650 100644 --- a/op-program/client/l2/hints.go +++ b/op-program/client/l2/hints.go @@ -15,16 +15,17 @@ import ( ) const ( - HintL2BlockHeader = "l2-block-header" - HintL2Transactions = "l2-transactions" - HintL2Receipts = "l2-receipts" - HintL2Code = "l2-code" - HintL2StateNode = "l2-state-node" - HintL2Output = "l2-output" - HintL2BlockData = "l2-block-data" - HintAgreedPrestate = "agreed-pre-state" - HintL2AccountProof = "l2-account-proof" - HintL2PayloadWitness = "l2-payload-witness" + HintL2BlockHeader = "l2-block-header" + HintL2Transactions = "l2-transactions" + HintL2Receipts = "l2-receipts" 
+ HintL2Code = "l2-code" + HintL2StateNode = "l2-state-node" + HintL2Output = "l2-output" + HintL2BlockData = "l2-block-data" + HintAgreedPrestate = "agreed-pre-state" + HintL2AccountProof = "l2-account-proof" + HintL2PayloadWitness = "l2-payload-witness" + HintL2BlockHashLookup = "l2-block-hash-lookup" ) type LegacyBlockHeaderHint common.Hash @@ -185,3 +186,25 @@ func (l PayloadWitnessHint) Hint() string { return HintL2PayloadWitness + " " + hexutil.Encode(marshaled) } + +type BlockHashLookupHint struct { + BlockNumber uint64 + HeadBlockHash common.Hash + ChainID eth.ChainID +} + +func (b BlockHashLookupHint) Hint() string { + hintBytes := make([]byte, 8+32+8) + + binary.BigEndian.PutUint64(hintBytes[0:8], b.BlockNumber) + copy(hintBytes[8:40], b.HeadBlockHash.Bytes()) + binary.BigEndian.PutUint64(hintBytes[40:], eth.EvilChainIDToUInt64(b.ChainID)) + + return HintL2BlockHashLookup + " " + hexutil.Encode(hintBytes) +} + +func (b BlockHashLookupHint) String() string { + return fmt.Sprintf("%v(%v, %v, %v)", HintL2BlockHashLookup, b.BlockNumber, b.HeadBlockHash, b.ChainID) +} + +var _ preimage.Hint = BlockHashLookupHint{} diff --git a/op-program/client/l2/oracle.go b/op-program/client/l2/oracle.go index 9e015243c50db..c490b60911b18 100644 --- a/op-program/client/l2/oracle.go +++ b/op-program/client/l2/oracle.go @@ -26,6 +26,9 @@ type StateOracle interface { // CodeByHash retrieves the contract code pre-image for a given hash. // codeHash should be retrieved from the world state account for a contract. CodeByHash(codeHash common.Hash, chainID eth.ChainID) []byte + + // Hinter provides an optional interface to provide proactive hints. + Hinter() l2Types.OracleHinter } // Oracle defines the high-level API used to retrieve L2 data. 
@@ -44,9 +47,6 @@ type Oracle interface { TransitionStateByRoot(root common.Hash) *interopTypes.TransitionState ReceiptsByBlockHash(blockHash common.Hash, chainID eth.ChainID) (*types.Block, types.Receipts) - - // Optional interface to provide proactive hints. - Hinter() l2Types.OracleHinter } type PreimageOracleHinter struct { @@ -70,6 +70,10 @@ func (p *PreimageOracleHinter) HintWithdrawalsRoot(blockHash common.Hash, chainI p.hint.Hint(AccountProofHint{BlockHash: blockHash, Address: predeploys.L2ToL1MessagePasserAddr, ChainID: chainID}) } +func (p *PreimageOracleHinter) HintBlockHashLookup(blockNumber uint64, headBlockHash common.Hash, l2ChainID eth.ChainID) { + p.hint.Hint(BlockHashLookupHint{BlockNumber: blockNumber, HeadBlockHash: headBlockHash, ChainID: l2ChainID}) +} + // PreimageOracle implements Oracle using by interfacing with the pure preimage.Oracle // to fetch pre-images to decode into the requested data. type PreimageOracle struct { diff --git a/op-program/client/l2/test/miner.go b/op-program/client/l2/test/miner.go index 156c7c3d24817..3d35c996bf3f5 100644 --- a/op-program/client/l2/test/miner.go +++ b/op-program/client/l2/test/miner.go @@ -40,6 +40,10 @@ func NewMiner(t *testing.T, logger log.Logger, isthmusTime uint64) (*Miner, *cor config.HoloceneTime = &zero config.IsthmusTime = &isthmusTime config.PragueTime = &isthmusTime + + // Disable future Ethereum forks for now + config.OsakaTime = nil + denomCanyon := uint64(250) config.Optimism = ¶ms.OptimismConfig{ EIP1559Denominator: 50, @@ -118,6 +122,10 @@ func (m *Miner) Fork(t *testing.T, blockNumber uint64, attrs *eth.PayloadAttribu GasLimit: &gasLimit, EIP1559Params: &eip1559Params, } + if m.backend.Config().IsMinBaseFee(head.Time) { + stub := uint64(1e9) + attrs.MinBaseFee = &stub + } } m.MineAt(t, head, attrs) } @@ -138,6 +146,10 @@ func (m *Miner) MineAt(t *testing.T, head *types.Header, attrs *eth.PayloadAttri GasLimit: &gasLimit, EIP1559Params: &eip1559Params, } + if 
m.backend.Config().IsMinBaseFee(head.Time) { + stub := uint64(1e9) + attrs.MinBaseFee = &stub + } } result, err := m.engineAPI.ForkchoiceUpdatedV3(context.Background(), ð.ForkchoiceState{ HeadBlockHash: hash, diff --git a/op-program/client/l2/test/stub_oracle.go b/op-program/client/l2/test/stub_oracle.go index bbe1737a3c9ab..602e434ccf0c8 100644 --- a/op-program/client/l2/test/stub_oracle.go +++ b/op-program/client/l2/test/stub_oracle.go @@ -4,6 +4,7 @@ import ( "encoding/binary" "testing" + preimage "github.com/ethereum-optimism/optimism/op-preimage" interopTypes "github.com/ethereum-optimism/optimism/op-program/client/interop/types" l2Types "github.com/ethereum-optimism/optimism/op-program/client/l2/types" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -104,8 +105,9 @@ func (o StubBlockOracle) ReceiptsByBlockHash(blockHash common.Hash, chainID eth. // KvStateOracle loads data from a source ethdb.KeyValueStore type KvStateOracle struct { - T *testing.T - Source ethdb.KeyValueStore + T *testing.T + Source ethdb.KeyValueStore + StubHinter l2Types.OracleHinter } func NewKvStateOracle(t *testing.T, db ethdb.KeyValueStore) *KvStateOracle { @@ -127,6 +129,10 @@ func (o *KvStateOracle) CodeByHash(hash common.Hash, chainID eth.ChainID) []byte return rawdb.ReadCode(o.Source, hash) } +func (o *KvStateOracle) Hinter() l2Types.OracleHinter { + return o.StubHinter +} + func NewStubStateOracle(t *testing.T) *StubStateOracle { return &StubStateOracle{ t: t, @@ -158,6 +164,10 @@ func (o *StubStateOracle) CodeByHash(hash common.Hash, chainID eth.ChainID) []by return data } +func (o *StubStateOracle) Hinter() l2Types.OracleHinter { + return nil +} + type StubPrecompileOracle struct { t *testing.T Results map[common.Hash]PrecompileResult @@ -183,3 +193,13 @@ func (o *StubPrecompileOracle) Precompile(address common.Address, input []byte, o.Calls++ return result.Result, result.Ok } + +type CapturingHinter struct { + Hints []preimage.Hint +} + +func (c *CapturingHinter) 
Hint(v preimage.Hint) { + c.Hints = append(c.Hints, v) +} + +var _ preimage.Hinter = (*CapturingHinter)(nil) diff --git a/op-program/client/l2/types/types.go b/op-program/client/l2/types/types.go index f3815638313c3..9823baeae4542 100644 --- a/op-program/client/l2/types/types.go +++ b/op-program/client/l2/types/types.go @@ -11,4 +11,5 @@ import ( type OracleHinter interface { HintBlockExecution(parentBlockHash common.Hash, attr eth.PayloadAttributes, chainID eth.ChainID) HintWithdrawalsRoot(blockHash common.Hash, chainID eth.ChainID) + HintBlockHashLookup(blockNumber uint64, headBlockHash common.Hash, l2ChainID eth.ChainID) } diff --git a/op-program/client/mpt/db.go b/op-program/client/mpt/db.go index e6cc79160b1cc..265d591e3a93b 100644 --- a/op-program/client/mpt/db.go +++ b/op-program/client/mpt/db.go @@ -114,4 +114,12 @@ func (p *DB) AncientDatadir() (string, error) { panic("not supported") } +func (p *DB) SyncAncient() error { + panic("not supported") +} + +func (p *DB) SyncKeyValue() error { + panic("not supported") +} + var _ ethdb.Database = (*DB)(nil) diff --git a/op-program/client/preinterop.go b/op-program/client/preinterop.go index f99835fb0ec6b..b9057acc76d3b 100644 --- a/op-program/client/preinterop.go +++ b/op-program/client/preinterop.go @@ -22,6 +22,7 @@ func RunPreInteropProgram( result, err := tasks.RunDerivation( logger, bootInfo.RollupConfig, + bootInfo.L1ChainConfig, nil, // No dependency set pre-interop bootInfo.L2ChainConfig, bootInfo.L1Head, diff --git a/op-program/client/tasks/deposits_block.go b/op-program/client/tasks/deposits_block.go index ab16fcf70ddd8..2a35c42a8eb13 100644 --- a/op-program/client/tasks/deposits_block.go +++ b/op-program/client/tasks/deposits_block.go @@ -145,10 +145,12 @@ func blockToDepositsOnlyAttributes(cfg *rollup.Config, block *types.Block, outpu NoTxPool: true, GasLimit: &gasLimit, } + if cfg.IsHolocene(block.Time()) { - d, e := eip1559.DecodeHoloceneExtraData(block.Extra()) + d, e, m := 
eip1559.DecodeOptimismExtraData(cfg, block.Time(), block.Extra()) eip1559Params := eth.Bytes8(eip1559.EncodeHolocene1559Params(d, e)) attrs.EIP1559Params = &eip1559Params + attrs.MinBaseFee = m } return attrs, nil } diff --git a/op-program/client/tasks/derive.go b/op-program/client/tasks/derive.go index 30aec383ff47e..6877b73c0f6a6 100644 --- a/op-program/client/tasks/derive.go +++ b/op-program/client/tasks/derive.go @@ -45,6 +45,7 @@ type DerivationOptions struct { func RunDerivation( logger log.Logger, cfg *rollup.Config, + l1ChainConfig *params.ChainConfig, depSet derive.DependencySet, l2Cfg *params.ChainConfig, l1Head common.Hash, @@ -63,7 +64,7 @@ func RunDerivation( l2Source := l2.NewOracleEngine(cfg, logger, engineBackend, l2Oracle.Hinter()) logger.Info("Starting derivation", "chainID", cfg.L2ChainID) - d := cldr.NewDriver(logger, cfg, depSet, l1Source, l1BlobsSource, l2Source, l2ClaimBlockNum) + d := cldr.NewDriver(logger, cfg, depSet, l1Source, l1BlobsSource, l2Source, l2ClaimBlockNum, l1ChainConfig) result, err := d.RunComplete() if err != nil { return DerivationResult{}, fmt.Errorf("failed to run program to completion: %w", err) diff --git a/op-program/compatibility-test/baseline-cannon-multithreaded-64-next.json b/op-program/compatibility-test/baseline-cannon-multithreaded-64-next.json index 28a8885269d49..dafbcaf7b3b09 100644 --- a/op-program/compatibility-test/baseline-cannon-multithreaded-64-next.json +++ b/op-program/compatibility-test/baseline-cannon-multithreaded-64-next.json @@ -640,70 +640,6 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "25ee1dccb8d01fcf5c5e7b3ac41188fa9c613a6ba12f07e368dd37f0fa21fa93" }, - { - "callStack": { - "function": "syscall.Flock", - "callStack": { - "function": "github.com/gofrs/flock.(*Flock).Unlock", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", - "callStack": { - "function": 
"github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).loadTransactions", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).consolidatedBlockByHash", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", - "callStack": { - "function": "main.main" - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - }, - "message": "Potential Incompatible Syscall Detected: 5071", - "severity": "CRITICAL", - "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. 
\n If the execution path does not reach this syscall, it may not affect execution.", - "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", - "hash": "262876708b03addf04292a157bf304f20115cfe55bc65bd3de80ab72ffd5db49" - }, { "callStack": { "function": "golang.org/x/sys/unix.Sysinfo", @@ -1314,6 +1250,64 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "3cde82f65fb30a398c492e48a470104a376f4f9ea855026b5961fbc936ccaa04" }, + { + "callStack": { + "function": "golang.org/x/sys/unix.Flock", + "callStack": { + "function": "github.com/gofrs/flock.(*Flock).Unlock", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", + "callStack": { + "function": 
"github.com/ethereum-optimism/optimism/op-program/client.RunProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", + "callStack": { + "function": "main.main" + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + }, + "message": "Potential Incompatible Syscall Detected: 5071", + "severity": "CRITICAL", + "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. \n If the execution path does not reach this syscall, it may not affect execution.", + "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", + "hash": "40fa0eb7f9e4191fa33b1c7eaa32d4e6733dab9f790c7afbbf1d72ab231e2e35" + }, { "callStack": { "function": "syscall.Ftruncate", @@ -3710,6 +3704,70 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "acc0d615135b118439c6cda3f95baf7b9c0e85aac025c87968f60b0421ac27ee" }, + { + "callStack": { + "function": "golang.org/x/sys/unix.Flock", + "callStack": { + "function": "github.com/gofrs/flock.(*Flock).Unlock", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).loadTransactions", + "callStack": { + "function": 
"github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).consolidatedBlockByHash", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", + "callStack": { + "function": "main.main" + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + }, + "message": "Potential Incompatible Syscall Detected: 5071", + "severity": "CRITICAL", + "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. 
\n If the execution path does not reach this syscall, it may not affect execution.", + "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", + "hash": "ade27e0b52d1c4050f192d6155199c8112288f564e8d56ecf1690aa5ad3ae0f2" + }, { "callStack": { "function": "syscall.lstat", @@ -4431,64 +4489,6 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "d3a9e0e4814e9f74db0571ba63e4f9ec806ce8f085f6feb46f51b78d161685b3" }, - { - "callStack": { - "function": "syscall.Flock", - "callStack": { - "function": "github.com/gofrs/flock.(*Flock).Unlock", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", - "callStack": { - 
"function": "github.com/ethereum-optimism/optimism/op-program/client.Main", - "callStack": { - "function": "main.main" - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - }, - "message": "Potential Incompatible Syscall Detected: 5071", - "severity": "CRITICAL", - "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. \n If the execution path does not reach this syscall, it may not affect execution.", - "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", - "hash": "d9e0c3a236defe9edbba0b197c64c4d2e2c21ac351cf77a56269c5db2c2c0167" - }, { "callStack": { "function": "syscall.lstat", @@ -4791,50 +4791,19 @@ }, { "callStack": { - "function": "internal/syscall/unix.GetRandom", + "function": "runtime.netpollclose", "callStack": { - "function": "crypto/internal/sysrand.read", + "function": "internal/poll.runtime_pollClose", "callStack": { - "function": "crypto/internal/sysrand.Read", + "function": "internal/poll.(*FD).destroy", "callStack": { - "function": "crypto/internal/entropy.Depleted", + "function": "internal/poll.(*FD).decref", "callStack": { - "function": "crypto/internal/fips140/drbg.Read", + "function": "internal/poll.(*FD).Close", "callStack": { - "function": "crypto/internal/fips140/drbg.ReadWithReader", + "function": "os.(*file).close", "callStack": { - "function": "crypto/internal/fips140/ecdh.GenerateKey[go.shape.*crypto/internal/fips140/nistec.P521Point]", - "callStack": { - "function": "crypto/ecdh.init.func9" - } - } - } - } - } - } - } - }, - "message": "Potential NOOP Syscall Detected: 5313", - "severity": "WARNING", - "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. 
\n If the execution path does not reach this syscall, it may not affect execution.", - "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", - "hash": "0299af5c9cd64575dca6ef14515c1c335e739d5c113b93139f70e221f3bff196" - }, - { - "callStack": { - "function": "runtime.netpollclose", - "callStack": { - "function": "internal/poll.runtime_pollClose", - "callStack": { - "function": "internal/poll.(*FD).destroy", - "callStack": { - "function": "internal/poll.(*FD).decref", - "callStack": { - "function": "internal/poll.(*FD).Close", - "callStack": { - "function": "os.(*file).close", - "callStack": { - "function": "os.removeAllFrom", + "function": "os.removeAllFrom", "callStack": { "function": "os.removeAll", "callStack": { @@ -7731,6 +7700,73 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "4fa487ac912e96dac666815d38f915ea81cae9f42db94c21a5e1cb04f78a1d97" }, + { + "callStack": { + "function": "syscall.stat", + "callStack": { + "function": "syscall.Stat", + "callStack": { + "function": "os.statNolog", + "callStack": { + "function": "os.Stat", + "callStack": { + "function": "github.com/ethereum/go-ethereum/common.FileExist", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).loadJournal", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).loadLayers", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).loadTransactions", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).consolidatedBlockByHash", + "callStack": { + 
"function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", + "callStack": { + "function": "main.main" + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + }, + "message": "Potential NOOP Syscall Detected: 5004", + "severity": "WARNING", + "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. 
\n If the execution path does not reach this syscall, it may not affect execution.", + "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", + "hash": "5013ea916f3e0f37ec609b3ca053b80c8f7aea0073d0b5a8452135c9fa1d9657" + }, { "callStack": { "function": "syscall.openat", @@ -10154,37 +10190,6 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "88ac28e344788a275b36d1274da5c5da8627c6c237849c6a30d93a1227fb0ba0" }, - { - "callStack": { - "function": "internal/syscall/unix.GetRandom", - "callStack": { - "function": "crypto/internal/sysrand.read", - "callStack": { - "function": "crypto/internal/sysrand.Read", - "callStack": { - "function": "crypto/internal/entropy.Depleted", - "callStack": { - "function": "crypto/internal/fips140/drbg.Read", - "callStack": { - "function": "crypto/internal/fips140/drbg.ReadWithReader", - "callStack": { - "function": "crypto/internal/fips140/ecdh.GenerateKey[go.shape.*crypto/internal/fips140/nistec.P384Point]", - "callStack": { - "function": "crypto/ecdh.init.func5" - } - } - } - } - } - } - } - }, - "message": "Potential NOOP Syscall Detected: 5313", - "severity": "WARNING", - "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. 
\n If the execution path does not reach this syscall, it may not affect execution.", - "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", - "hash": "8a9f47ff88e528d60c299294057d517fb1d37d4ed484937eebf7cf08f921302e" - }, { "callStack": { "function": "runtime.netpollinit", @@ -10396,6 +10401,88 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "8bdbc310bac5de456e009ee897e0331a9ad7a55271b9d2b598963076fd86fc92" }, + { + "callStack": { + "function": "runtime.netpollclose", + "callStack": { + "function": "internal/poll.runtime_pollClose", + "callStack": { + "function": "internal/poll.(*FD).destroy", + "callStack": { + "function": "internal/poll.(*FD).decref", + "callStack": { + "function": "internal/poll.(*FD).Close", + "callStack": { + "function": "os.(*file).close", + "callStack": { + "function": "github.com/gofrs/flock.(*Flock).resetFh", + "callStack": { + "function": "github.com/gofrs/flock.(*Flock).Unlock", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).loadTransactions", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).consolidatedBlockByHash", + "callStack": { + "function": 
"github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", + "callStack": { + "function": "main.main" + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + }, + "message": "Potential NOOP Syscall Detected: 5208", + "severity": "WARNING", + "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. 
\n If the execution path does not reach this syscall, it may not affect execution.", + "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", + "hash": "8c8bcd5c08c7f155c27bcf48b638e8951d4e209b74cf3bec1973eb45bebd74f7" + }, { "callStack": { "function": "syscall.openat", @@ -11011,28 +11098,6 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "97dfbb0bc343ead69931e67e17ef8db2cf7fcb0fce22dc61b8482dce9cd98c89" }, - { - "callStack": { - "function": "internal/syscall/unix.GetRandom", - "callStack": { - "function": "crypto/internal/sysrand.read", - "callStack": { - "function": "crypto/internal/sysrand.Read", - "callStack": { - "function": "crypto/internal/entropy.Depleted", - "callStack": { - "function": "crypto/internal/fips140/drbg.init.func1" - } - } - } - } - }, - "message": "Potential NOOP Syscall Detected: 5313", - "severity": "WARNING", - "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. 
\n If the execution path does not reach this syscall, it may not affect execution.", - "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", - "hash": "98e5dde5d38ec92fa907da4c2fcc8c0f88a7559d5bdff992696d4876d2a94c4d" - }, { "callStack": { "function": "runtime.tgkill", @@ -12128,21 +12193,21 @@ }, { "callStack": { - "function": "syscall.Seek", + "function": "runtime.netpollclose", "callStack": { - "function": "internal/poll.(*FD).Seek", + "function": "internal/poll.runtime_pollClose", "callStack": { - "function": "os.(*File).seek", + "function": "internal/poll.(*FD).destroy", "callStack": { - "function": "os.(*File).Seek", + "function": "internal/poll.(*FD).decref", "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.(*freezerTableMeta).write", + "function": "internal/poll.(*FD).Close", "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.(*freezerTable).doSync", + "function": "os.(*file).close", "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.(*freezerTable).Close", + "function": "github.com/gofrs/flock.(*Flock).resetFh", "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.newTable", + "function": "github.com/gofrs/flock.(*Flock).Unlock", "callStack": { "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", "callStack": { @@ -12158,27 +12223,21 @@ "callStack": { "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).loadTransactions", + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).consolidatedBlockByHash", + "function": 
"github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", + "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", + "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", - "callStack": { - "function": "main.main" - } - } + "function": "main.main" } } } @@ -12202,88 +12261,66 @@ } } }, - "message": "Potential NOOP Syscall Detected: 5008", + "message": "Potential NOOP Syscall Detected: 5208", "severity": "WARNING", "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. 
\n If the execution path does not reach this syscall, it may not affect execution.", "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", - "hash": "b54b1cdac0dce3c02378c79e2fd5ea2c99b53648f9c7918b52383f2b5ad4dc20" + "hash": "b51d3f1ff926563ad6cda4d625444b22c19a1b0a88677169873182df0f147907" }, { "callStack": { - "function": "runtime.netpollinit", + "function": "syscall.Seek", "callStack": { - "function": "runtime.netpollGenericInit", + "function": "internal/poll.(*FD).Seek", "callStack": { - "function": "runtime.(*timers).addHeap", + "function": "os.(*File).seek", "callStack": { - "function": "runtime.(*timer).maybeAdd", + "function": "os.(*File).Seek", "callStack": { - "function": "runtime.blockTimerChan", + "function": "github.com/ethereum/go-ethereum/core/rawdb.(*freezerTableMeta).write", "callStack": { - "function": "runtime.selectgo", + "function": "github.com/ethereum/go-ethereum/core/rawdb.(*freezerTable).doSync", "callStack": { - "function": "github.com/ethereum/go-ethereum/p2p/nat.discoverPMP", + "function": "github.com/ethereum/go-ethereum/core/rawdb.(*freezerTable).Close", "callStack": { - "function": "github.com/ethereum/go-ethereum/node.init.Any.func2.2" - } - } - } - } - } - } - } - }, - "message": "Potential NOOP Syscall Detected: 5285", - "severity": "WARNING", - "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. 
\n If the execution path does not reach this syscall, it may not affect execution.", - "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", - "hash": "b726c684145b290d0039ee9f525cfeda00d1743da1bfd59f85236cef26a35f52" - }, - { - "callStack": { - "function": "runtime.netpollclose", - "callStack": { - "function": "internal/poll.runtime_pollClose", - "callStack": { - "function": "internal/poll.(*FD).destroy", - "callStack": { - "function": "internal/poll.(*FD).decref", - "callStack": { - "function": "internal/poll.(*FD).Close", - "callStack": { - "function": "os.(*file).close", - "callStack": { - "function": "github.com/gofrs/flock.(*Flock).Unlock", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", + "function": "github.com/ethereum/go-ethereum/core/rawdb.newTable", "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", + "function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", + "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", + 
"function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).loadTransactions", "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).consolidatedBlockByHash", "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", "callStack": { - "function": "main.main" + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", + "callStack": { + "function": "main.main" + } + } + } } } } @@ -12306,11 +12343,42 @@ } } }, - "message": "Potential NOOP Syscall Detected: 5208", + "message": "Potential NOOP Syscall Detected: 5008", + "severity": "WARNING", + "impact": "This syscall is 
present in the program, but its execution depends on the actual runtime behavior. \n If the execution path does not reach this syscall, it may not affect execution.", + "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", + "hash": "b54b1cdac0dce3c02378c79e2fd5ea2c99b53648f9c7918b52383f2b5ad4dc20" + }, + { + "callStack": { + "function": "runtime.netpollinit", + "callStack": { + "function": "runtime.netpollGenericInit", + "callStack": { + "function": "runtime.(*timers).addHeap", + "callStack": { + "function": "runtime.(*timer).maybeAdd", + "callStack": { + "function": "runtime.blockTimerChan", + "callStack": { + "function": "runtime.selectgo", + "callStack": { + "function": "github.com/ethereum/go-ethereum/p2p/nat.discoverPMP", + "callStack": { + "function": "github.com/ethereum/go-ethereum/node.init.Any.func2.2" + } + } + } + } + } + } + } + }, + "message": "Potential NOOP Syscall Detected: 5285", "severity": "WARNING", "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. 
\n If the execution path does not reach this syscall, it may not affect execution.", "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", - "hash": "b9c7263a4b2bdd73a5030de130c9f26b56285141bed4b21cf372ac2bec7edf61" + "hash": "b726c684145b290d0039ee9f525cfeda00d1743da1bfd59f85236cef26a35f52" }, { "callStack": { @@ -12939,37 +13007,6 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "cdc85b076baeb732ebe46810a7798301c569410870a3d1842cdca42c8152e691" }, - { - "callStack": { - "function": "internal/syscall/unix.GetRandom", - "callStack": { - "function": "crypto/internal/sysrand.read", - "callStack": { - "function": "crypto/internal/sysrand.Read", - "callStack": { - "function": "crypto/internal/entropy.Depleted", - "callStack": { - "function": "crypto/internal/fips140/drbg.Read", - "callStack": { - "function": "crypto/internal/fips140/drbg.ReadWithReader", - "callStack": { - "function": "crypto/internal/fips140/ecdh.GenerateKey[go.shape.*crypto/internal/fips140/nistec.P256Point]", - "callStack": { - "function": "crypto/ecdh.init.func1" - } - } - } - } - } - } - } - }, - "message": "Potential NOOP Syscall Detected: 5313", - "severity": "WARNING", - "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. 
\n If the execution path does not reach this syscall, it may not affect execution.", - "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", - "hash": "cf70026ef86e5cc06804b0effc2d5c1c7b2b7a9f07876db1e7bffe04fe482655" - }, { "callStack": { "function": "syscall.fstat", @@ -14132,85 +14169,6 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "e765d842c669be18a8bd4092a0a168e55aaf724ff54d00020da8ca27b872f888" }, - { - "callStack": { - "function": "runtime.netpollclose", - "callStack": { - "function": "internal/poll.runtime_pollClose", - "callStack": { - "function": "internal/poll.(*FD).destroy", - "callStack": { - "function": "internal/poll.(*FD).decref", - "callStack": { - "function": "internal/poll.(*FD).Close", - "callStack": { - "function": "os.(*file).close", - "callStack": { - "function": "github.com/gofrs/flock.(*Flock).Unlock", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).loadTransactions", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).consolidatedBlockByHash", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", 
- "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", - "callStack": { - "function": "main.main" - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - }, - "message": "Potential NOOP Syscall Detected: 5208", - "severity": "WARNING", - "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. \n If the execution path does not reach this syscall, it may not affect execution.", - "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", - "hash": "e7ced58322ac64ae4c5931010d6a3fc5a647942facc783846b707a2fc5bb0e52" - }, { "callStack": { "function": "syscall.Seek", @@ -14369,40 +14327,6 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "ee019853c1c8cd393e91ae684a879ab4e80224ef34f34fecda7a42b8ee738ffb" }, - { - "callStack": { - "function": "internal/syscall/unix.GetRandom", - "callStack": { - "function": "crypto/internal/sysrand.read", - "callStack": { - "function": "crypto/internal/sysrand.Read", - "callStack": { - "function": "crypto/internal/entropy.Depleted", - "callStack": { - "function": "crypto/internal/fips140/drbg.Read", - "callStack": { - "function": "crypto/rand.(*reader).Read", - "callStack": { - "function": "crypto/rand.Read", - "callStack": { - "function": 
"github.com/ethereum/go-ethereum/rpc.randomIDGenerator", - "callStack": { - "function": "github.com/ethereum/go-ethereum/rpc.init" - } - } - } - } - } - } - } - } - }, - "message": "Potential NOOP Syscall Detected: 5313", - "severity": "WARNING", - "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. \n If the execution path does not reach this syscall, it may not affect execution.", - "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", - "hash": "ee383489540ecb15022a72bcc900093ba6f652b5d76a5e405ddfe546e29e47df" - }, { "callStack": { "function": "syscall.openat", @@ -14904,6 +14828,67 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "fc6d08605736c4822973b5a95bd49cfcd854aeb70beeaf9486dd57cc56a0414a" }, + { + "callStack": { + "function": "syscall.stat", + "callStack": { + "function": "syscall.Stat", + "callStack": { + "function": "os.statNolog", + "callStack": { + "function": "os.Stat", + "callStack": { + "function": "github.com/ethereum/go-ethereum/common.FileExist", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).loadJournal", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).loadLayers", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", + "callStack": { + 
"function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", + "callStack": { + "function": "main.main" + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + }, + "message": "Potential NOOP Syscall Detected: 5004", + "severity": "WARNING", + "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. \n If the execution path does not reach this syscall, it may not affect execution.", + "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", + "hash": "fc7609bd18384b36954855ebd2d909707e01ed0c67a7c45ea4aead058369210d" + }, { "callStack": { "function": "syscall.fstat", diff --git a/op-program/compatibility-test/baseline-cannon-multithreaded-64.json b/op-program/compatibility-test/baseline-cannon-multithreaded-64.json index 26c890bb23eb4..89a722c3123db 100644 --- a/op-program/compatibility-test/baseline-cannon-multithreaded-64.json +++ b/op-program/compatibility-test/baseline-cannon-multithreaded-64.json @@ -610,70 +610,6 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "25ee1dccb8d01fcf5c5e7b3ac41188fa9c613a6ba12f07e368dd37f0fa21fa93" }, - { - "callStack": { - "function": "syscall.Flock", - "callStack": { - "function": "github.com/gofrs/flock.(*Flock).Unlock", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", - "callStack": { - "function": 
"github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).loadTransactions", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).consolidatedBlockByHash", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", - "callStack": { - "function": "main.main" - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - }, - "message": "Potential Incompatible Syscall Detected: 5071", - "severity": "CRITICAL", - "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. 
\n If the execution path does not reach this syscall, it may not affect execution.", - "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", - "hash": "262876708b03addf04292a157bf304f20115cfe55bc65bd3de80ab72ffd5db49" - }, { "callStack": { "function": "golang.org/x/sys/unix.Sysinfo", @@ -1226,6 +1162,64 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "3cde82f65fb30a398c492e48a470104a376f4f9ea855026b5961fbc936ccaa04" }, + { + "callStack": { + "function": "golang.org/x/sys/unix.Flock", + "callStack": { + "function": "github.com/gofrs/flock.(*Flock).Unlock", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", + "callStack": { + "function": 
"github.com/ethereum-optimism/optimism/op-program/client.RunProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", + "callStack": { + "function": "main.main" + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + }, + "message": "Potential Incompatible Syscall Detected: 5071", + "severity": "CRITICAL", + "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. \n If the execution path does not reach this syscall, it may not affect execution.", + "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", + "hash": "40fa0eb7f9e4191fa33b1c7eaa32d4e6733dab9f790c7afbbf1d72ab231e2e35" + }, { "callStack": { "function": "syscall.Ftruncate", @@ -3472,6 +3466,70 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "acc0d615135b118439c6cda3f95baf7b9c0e85aac025c87968f60b0421ac27ee" }, + { + "callStack": { + "function": "golang.org/x/sys/unix.Flock", + "callStack": { + "function": "github.com/gofrs/flock.(*Flock).Unlock", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).loadTransactions", + "callStack": { + "function": 
"github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).consolidatedBlockByHash", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", + "callStack": { + "function": "main.main" + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + }, + "message": "Potential Incompatible Syscall Detected: 5071", + "severity": "CRITICAL", + "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. 
\n If the execution path does not reach this syscall, it may not affect execution.", + "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", + "hash": "ade27e0b52d1c4050f192d6155199c8112288f564e8d56ecf1690aa5ad3ae0f2" + }, { "callStack": { "function": "syscall.lstat", @@ -4224,64 +4282,6 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "d3a9e0e4814e9f74db0571ba63e4f9ec806ce8f085f6feb46f51b78d161685b3" }, - { - "callStack": { - "function": "syscall.Flock", - "callStack": { - "function": "github.com/gofrs/flock.(*Flock).Unlock", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", - "callStack": { - 
"function": "github.com/ethereum-optimism/optimism/op-program/client.Main", - "callStack": { - "function": "main.main" - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - }, - "message": "Potential Incompatible Syscall Detected: 5071", - "severity": "CRITICAL", - "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. \n If the execution path does not reach this syscall, it may not affect execution.", - "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", - "hash": "d9e0c3a236defe9edbba0b197c64c4d2e2c21ac351cf77a56269c5db2c2c0167" - }, { "callStack": { "function": "syscall.lstat", @@ -6873,29 +6873,96 @@ }, { "callStack": { - "function": "syscall.Seek", + "function": "syscall.stat", "callStack": { - "function": "internal/poll.(*FD).Seek", + "function": "syscall.Stat", "callStack": { - "function": "os.(*File).seek", + "function": "os.statNolog", "callStack": { - "function": "os.(*File).Seek", + "function": "os.Stat", "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.(*freezerTableMeta).write", + "function": "github.com/ethereum/go-ethereum/common.FileExist", "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.(*freezerTable).doSync", + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).loadJournal", "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.(*freezerTable).Close", + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).loadLayers", "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", + "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", + "function": 
"github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).loadTransactions", "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).consolidatedBlockByHash", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", + "callStack": { + "function": "main.main" + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + }, + "message": "Potential NOOP Syscall Detected: 5004", + "severity": "WARNING", + "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. 
\n If the execution path does not reach this syscall, it may not affect execution.", + "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", + "hash": "5013ea916f3e0f37ec609b3ca053b80c8f7aea0073d0b5a8452135c9fa1d9657" + }, + { + "callStack": { + "function": "syscall.Seek", + "callStack": { + "function": "internal/poll.(*FD).Seek", + "callStack": { + "function": "os.(*File).seek", + "callStack": { + "function": "os.(*File).Seek", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.(*freezerTableMeta).write", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.(*freezerTable).doSync", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.(*freezerTable).Close", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", "callStack": { "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", "callStack": { @@ -9010,6 +9077,88 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "8abefb23a2e31c4f6c7e774de8cb638710702c8e269905dcf1902b09a0257c25" }, + { + "callStack": { + "function": "runtime.netpollclose", + "callStack": { + "function": "internal/poll.runtime_pollClose", + "callStack": { + "function": "internal/poll.(*FD).destroy", + "callStack": { + "function": "internal/poll.(*FD).decref", + "callStack": { + "function": "internal/poll.(*FD).Close", + "callStack": { + "function": "os.(*file).close", + "callStack": { + "function": "github.com/gofrs/flock.(*Flock).resetFh", + "callStack": { + "function": 
"github.com/gofrs/flock.(*Flock).Unlock", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).loadTransactions", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).consolidatedBlockByHash", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", + "callStack": { + "function": "main.main" + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + }, + "message": "Potential NOOP Syscall Detected: 5208", + "severity": 
"WARNING", + "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. \n If the execution path does not reach this syscall, it may not affect execution.", + "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", + "hash": "8c8bcd5c08c7f155c27bcf48b638e8951d4e209b74cf3bec1973eb45bebd74f7" + }, { "callStack": { "function": "runtime.mincore", @@ -10318,6 +10467,82 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "b328ee40de012bf87181591dfaf037c1485ab12d38a12e8b4db0af414a805ed6" }, + { + "callStack": { + "function": "runtime.netpollclose", + "callStack": { + "function": "internal/poll.runtime_pollClose", + "callStack": { + "function": "internal/poll.(*FD).destroy", + "callStack": { + "function": "internal/poll.(*FD).decref", + "callStack": { + "function": "internal/poll.(*FD).Close", + "callStack": { + "function": "os.(*file).close", + "callStack": { + "function": "github.com/gofrs/flock.(*Flock).resetFh", + "callStack": { + "function": "github.com/gofrs/flock.(*Flock).Unlock", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", + "callStack": { + "function": 
"github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", + "callStack": { + "function": "main.main" + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + }, + "message": "Potential NOOP Syscall Detected: 5208", + "severity": "WARNING", + "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. \n If the execution path does not reach this syscall, it may not affect execution.", + "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", + "hash": "b51d3f1ff926563ad6cda4d625444b22c19a1b0a88677169873182df0f147907" + }, { "callStack": { "function": "syscall.Seek", @@ -10431,79 +10656,6 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "b726c684145b290d0039ee9f525cfeda00d1743da1bfd59f85236cef26a35f52" }, - { - "callStack": { - "function": "runtime.netpollclose", - "callStack": { - "function": "internal/poll.runtime_pollClose", - "callStack": { - "function": "internal/poll.(*FD).destroy", - "callStack": { - "function": "internal/poll.(*FD).decref", - "callStack": { - "function": "internal/poll.(*FD).Close", - "callStack": { - "function": "os.(*file).close", - "callStack": { - "function": "github.com/gofrs/flock.(*Flock).Unlock", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", - "callStack": { - 
"function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", - "callStack": { - "function": "main.main" - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - }, - "message": "Potential NOOP Syscall Detected: 5208", - "severity": "WARNING", - "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. 
\n If the execution path does not reach this syscall, it may not affect execution.", - "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", - "hash": "b9c7263a4b2bdd73a5030de130c9f26b56285141bed4b21cf372ac2bec7edf61" - }, { "callStack": { "function": "runtime.madvise", @@ -12196,85 +12348,6 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "e765d842c669be18a8bd4092a0a168e55aaf724ff54d00020da8ca27b872f888" }, - { - "callStack": { - "function": "runtime.netpollclose", - "callStack": { - "function": "internal/poll.runtime_pollClose", - "callStack": { - "function": "internal/poll.(*FD).destroy", - "callStack": { - "function": "internal/poll.(*FD).decref", - "callStack": { - "function": "internal/poll.(*FD).Close", - "callStack": { - "function": "os.(*file).close", - "callStack": { - "function": "github.com/gofrs/flock.(*Flock).Unlock", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.newResettableFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/core/rawdb.NewStateFreezer", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).repairHistory", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", - "callStack": { - "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).loadTransactions", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).consolidatedBlockByHash", - "callStack": { - "function": 
"github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", - "callStack": { - "function": "github.com/ethereum-optimism/optimism/op-program/client.Main", - "callStack": { - "function": "main.main" - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - } - }, - "message": "Potential NOOP Syscall Detected: 5208", - "severity": "WARNING", - "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. 
\n If the execution path does not reach this syscall, it may not affect execution.", - "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", - "hash": "e7ced58322ac64ae4c5931010d6a3fc5a647942facc783846b707a2fc5bb0e52" - }, { "callStack": { "function": "syscall.Seek", @@ -12673,6 +12746,67 @@ "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", "hash": "fb1420c0bb3d7d169c6f4201b39abcf009b20e5b0a80847c025d08ed26185e19" }, + { + "callStack": { + "function": "syscall.stat", + "callStack": { + "function": "syscall.Stat", + "callStack": { + "function": "os.statNolog", + "callStack": { + "function": "os.Stat", + "callStack": { + "function": "github.com/ethereum/go-ethereum/common.FileExist", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).loadJournal", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.(*Database).loadLayers", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb/pathdb.New", + "callStack": { + "function": "github.com/ethereum/go-ethereum/triedb.NewDatabase", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/mpt.ReadTrie", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.(*ConsolidateOracle).ReceiptsByBlockHash", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.singleRoundConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.RunConsolidation", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.stateTransition", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client/interop.runInteropProgram", + "callStack": { + "function": "github.com/ethereum-optimism/optimism/op-program/client.RunProgram", + "callStack": { + "function": 
"github.com/ethereum-optimism/optimism/op-program/client.Main", + "callStack": { + "function": "main.main" + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + } + }, + "message": "Potential NOOP Syscall Detected: 5004", + "severity": "WARNING", + "impact": "This syscall is present in the program, but its execution depends on the actual runtime behavior. \n If the execution path does not reach this syscall, it may not affect execution.", + "reference": "https://github.com/ChainSafe/vm-compat?tab=readme-ov-file#how-it-works", + "hash": "fc7609bd18384b36954855ebd2d909707e01ed0c67a7c45ea4aead058369210d" + }, { "callStack": { "function": "runtime.sysFaultOS", diff --git a/op-program/host/cmd/main_test.go b/op-program/host/cmd/main_test.go index f2c5424ca37c3..dfd1d10c496e7 100644 --- a/op-program/host/cmd/main_test.go +++ b/op-program/host/cmd/main_test.go @@ -82,6 +82,7 @@ func TestDefaultCLIOptionsMatchDefaultConfig(t *testing.T) { defaultCfg := config.NewSingleChainConfig( rollupCfg, chainconfig.OPSepoliaChainConfig(), + params.SepoliaChainConfig, common.HexToHash(l1HeadValue), common.HexToHash(l2HeadValue), common.HexToHash(l2OutputRoot), diff --git a/op-program/host/common/l2_store.go b/op-program/host/common/l2_store.go index c18d668c189b9..b9333b1ca662e 100644 --- a/op-program/host/common/l2_store.go +++ b/op-program/host/common/l2_store.go @@ -96,6 +96,11 @@ func (b *batch) Delete(key []byte) error { return nil } +func (b *batch) DeleteRange(start []byte, end []byte) error { + // ignore deletes + return nil +} + func (b *batch) ValueSize() int { return b.size } diff --git a/op-program/host/config/config.go b/op-program/host/config/config.go index f1cff823404d7..3aa5190bdcdc6 100644 --- a/op-program/host/config/config.go +++ b/op-program/host/config/config.go @@ -16,13 +16,13 @@ import ( "github.com/ethereum-optimism/optimism/op-program/client/boot" "github.com/ethereum-optimism/optimism/op-program/host/types" 
"github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/jsonutil" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-program/host/flags" "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/urfave/cli/v2" @@ -86,6 +86,10 @@ type Config struct { // L2ChainConfigs are the op-geth chain config for the L2 execution engines // Must have one chain config for each rollup config L2ChainConfigs []*params.ChainConfig + // L1ChainConfig is the geth chain config for the L1 execution engine + // For interop, we only have one L1 chain config + // since all L2 chains must have the same L1 + L1ChainConfig *params.ChainConfig // ExecCmd specifies the client program to execute in a separate process. // If unset, the fault proof client is run in the same process. 
ExecCmd string @@ -183,6 +187,7 @@ func (c *Config) FetchingEnabled() bool { func NewSingleChainConfig( rollupCfg *rollup.Config, l2ChainConfig *params.ChainConfig, + l1ChainConfig *params.ChainConfig, l1Head common.Hash, l2Head common.Hash, l2OutputRoot common.Hash, @@ -198,6 +203,7 @@ func NewSingleChainConfig( cfg := NewConfig( []*rollup.Config{rollupCfg}, []*params.ChainConfig{l2ChainConfig}, + l1ChainConfig, l1Head, l2Head, l2OutputRoot, @@ -211,6 +217,7 @@ func NewSingleChainConfig( func NewConfig( rollupCfgs []*rollup.Config, l2ChainConfigs []*params.ChainConfig, + l1ChainConfig *params.ChainConfig, l1Head common.Hash, l2Head common.Hash, l2OutputRoot common.Hash, @@ -219,6 +226,7 @@ func NewConfig( ) *Config { return &Config{ Rollups: rollupCfgs, + L1ChainConfig: l1ChainConfig, L2ChainConfigs: l2ChainConfigs, L1Head: l1Head, L2Head: l2Head, @@ -289,7 +297,7 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) { chainID = eth.ChainIDFromUInt64(ch.ChainID) } - l2ChainConfig, err := chainconfig.ChainConfigByChainID(chainID) + l2ChainConfig, err := chainconfig.L2ChainConfigByChainID(chainID) if err != nil { return nil, fmt.Errorf("failed to load chain config for chain %d: %w", chainID, err) } @@ -300,6 +308,8 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) { } rollupCfgs = append(rollupCfgs, rollupCfg) + // L1 chain config resolution deferred until after all rollup configs are loaded + if interopEnabled { depSet, err := depset.FromRegistry(chainID) if err != nil && !errors.Is(err, superchain.ErrUnknownChain) { @@ -328,8 +338,38 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) { return nil, fmt.Errorf("invalid rollup config: %w", err) } rollupCfgs = append(rollupCfgs, rollupCfg) + } + // Resolve L1 chain config akin to op-node's NewL1ChainConfig + if len(rollupCfgs) == 0 { + return nil, fmt.Errorf("no rollup configs provided to resolve L1 chain config") + } + l1ChainIDBig := 
rollupCfgs[0].L1ChainID + l1ChainConfig := eth.L1ChainConfigByChainID(eth.ChainIDFromBig(l1ChainIDBig)) + if l1ChainConfig == nil { + // if the l1 chain config is not known, we fallback to the CLI flag if set... + if ctx.IsSet(flags.L1ChainConfig.Name) { + cf, err := loadL1ChainConfigFromFile(ctx.String(flags.L1ChainConfig.Name)) + if err != nil { + return nil, fmt.Errorf("invalid l1 chain config: %w", err) + } + if cf.ChainID.Cmp(l1ChainIDBig) != 0 { + return nil, fmt.Errorf("l1 chain config chain ID mismatch: %v != %v", cf.ChainID, l1ChainIDBig) + } + l1ChainConfig = cf + } else { + // ... or the program-embedded lookup if no CLI flag is set + lc, err := chainconfig.L1ChainConfigByChainID(eth.ChainIDFromBig(l1ChainIDBig)) + if err != nil { + return nil, fmt.Errorf("failed to load l1 chain config for chain %d: %w", eth.EvilChainIDToUInt64(eth.ChainIDFromBig(l1ChainIDBig)), err) + } + l1ChainConfig = lc + } + } + if l1ChainConfig == nil || l1ChainConfig.BlobScheduleConfig == nil { + return nil, fmt.Errorf("L1 chain config does not have a blob schedule config") } + if ctx.Bool(flags.L2Custom.Name) { log.Warn("Using custom chain configuration via preimage oracle. 
This is not compatible with on-chain execution.") l2ChainID = boot.CustomChainIDIndicator @@ -358,6 +398,7 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) { return &Config{ L2ChainID: l2ChainID, Rollups: rollupCfgs, + L1ChainConfig: l1ChainConfig, DataDir: ctx.String(flags.DataDir.Name), DataFormat: dbFormat, L2URLs: ctx.StringSlice(flags.L2NodeAddr.Name), @@ -381,16 +422,30 @@ func NewConfigFromCLI(log log.Logger, ctx *cli.Context) (*Config, error) { } func loadChainConfigFromGenesis(path string) (*params.ChainConfig, error) { - data, err := os.ReadFile(path) + cfg, err := jsonutil.LoadJSONFieldStrict[params.ChainConfig](path, "config") if err != nil { - return nil, fmt.Errorf("read l2 genesis file: %w", err) + return nil, fmt.Errorf("parse genesis file: %w", err) } - var genesis core.Genesis - err = json.Unmarshal(data, &genesis) + return cfg, nil +} + +// loadL1ChainConfigFromFile attempts to decode a file as a params.ChainConfig directly, +// and if that fails, it attempts to load the config from the .config field (genesis.json format). 
+func loadL1ChainConfigFromFile(path string) (*params.ChainConfig, error) { + file, err := os.Open(path) if err != nil { - return nil, fmt.Errorf("parse l2 genesis file: %w", err) + return nil, fmt.Errorf("failed to read chain spec: %w", err) } - return genesis.Config, nil + defer file.Close() + + var chainConfig params.ChainConfig + dec := json.NewDecoder(file) + dec.DisallowUnknownFields() + if err := dec.Decode(&chainConfig); err == nil { + return &chainConfig, nil + } + + return jsonutil.LoadJSONFieldStrict[params.ChainConfig](path, "config") } func loadRollupConfig(rollupConfigPath string) (*rollup.Config, error) { diff --git a/op-program/host/config/config_test.go b/op-program/host/config/config_test.go index 128183a6e552a..0dc868adb6c77 100644 --- a/op-program/host/config/config_test.go +++ b/op-program/host/config/config_test.go @@ -18,6 +18,7 @@ import ( ) var ( + validL1ChainConfig = params.SepoliaChainConfig validRollupConfig = chaincfg.OPSepolia() validL2Genesis = chainconfig.OPSepoliaChainConfig() validL1Head = common.Hash{0xaa} @@ -234,8 +235,9 @@ func TestCustomL2ChainID(t *testing.T) { require.Equal(t, cfg.L2ChainID, eth.ChainIDFromBig(validL2Genesis.ChainID)) }) t.Run("custom", func(t *testing.T) { - customChainConfig := ¶ms.ChainConfig{ChainID: big.NewInt(0x1212121212)} - cfg := NewSingleChainConfig(validRollupConfig, customChainConfig, validL1Head, validL2Head, validL2OutputRoot, validL2Claim, validL2ClaimBlockNum) + customL1ChainConfig := ¶ms.ChainConfig{ChainID: big.NewInt(0x1212121212)} + customL2ChainConfig := ¶ms.ChainConfig{ChainID: big.NewInt(0x2323232323)} + cfg := NewSingleChainConfig(validRollupConfig, customL1ChainConfig, customL2ChainConfig, validL1Head, validL2Head, validL2OutputRoot, validL2Claim, validL2ClaimBlockNum) require.Equal(t, cfg.L2ChainID, boot.CustomChainIDIndicator) }) } @@ -296,7 +298,7 @@ func TestDBFormat(t *testing.T) { } func validConfig() *Config { - cfg := NewSingleChainConfig(validRollupConfig, validL2Genesis, 
validL1Head, validL2Head, validL2OutputRoot, validL2Claim, validL2ClaimBlockNum) + cfg := NewSingleChainConfig(validRollupConfig, validL2Genesis, validL1ChainConfig, validL1Head, validL2Head, validL2OutputRoot, validL2Claim, validL2ClaimBlockNum) cfg.DataDir = "/tmp/configTest" return cfg } diff --git a/op-program/host/flags/flags.go b/op-program/host/flags/flags.go index baeb3d2f84f5a..01aab1990718c 100644 --- a/op-program/host/flags/flags.go +++ b/op-program/host/flags/flags.go @@ -34,6 +34,11 @@ var ( Usage: "Rollup chain parameters", EnvVars: prefixEnvVars("ROLLUP_CONFIG"), } + L1ChainConfig = &cli.StringFlag{ + Name: "l1.chainconfig", + Usage: "L1 chain config file (path to genesis.json)", + EnvVars: prefixEnvVars("L1_CHAINCONFIG"), + } Network = &cli.StringSliceFlag{ Name: "network", Usage: fmt.Sprintf("Predefined network selection. Available networks: %s", strings.Join(chaincfg.AvailableNetworks(), ", ")), @@ -155,6 +160,7 @@ var programFlags = []cli.Flag{ L2AgreedPrestate, L2Custom, RollupConfig, + L1ChainConfig, Network, DataDir, DataFormat, diff --git a/op-program/host/host.go b/op-program/host/host.go index 4c43464d66741..be9063f316960 100644 --- a/op-program/host/host.go +++ b/op-program/host/host.go @@ -145,6 +145,13 @@ func (p *programExecutor) RunProgram( return fmt.Errorf("could not find rollup config in the host for chain ID %v", chainID) } + var l1ChainConfig *params.ChainConfig + if eth.ChainIDFromBig(p.cfg.L1ChainConfig.ChainID).Cmp(eth.ChainIDFromBig(rollupConfig.L1ChainID)) == 0 { + l1ChainConfig = p.cfg.L1ChainConfig + } else { + return fmt.Errorf("L1 chain config chain ID mismatch: %v != %v", eth.ChainIDFromBig(p.cfg.L1ChainConfig.ChainID), eth.ChainIDFromBig(rollupConfig.L1ChainID)) + } + prefetcherCreator := func(context.Context, log.Logger, kvstore.KV, *config.Config) (hostcommon.Prefetcher, error) { // TODO(#13663): prevent recursive block execution return prefetcher, nil @@ -165,6 +172,7 @@ func (p *programExecutor) RunProgram( result, 
err := tasks.RunDerivation( p.logger, rollupConfig, + l1ChainConfig, p.cfg.DependencySet, l2ChainConfig, p.cfg.L1Head, diff --git a/op-program/host/host_test.go b/op-program/host/host_test.go index 31f2de67bcc6d..d5153d243f010 100644 --- a/op-program/host/host_test.go +++ b/op-program/host/host_test.go @@ -17,6 +17,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" "github.com/stretchr/testify/require" ) @@ -25,7 +26,7 @@ func TestServerMode(t *testing.T) { l1Head := common.Hash{0x11} l2OutputRoot := common.Hash{0x33} - cfg := config.NewSingleChainConfig(chaincfg.OPSepolia(), chainconfig.OPSepoliaChainConfig(), l1Head, common.Hash{0x22}, l2OutputRoot, common.Hash{0x44}, 1000) + cfg := config.NewSingleChainConfig(chaincfg.OPSepolia(), params.SepoliaChainConfig, chainconfig.OPSepoliaChainConfig(), l1Head, common.Hash{0x22}, l2OutputRoot, common.Hash{0x44}, 1000) cfg.DataDir = dir cfg.ServerMode = true diff --git a/op-program/host/kvstore/local.go b/op-program/host/kvstore/local.go index 4d52c6decebf9..823bf42431668 100644 --- a/op-program/host/kvstore/local.go +++ b/op-program/host/kvstore/local.go @@ -28,6 +28,7 @@ var ( l2ChainConfigKey = boot.L2ChainConfigLocalIndex.PreimageKey() rollupKey = boot.RollupConfigLocalIndex.PreimageKey() dependencySetKey = boot.DependencySetLocalIndex.PreimageKey() + l1ChainConfigKey = boot.L1ChainConfigLocalIndex.PreimageKey() ) func (s *LocalPreimageSource) Get(key common.Hash) ([]byte, error) { @@ -63,6 +64,12 @@ func (s *LocalPreimageSource) Get(key common.Hash) ([]byte, error) { return nil, errors.New("host is not configured to serve dependencySet local keys") } return json.Marshal(s.config.DependencySet) + case l1ChainConfigKey: + // NOTE: We check the L2 chain ID again to determine if we are using custom configs + if s.config.L2ChainID != boot.CustomChainIDIndicator { + return nil, 
ErrNotFound + } + return json.Marshal(s.config.L1ChainConfig) default: return nil, ErrNotFound } diff --git a/op-program/host/kvstore/pebble.go b/op-program/host/kvstore/pebble.go index 5bc7fcc9f23a7..9a678a26c3fb2 100644 --- a/op-program/host/kvstore/pebble.go +++ b/op-program/host/kvstore/pebble.go @@ -52,9 +52,10 @@ func (d *pebbleKV) Get(k common.Hash) ([]byte, error) { } return nil, err } + defer closer.Close() + ret := make([]byte, len(dat)) copy(ret, dat) - closer.Close() return ret, nil } diff --git a/op-program/host/subcmds/configs_cmd.go b/op-program/host/subcmds/configs_cmd.go index 752a2af627a80..bb49850a56a16 100644 --- a/op-program/host/subcmds/configs_cmd.go +++ b/op-program/host/subcmds/configs_cmd.go @@ -97,7 +97,7 @@ func listChain(chainID eth.ChainID) error { return err } // Double check the L2 genesis is really available - _, err = chainconfig.ChainConfigByChainID(chainID) + _, err = chainconfig.L2ChainConfigByChainID(chainID) if err != nil { return err } @@ -131,12 +131,17 @@ func CheckCustomChains(ctx *cli.Context) error { errs = append(errs, err) continue } - _, err = chainconfig.ChainConfigByChainID(chainID) + _, err = chainconfig.L2ChainConfigByChainID(chainID) + if err != nil { + errs = append(errs, err) + continue + } + l1ChainID := eth.ChainIDFromBig(cfg.L1ChainID) + _, err = chainconfig.L1ChainConfigByChainID(l1ChainID) if err != nil { errs = append(errs, err) continue } - if cfg.InteropTime != nil { depset, err := chainconfig.DependencySetByChainID(chainID) if err != nil { diff --git a/op-program/repro.justfile b/op-program/repro.justfile index 94ae3dc66ada2..71ebfdd54eb61 100644 --- a/op-program/repro.justfile +++ b/op-program/repro.justfile @@ -32,27 +32,13 @@ op-program-client-mips: GITDATE={{GIT_DATE}} \ VERSION={{OP_PROGRAM_VERSION}} -# Run the op-program-client elf binary directly through cannon's load-elf subcommand. 
-client TYPE CLIENT_SUFFIX PRESTATE_SUFFIX: cannon op-program-client-mips - #!/bin/bash - echo "Checking program version | $(go version /app/op-program/bin/op-program-client{{CLIENT_SUFFIX}}.elf)" - /app/cannon/bin/cannon load-elf \ - --type {{TYPE}} \ - --path /app/op-program/bin/op-program-client{{CLIENT_SUFFIX}}.elf \ - --out /app/op-program/bin/prestate{{PRESTATE_SUFFIX}}.bin.gz \ - --meta "/app/op-program/bin/meta{{PRESTATE_SUFFIX}}.json" - # Generate the prestate proof containing the absolute pre-state hash. -prestate TYPE CLIENT_SUFFIX PRESTATE_SUFFIX: (client TYPE CLIENT_SUFFIX PRESTATE_SUFFIX) +prestate TYPE CLIENT_SUFFIX PRESTATE_SUFFIX: cannon op-program-client-mips #!/bin/bash - /app/cannon/bin/cannon run \ - --proof-at '=0' \ - --stop-at '=1' \ - --input /app/op-program/bin/prestate{{PRESTATE_SUFFIX}}.bin.gz \ - --meta "" \ - --proof-fmt '/app/op-program/bin/%d{{PRESTATE_SUFFIX}}.json' \ - --output "" - mv /app/op-program/bin/0{{PRESTATE_SUFFIX}}.json /app/op-program/bin/prestate-proof{{PRESTATE_SUFFIX}}.json + go run /app/op-program/builder/main.go build-prestate \ + --program-elf /app/op-program/bin/op-program-client{{CLIENT_SUFFIX}}.elf \ + --version {{TYPE}}\ + --suffix {{PRESTATE_SUFFIX}} build-mt64: (prestate "multithreaded64-4" "64" "-mt64") build-mt64Next: (prestate "multithreaded64-5" "64" "-mt64Next") diff --git a/op-program/scripts/build-prestates.sh b/op-program/scripts/build-prestates.sh index 129706ec36e16..ce098f583e67a 100755 --- a/op-program/scripts/build-prestates.sh +++ b/op-program/scripts/build-prestates.sh @@ -22,21 +22,31 @@ VERSIONS_FILE="${STATES_DIR}/versions.json" mkdir -p "${STATES_DIR}" "${LOGS_DIR}" - cd "${REPO_DIR}" VERSIONS_JSON="[]" -VERSIONS=$(git tag --list 'op-program/v*' --sort taggerdate) +readarray -t VERSIONS < <(git tag --list 'op-program/v*' --sort taggerdate) -for VERSION in ${VERSIONS} +for VERSION in "${VERSIONS[@]}" do SHORT_VERSION=$(echo "${VERSION}" | cut -c 13-) 
LOG_FILE="${LOGS_DIR}/build-${SHORT_VERSION}.txt" echo "Building Version: ${VERSION} Logs: ${LOG_FILE}" - git checkout "${VERSION}" > "${LOG_FILE}" 2>&1 + # use --force to overwrite any mise.toml changes + git checkout --force "${VERSION}" > "${LOG_FILE}" 2>&1 if [ -f mise.toml ] then echo "Install dependencies with mise" >> "${LOG_FILE}" + # we rely only on go and jq for the reproducible-prestate build. + # The mise cache should already have jq preinstalled + # But we need to ensure that this ${VERSION} has the correct go version + # So we replace the mise.toml with a minimal one that only specifies go + # Otherwise, `mise install` fails as it conflicts with other preinstalled dependencies + GO_VERSION=$(mise config get tools.go) + cat >mise.toml <> "${LOG_FILE}" 2>&1 fi rm -rf "${BIN_DIR}" diff --git a/op-program/verify/verify.go b/op-program/verify/verify.go index 5ba5ea77f715a..283956edaf591 100644 --- a/op-program/verify/verify.go +++ b/op-program/verify/verify.go @@ -36,6 +36,7 @@ type Runner struct { dataDir string network string chainCfg *params.ChainConfig + l1ChainCfg *params.ChainConfig l2Client *sources.L2Client logCfg oplog.CLIConfig setupLog log.Logger @@ -50,7 +51,7 @@ func NewRunner(l1RpcUrl string, l1RpcKind string, l1BeaconUrl string, l2RpcUrl s setupLog := oplog.NewLogger(os.Stderr, logCfg) - l2RawRpc, err := dial.DialRPCClientWithTimeout(ctx, dial.DefaultDialTimeout, setupLog, l2RpcUrl) + l2RawRpc, err := dial.DialRPCClientWithTimeout(ctx, setupLog, l2RpcUrl) if err != nil { return nil, fmt.Errorf("dial L2 client: %w", err) } @@ -60,11 +61,16 @@ func NewRunner(l1RpcUrl string, l1RpcKind string, l1BeaconUrl string, l2RpcUrl s return nil, fmt.Errorf("failed to load rollup config: %w", err) } - chainCfg, err := chainconfig.ChainConfigByChainID(chainID) + chainCfg, err := chainconfig.L2ChainConfigByChainID(chainID) if err != nil { return nil, fmt.Errorf("failed to load chain config: %w", err) } + l1ChainCfg, err := 
chainconfig.L1ChainConfigByChainID(eth.ChainIDFromBig(rollupCfg.L1ChainID)) + if err != nil { + return nil, fmt.Errorf("failed to load l1 chain config: %w", err) + } + l2ClientCfg := sources.L2ClientDefaultConfig(rollupCfg, false) l2RPC := client.NewBaseRPCClient(l2RawRpc) l2Client, err := sources.NewL2Client(l2RPC, setupLog, nil, l2ClientCfg) @@ -84,6 +90,7 @@ func NewRunner(l1RpcUrl string, l1RpcKind string, l1BeaconUrl string, l2RpcUrl s setupLog: setupLog, l2Client: l2Client, rollupCfg: rollupCfg, + l1ChainCfg: l1ChainCfg, runInProcess: runInProcess, }, nil } @@ -112,7 +119,7 @@ func (r *Runner) RunBetweenBlocks(ctx context.Context, l1Head common.Hash, start } func (r *Runner) createL2Client(ctx context.Context) (*sources.L2Client, error) { - l2RawRpc, err := dial.DialRPCClientWithTimeout(ctx, dial.DefaultDialTimeout, r.setupLog, r.l2RpcUrl) + l2RawRpc, err := dial.DialRPCClientWithTimeout(ctx, r.setupLog, r.l2RpcUrl) if err != nil { return nil, fmt.Errorf("dial L2 client: %w", err) } @@ -210,7 +217,7 @@ func (r *Runner) run(ctx context.Context, l1Head common.Hash, agreedBlockInfo et if r.runInProcess { offlineCfg := config.NewSingleChainConfig( - r.rollupCfg, r.chainCfg, l1Head, agreedBlockInfo.Hash(), agreedOutputRoot, claimedOutputRoot, claimedBlockInfo.NumberU64()) + r.rollupCfg, r.chainCfg, r.l1ChainCfg, l1Head, agreedBlockInfo.Hash(), agreedOutputRoot, claimedOutputRoot, claimedBlockInfo.NumberU64()) offlineCfg.DataDir = r.dataDir onlineCfg := *offlineCfg diff --git a/op-proposer/metrics/metrics.go b/op-proposer/metrics/metrics.go index 3680bef92de1c..64318be40fa62 100644 --- a/op-proposer/metrics/metrics.go +++ b/op-proposer/metrics/metrics.go @@ -72,7 +72,7 @@ func NewMetrics(procName string) *Metrics { TxMetrics: txmetrics.MakeTxMetrics(ns, factory), RPCMetrics: opmetrics.MakeRPCMetrics(ns, factory), - proposalSequenceNum: prometheus.NewGauge(prometheus.GaugeOpts{ + proposalSequenceNum: factory.NewGauge(prometheus.GaugeOpts{ Namespace: ns, Name: 
"proposed_sequence_number", Help: "Sequence number (block number or timestamp) of the latest proposal", diff --git a/op-proposer/metrics/metrics_test.go b/op-proposer/metrics/metrics_test.go new file mode 100644 index 0000000000000..6b865480ec6be --- /dev/null +++ b/op-proposer/metrics/metrics_test.go @@ -0,0 +1,32 @@ +package metrics + +import ( + "testing" + + opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" + "github.com/stretchr/testify/require" +) + +func TestMetrics(test *testing.T) { + procName := "acceptance_test" + prefix := Namespace + "_" + procName + "_" + + expectedSequenceNumber := 1.0 + infoLabel := "test" + expectedInfo := 1.0 + expectedUp := 1.0 + + metrics := NewMetrics(procName) + metrics.RecordL2Proposal(uint64(expectedSequenceNumber)) + metrics.RecordInfo(infoLabel) + metrics.RecordUp() + + checker := opmetrics.NewMetricChecker(test, metrics.Registry()) + sequenceNumberMetric := checker.FindByName(prefix + "proposed_sequence_number").FindByLabels(nil).Gauge.GetValue() + infoMetric := checker.FindByName(prefix + "info").FindByLabels(map[string]string{"version": infoLabel}).Gauge.GetValue() + upMetric := checker.FindByName(prefix + "up").FindByLabels(nil).Gauge.GetValue() + + require.Equal(test, expectedSequenceNumber, sequenceNumberMetric) + require.Equal(test, expectedInfo, infoMetric) + require.Equal(test, expectedUp, upMetric) +} diff --git a/op-proposer/proposer/service.go b/op-proposer/proposer/service.go index 21d327cbeafd8..ac80a8a3baf2c 100644 --- a/op-proposer/proposer/service.go +++ b/op-proposer/proposer/service.go @@ -20,7 +20,6 @@ import ( opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" "github.com/ethereum-optimism/optimism/op-service/oppprof" oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" - "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum-optimism/optimism/op-service/sources/batching" "github.com/ethereum-optimism/optimism/op-service/txmgr" @@ 
-148,12 +147,11 @@ func (ps *ProposerService) initRPCClients(ctx context.Context, cfg *CLIConfig) e if len(cfg.SupervisorRpcs) != 0 { var clients []source.SupervisorClient for _, url := range cfg.SupervisorRpcs { - supervisorRpc, err := dial.DialRPCClientWithTimeout(ctx, dial.DefaultDialTimeout, ps.Log, url) + cl, err := dial.DialSupervisorClientWithTimeout(ctx, ps.Log, url, + client.WithRPCRecorder(ps.Metrics.NewRecorder("supervisor"))) if err != nil { return fmt.Errorf("failed to dial supervisor RPC client (%v): %w", url, err) } - cl := sources.NewSupervisorClient(client.NewBaseRPCClient(supervisorRpc, - client.WithRPCRecorder(ps.Metrics.NewRecorder("supervisor")))) clients = append(clients, cl) } ps.ProposalSource = source.NewSupervisorProposalSource(ps.Log, clients...) diff --git a/op-service/README.md b/op-service/README.md index 8626cc355e30c..e5a9cad89b0e3 100644 --- a/op-service/README.md +++ b/op-service/README.md @@ -28,6 +28,7 @@ Pull requests: [monorepo](https://github.com/ethereum-optimism/optimism/pulls?q= ├── jsonutil - JSON encoding/decoding utils ├── locks - Lock utils, like read-write wrapped types ├── log - Logging CLI and middleware utils +├── logpipe - Logs streaming from io.Reader to logger ├── logfilter - Logging filters ├── logmods - Log handler wrapping/unwrapping utils ├── metrics - Metrics types, metering abstractions, server utils diff --git a/op-service/apis/batcher.go b/op-service/apis/batcher.go index 987331f2c38a4..2668c5083dfdb 100644 --- a/op-service/apis/batcher.go +++ b/op-service/apis/batcher.go @@ -5,6 +5,7 @@ import "context" type BatcherActivity interface { StartBatcher(ctx context.Context) error StopBatcher(ctx context.Context) error + FlushBatcher(ctx context.Context) error } type BatcherAdminServer interface { diff --git a/op-service/apis/beacon.go b/op-service/apis/beacon.go index 4be95f53f6190..fd24f2c55b0da 100644 --- a/op-service/apis/beacon.go +++ b/op-service/apis/beacon.go @@ -11,6 +11,7 @@ type BeaconClient 
interface { NodeVersion(ctx context.Context) (string, error) ConfigSpec(ctx context.Context) (eth.APIConfigResponse, error) BeaconGenesis(ctx context.Context) (eth.APIGenesisResponse, error) + BeaconBlobs(ctx context.Context, slot uint64, hashes []eth.IndexedBlobHash) (eth.APIBeaconBlobsResponse, error) BlobSideCarsClient } diff --git a/op-service/apis/engine.go b/op-service/apis/engine.go new file mode 100644 index 0000000000000..027f683cebfab --- /dev/null +++ b/op-service/apis/engine.go @@ -0,0 +1,14 @@ +package apis + +import ( + "context" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" +) + +type EngineClient interface { + GetPayload(ctx context.Context, payloadInfo eth.PayloadInfo) (*eth.ExecutionPayloadEnvelope, error) + ForkchoiceUpdate(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) + NewPayload(ctx context.Context, payload *eth.ExecutionPayload, parentBeaconBlockRoot *common.Hash) (*eth.PayloadStatusV1, error) +} diff --git a/op-service/apis/eth.go b/op-service/apis/eth.go index ba13ced1d536c..2d35d7999414c 100644 --- a/op-service/apis/eth.go +++ b/op-service/apis/eth.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/sources/batching" @@ -100,7 +101,7 @@ type Gas interface { type EthCall interface { // Call executes a message call transaction but never mined into the blockchain. 
- Call(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) + Call(ctx context.Context, msg ethereum.CallMsg, blockNumber rpc.BlockNumber) ([]byte, error) } type TransactionSender interface { diff --git a/op-service/apis/sync_tester.go b/op-service/apis/sync_tester.go new file mode 100644 index 0000000000000..7e345cb2df881 --- /dev/null +++ b/op-service/apis/sync_tester.go @@ -0,0 +1,49 @@ +package apis + +import ( + "context" + "encoding/json" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" +) + +type SyncTester interface { + // Only expose sync namespace for encapsulation + SyncAPI + // ChainID for minimal sanity check + ChainID(ctx context.Context) (eth.ChainID, error) +} + +type SyncAPI interface { + GetSession(ctx context.Context) (*eth.SyncTesterSession, error) + DeleteSession(ctx context.Context) error + ResetSession(ctx context.Context) error + ListSessions(ctx context.Context) ([]string, error) +} + +type EthAPI interface { + GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (json.RawMessage, error) + GetBlockByHash(ctx context.Context, hash common.Hash, fullTx bool) (json.RawMessage, error) + GetBlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.Receipt, error) + ChainId(ctx context.Context) (hexutil.Big, error) +} + +type EngineAPI interface { + GetPayloadV1(ctx context.Context, payloadID eth.PayloadID) (*eth.ExecutionPayloadEnvelope, error) + GetPayloadV2(ctx context.Context, payloadID eth.PayloadID) (*eth.ExecutionPayloadEnvelope, error) + GetPayloadV3(ctx context.Context, payloadID eth.PayloadID) (*eth.ExecutionPayloadEnvelope, error) + GetPayloadV4(ctx context.Context, payloadID eth.PayloadID) (*eth.ExecutionPayloadEnvelope, error) + + ForkchoiceUpdatedV1(ctx context.Context, state 
*eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) + ForkchoiceUpdatedV2(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) + ForkchoiceUpdatedV3(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) + + NewPayloadV1(ctx context.Context, payload *eth.ExecutionPayload) (*eth.PayloadStatusV1, error) + NewPayloadV2(ctx context.Context, payload *eth.ExecutionPayload) (*eth.PayloadStatusV1, error) + NewPayloadV3(ctx context.Context, payload *eth.ExecutionPayload, versionedHashes []common.Hash, beaconRoot *common.Hash) (*eth.PayloadStatusV1, error) + NewPayloadV4(ctx context.Context, payload *eth.ExecutionPayload, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes) (*eth.PayloadStatusV1, error) +} diff --git a/op-service/client/dial_test.go b/op-service/client/dial_test.go index 08163976f9f64..ef71e046e5cdf 100644 --- a/op-service/client/dial_test.go +++ b/op-service/client/dial_test.go @@ -6,10 +6,13 @@ import ( "net" "strings" "testing" + "time" "github.com/stretchr/testify/require" ) +const defaultConnectTimeout = 5 * time.Second + func TestIsURLAvailableLocal(t *testing.T) { listener, err := net.Listen("tcp4", ":0") require.NoError(t, err) @@ -20,38 +23,38 @@ func TestIsURLAvailableLocal(t *testing.T) { addr := fmt.Sprintf("http://localhost:%s", parts[1]) // True & False with ports - require.True(t, IsURLAvailable(context.Background(), addr)) - require.False(t, IsURLAvailable(context.Background(), "http://localhost:0")) + require.True(t, IsURLAvailable(context.Background(), addr, defaultConnectTimeout)) + require.False(t, IsURLAvailable(context.Background(), "http://localhost:0", defaultConnectTimeout)) // Fail open if we don't recognize the scheme - require.True(t, IsURLAvailable(context.Background(), "mailto://example.com")) + require.True(t, 
IsURLAvailable(context.Background(), "mailto://example.com", defaultConnectTimeout)) } func TestIsURLAvailableNonLocal(t *testing.T) { - if !IsURLAvailable(context.Background(), "http://example.com") { + if !IsURLAvailable(context.Background(), "http://example.com", defaultConnectTimeout) { t.Skip("No internet connection found, skipping this test") } // True without ports. http & https - require.True(t, IsURLAvailable(context.Background(), "http://example.com")) - require.True(t, IsURLAvailable(context.Background(), "http://example.com/hello")) - require.True(t, IsURLAvailable(context.Background(), "https://example.com")) - require.True(t, IsURLAvailable(context.Background(), "https://example.com/hello")) + require.True(t, IsURLAvailable(context.Background(), "http://example.com", defaultConnectTimeout)) + require.True(t, IsURLAvailable(context.Background(), "http://example.com/hello", defaultConnectTimeout)) + require.True(t, IsURLAvailable(context.Background(), "https://example.com", defaultConnectTimeout)) + require.True(t, IsURLAvailable(context.Background(), "https://example.com/hello", defaultConnectTimeout)) // True without ports. 
ws & wss - require.True(t, IsURLAvailable(context.Background(), "ws://example.com")) - require.True(t, IsURLAvailable(context.Background(), "ws://example.com/hello")) - require.True(t, IsURLAvailable(context.Background(), "wss://example.com")) - require.True(t, IsURLAvailable(context.Background(), "wss://example.com/hello")) + require.True(t, IsURLAvailable(context.Background(), "ws://example.com", defaultConnectTimeout)) + require.True(t, IsURLAvailable(context.Background(), "ws://example.com/hello", defaultConnectTimeout)) + require.True(t, IsURLAvailable(context.Background(), "wss://example.com", defaultConnectTimeout)) + require.True(t, IsURLAvailable(context.Background(), "wss://example.com/hello", defaultConnectTimeout)) // False without ports - require.False(t, IsURLAvailable(context.Background(), "http://fakedomainnamethatdoesnotexistandshouldneverexist.com")) - require.False(t, IsURLAvailable(context.Background(), "http://fakedomainnamethatdoesnotexistandshouldneverexist.com/hello")) - require.False(t, IsURLAvailable(context.Background(), "https://fakedomainnamethatdoesnotexistandshouldneverexist.com")) - require.False(t, IsURLAvailable(context.Background(), "https://fakedomainnamethatdoesnotexistandshouldneverexist.com/hello")) - require.False(t, IsURLAvailable(context.Background(), "ws://fakedomainnamethatdoesnotexistandshouldneverexist.com")) - require.False(t, IsURLAvailable(context.Background(), "ws://fakedomainnamethatdoesnotexistandshouldneverexist.com/hello")) - require.False(t, IsURLAvailable(context.Background(), "wss://fakedomainnamethatdoesnotexistandshouldneverexist.com")) - require.False(t, IsURLAvailable(context.Background(), "wss://fakedomainnamethatdoesnotexistandshouldneverexist.com/hello")) + require.False(t, IsURLAvailable(context.Background(), "http://fakedomainnamethatdoesnotexistandshouldneverexist.com", defaultConnectTimeout)) + require.False(t, IsURLAvailable(context.Background(), 
"http://fakedomainnamethatdoesnotexistandshouldneverexist.com/hello", defaultConnectTimeout)) + require.False(t, IsURLAvailable(context.Background(), "https://fakedomainnamethatdoesnotexistandshouldneverexist.com", defaultConnectTimeout)) + require.False(t, IsURLAvailable(context.Background(), "https://fakedomainnamethatdoesnotexistandshouldneverexist.com/hello", defaultConnectTimeout)) + require.False(t, IsURLAvailable(context.Background(), "ws://fakedomainnamethatdoesnotexistandshouldneverexist.com", defaultConnectTimeout)) + require.False(t, IsURLAvailable(context.Background(), "ws://fakedomainnamethatdoesnotexistandshouldneverexist.com/hello", defaultConnectTimeout)) + require.False(t, IsURLAvailable(context.Background(), "wss://fakedomainnamethatdoesnotexistandshouldneverexist.com", defaultConnectTimeout)) + require.False(t, IsURLAvailable(context.Background(), "wss://fakedomainnamethatdoesnotexistandshouldneverexist.com/hello", defaultConnectTimeout)) } diff --git a/op-service/client/rpc.go b/op-service/client/rpc.go index 09713b7e754dd..63694ced737c8 100644 --- a/op-service/client/rpc.go +++ b/op-service/client/rpc.go @@ -36,10 +36,17 @@ type rpcConfig struct { callTimeout time.Duration batchCallTimeout time.Duration fixedDialBackoff time.Duration + connectTimeout time.Duration } type RPCOption func(cfg *rpcConfig) +func WithConnectTimeout(d time.Duration) RPCOption { + return func(cfg *rpcConfig) { + cfg.connectTimeout = d + } +} + func WithCallTimeout(d time.Duration) RPCOption { return func(cfg *rpcConfig) { cfg.callTimeout = d @@ -131,6 +138,9 @@ func applyOptions(opts []RPCOption) rpcConfig { opt(&cfg) } + if cfg.connectTimeout == 0 { + cfg.connectTimeout = 10 * time.Second + } if cfg.backoffAttempts < 1 { // default to at least 1 attempt, or it always fails to dial. 
cfg.backoffAttempts = 1 } @@ -158,12 +168,15 @@ func dialRPCClientWithBackoff(ctx context.Context, log log.Logger, addr string, bOff = retry.Fixed(cfg.fixedDialBackoff) } return retry.Do(ctx, cfg.backoffAttempts, bOff, func() (*rpc.Client, error) { - return CheckAndDial(ctx, log, addr, cfg.gethRPCOptions...) + return CheckAndDial(ctx, log, addr, cfg.connectTimeout, cfg.gethRPCOptions...) }) } -func CheckAndDial(ctx context.Context, log log.Logger, addr string, options ...rpc.ClientOption) (*rpc.Client, error) { - if !IsURLAvailable(ctx, addr) { +func CheckAndDial(ctx context.Context, log log.Logger, addr string, connectTimeout time.Duration, options ...rpc.ClientOption) (*rpc.Client, error) { + ctx, cancel := context.WithTimeout(ctx, connectTimeout) + defer cancel() + + if !IsURLAvailable(ctx, addr, connectTimeout) { log.Warn("failed to dial address, but may connect later", "addr", addr) return nil, fmt.Errorf("address unavailable (%s)", addr) } @@ -174,7 +187,7 @@ func CheckAndDial(ctx context.Context, log log.Logger, addr string, options ...r return client, nil } -func IsURLAvailable(ctx context.Context, address string) bool { +func IsURLAvailable(ctx context.Context, address string, timeout time.Duration) bool { u, err := url.Parse(address) if err != nil { return false @@ -191,7 +204,7 @@ func IsURLAvailable(ctx context.Context, address string) bool { return true } } - dialer := net.Dialer{Timeout: 5 * time.Second} + dialer := net.Dialer{Timeout: timeout} conn, err := dialer.DialContext(ctx, "tcp", addr) if err != nil { return false diff --git a/op-service/cliutil/struct.go b/op-service/cliutil/struct.go index 74fd432059964..8f7cbf5dcbb89 100644 --- a/op-service/cliutil/struct.go +++ b/op-service/cliutil/struct.go @@ -2,8 +2,10 @@ package cliutil import ( "encoding" + "encoding/hex" "fmt" "reflect" + "strings" "github.com/ethereum/go-ethereum/common" "github.com/urfave/cli/v2" @@ -102,6 +104,32 @@ func handleSpecialTypes(fieldValue reflect.Value, fieldType 
reflect.Type, ctx *c return nil } + // Handle common.Hash + if fieldType == reflect.TypeOf(common.Hash{}) { + if !ctx.IsSet(flag) { + return nil + } + + hashStr := strings.TrimPrefix(ctx.String(flag), "0x") + + // Validate hex format and length + if hashStr != "" { + // Check length - common.Hash is 32 bytes = 64 hex chars + "0x" prefix = 66 total + if len(hashStr) != 64 { + return fmt.Errorf("invalid hash: length must be 64 characters") + } + + // Validate hex characters + if _, err := hex.DecodeString(hashStr); err != nil { + return fmt.Errorf("invalid hash: non-hex characters in hash") + } + } + + hash := common.HexToHash(hashStr) + fieldValue.Set(reflect.ValueOf(hash)) + return nil + } + // If type implements TextUnmarshaler if unmarshaler, ok := fieldValue.Interface().(encoding.TextUnmarshaler); ok { return unmarshaler.UnmarshalText([]byte(ctx.String(flag))) diff --git a/op-service/cliutil/struct_test.go b/op-service/cliutil/struct_test.go index a13dde9a057e3..758cd0ca5f1c7 100644 --- a/op-service/cliutil/struct_test.go +++ b/op-service/cliutil/struct_test.go @@ -26,6 +26,7 @@ func TestPopulateStruct(t *testing.T) { Int64 int64 `cli:"int64"` Uint64 uint64 `cli:"uint64"` Address common.Address `cli:"address"` + Hash common.Hash `cli:"hash"` TextUnmarshaler *textUnmarshalerThing `cli:"text-unmarshaler"` NotTagged string } @@ -45,6 +46,7 @@ func TestPopulateStruct(t *testing.T) { "--int64=2", "--uint64=3", fmt.Sprintf("--address=%s", common.HexToAddress("0x42")), + fmt.Sprintf("--hash=%s", common.HexToHash("43")), "--text-unmarshaler=hello", }, exp: testStruct{ @@ -54,6 +56,7 @@ func TestPopulateStruct(t *testing.T) { Int64: 2, Uint64: 3, Address: common.HexToAddress("0x42"), + Hash: common.HexToHash("0x43"), TextUnmarshaler: &textUnmarshalerThing{ text: "hello", }, @@ -71,6 +74,29 @@ func TestPopulateStruct(t *testing.T) { }, expErr: "invalid address", }, + { + name: "invalid hash flag (invalid length)", + args: []string{ + 
"--hash=12345678901234567890123456789012345678901234567890123456789012345", + }, + expErr: "invalid hash: length must be 64 characters", + }, + { + name: "invalid hash flag (invalid characters)", + args: []string{ + "--hash=123456789012345678901234567890123456789012345678901234567890123g", + }, + expErr: "invalid hash: non-hex characters in hash", + }, + { + name: "allow zero hash", + args: []string{ + fmt.Sprintf("--hash=%s", common.HexToHash("0")), + }, + exp: testStruct{ + Hash: common.HexToHash("0x0"), + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -95,6 +121,9 @@ func TestPopulateStruct(t *testing.T) { &cli.StringFlag{ Name: "address", }, + &cli.StringFlag{ + Name: "hash", + }, &cli.StringFlag{ Name: "text-unmarshaler", }, diff --git a/op-service/dial/dial.go b/op-service/dial/dial.go index d5f2726462928..e6ce72628ed2c 100644 --- a/op-service/dial/dial.go +++ b/op-service/dial/dial.go @@ -16,6 +16,7 @@ import ( const DefaultDialTimeout = 1 * time.Minute const defaultRetryCount = 30 const defaultRetryTime = 2 * time.Second +const defaultConnectTimeout = 10 * time.Second // DialEthClientWithTimeout attempts to dial the L1 provider using the provided // URL. If the dial doesn't complete within defaultDialTimeout seconds, this @@ -32,19 +33,22 @@ func DialEthClientWithTimeout(ctx context.Context, timeout time.Duration, log lo return ethclient.NewClient(c), nil } -// DialRollupClientWithTimeout attempts to dial the RPC provider using the provided URL. -// If the dial doesn't complete within timeout seconds, this method will return an error. -func DialRollupClientWithTimeout(ctx context.Context, timeout time.Duration, log log.Logger, url string, callerOpts ...client.RPCOption) (*sources.RollupClient, error) { - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - +// dialClientWithTimeout dials an RPC client with a timeout. 
+func dialClientWithTimeout(ctx context.Context, log log.Logger, url string, callerOpts ...client.RPCOption) (client.RPC, error) { opts := []client.RPCOption{ client.WithFixedDialBackoff(defaultRetryTime), client.WithDialAttempts(defaultRetryCount), + client.WithConnectTimeout(defaultConnectTimeout), } opts = append(opts, callerOpts...) - rpcCl, err := client.NewRPC(ctx, log, url, opts...) + return client.NewRPC(ctx, log, url, opts...) +} + +// DialRollupClientWithTimeout attempts to dial the RPC provider using the provided URL. +// The timeout and retry logic is handled internally by the client. +func DialRollupClientWithTimeout(ctx context.Context, log log.Logger, url string, callerOpts ...client.RPCOption) (*sources.RollupClient, error) { + rpcCl, err := dialClientWithTimeout(ctx, log, url, callerOpts...) if err != nil { return nil, err } @@ -52,12 +56,18 @@ func DialRollupClientWithTimeout(ctx context.Context, timeout time.Duration, log return sources.NewRollupClient(rpcCl), nil } -// DialRPCClientWithTimeout attempts to dial the RPC provider using the provided URL. -// If the dial doesn't complete within timeout seconds, this method will return an error. -func DialRPCClientWithTimeout(ctx context.Context, timeout time.Duration, log log.Logger, url string, opts ...rpc.ClientOption) (*rpc.Client, error) { - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() +func DialSupervisorClientWithTimeout(ctx context.Context, log log.Logger, url string, callerOpts ...client.RPCOption) (*sources.SupervisorClient, error) { + rpcCl, err := dialClientWithTimeout(ctx, log, url, callerOpts...) + if err != nil { + return nil, err + } + + return sources.NewSupervisorClient(rpcCl), nil +} +// DialRPCClientWithTimeout attempts to dial the RPC provider using the provided URL. +// The timeout and retry logic is handled internally by the client. 
+func DialRPCClientWithTimeout(ctx context.Context, log log.Logger, url string, opts ...rpc.ClientOption) (*rpc.Client, error) { return dialRPCClientWithBackoff(ctx, log, url, opts...) } @@ -71,5 +81,5 @@ func dialRPCClientWithBackoff(ctx context.Context, log log.Logger, addr string, // Dials a JSON-RPC endpoint once. func dialRPCClient(ctx context.Context, log log.Logger, addr string, opts ...rpc.ClientOption) (*rpc.Client, error) { - return client.CheckAndDial(ctx, log, addr, opts...) + return client.CheckAndDial(ctx, log, addr, defaultConnectTimeout, opts...) } diff --git a/op-service/dial/static_rollup_provider.go b/op-service/dial/static_rollup_provider.go index 1b3e8e16d122e..cfb6546bd1762 100644 --- a/op-service/dial/static_rollup_provider.go +++ b/op-service/dial/static_rollup_provider.go @@ -25,7 +25,7 @@ type StaticL2RollupProvider struct { } func NewStaticL2RollupProvider(ctx context.Context, log log.Logger, rollupClientUrl string) (*StaticL2RollupProvider, error) { - rollupClient, err := DialRollupClientWithTimeout(ctx, DefaultDialTimeout, log, rollupClientUrl) + rollupClient, err := DialRollupClientWithTimeout(ctx, log, rollupClientUrl) if err != nil { return nil, err } diff --git a/op-service/eth/blob.go b/op-service/eth/blob.go index 142feba08968f..ff3eccdc4068b 100644 --- a/op-service/eth/blob.go +++ b/op-service/eth/blob.go @@ -283,36 +283,23 @@ func (b *Blob) Clear() { } } -// CalcBlobFeeDefault calculates the blob fee for the given header using eip4844.CalcBlobFee, -// using the requests hash field of the header as a best-effort heuristic whether -// Prague is active, and the default Ethereum blob schedule. -// -// This is to deal in a best-effort way with situations where the chain config is not -// available, but it can be assumed that per the definition of the Prague fork that -// Prague is active iff the requests hash field is present. 
-func CalcBlobFeeDefault(header *types.Header) *big.Int { - // We make the assumption that eip4844.CalcBlobFee only needs - // - London and Cancun to be active - // - the Prague time to be set relative to the header time - // and that the caller assumes the default prod Ethereum Blob schedule config. - dummyChainCfg := ¶ms.ChainConfig{ - LondonBlock: common.Big0, - CancunTime: ptr(uint64(0)), - BlobScheduleConfig: params.DefaultBlobSchedule, - } - // We assume that the requests hash is set iff Prague is active. - if header.RequestsHash != nil { - dummyChainCfg.PragueTime = ptr(uint64(0)) - } - return eip4844.CalcBlobFee(dummyChainCfg, header) -} - +// CalcBlobFeeCancun calculates the blob fee for the given header using +// the default blob schedule for Cancun. This function only exists +// to support the L1 Pectra Blob Schedule Fix. The geth function +// eip4844.CalcBlobFee should be used instead. func CalcBlobFeeCancun(excessBlobGas uint64) *big.Int { // Dummy Cancun header for calculation. cancunHeader := &types.Header{ ExcessBlobGas: &excessBlobGas, } - return CalcBlobFeeDefault(cancunHeader) + + // Dummy Cancun chain config for calculation. 
+ dummyChainCfg := ¶ms.ChainConfig{ + LondonBlock: common.Big0, + CancunTime: ptr(uint64(0)), + BlobScheduleConfig: params.DefaultBlobSchedule, + } + return eip4844.CalcBlobFee(dummyChainCfg, cancunHeader) } func ptr[T any](t T) *T { return &t } diff --git a/op-service/eth/blob_test.go b/op-service/eth/blob_test.go index 5b77e9bd29a35..52849fca65a38 100644 --- a/op-service/eth/blob_test.go +++ b/op-service/eth/blob_test.go @@ -5,9 +5,10 @@ import ( "math/rand" "testing" - "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -296,18 +297,52 @@ func TestExtraneousData(t *testing.T) { } } -// TestCalcBlobFeeDefault ensures that the best-effort implementation of CalcBlobFeeDefault -// works as expected. In particular, this test will quickly fail and help detect any changes -// made to the internals of the upstream eip4844.CalcBlobFee function, on which -// CalcBlobFeeDefault relies on with certain assumptions. -func TestCalcBlobFeeDefault(t *testing.T) { - header := &types.Header{ - ExcessBlobGas: ptr(uint64(20 * params.DefaultCancunBlobConfig.UpdateFraction)), - } - cancunBlobFee := CalcBlobFeeDefault(header) +func TestCalcBlobFeeCancun(t *testing.T) { + cancunBlobFee := CalcBlobFeeCancun(uint64(20 * params.DefaultCancunBlobConfig.UpdateFraction)) require.Equal(t, big.NewInt(485165195), cancunBlobFee) +} + +// TestCalcBlobFeeAcrossForksWithFixedExcess tests the blob base fee calculation for different forks. +// Using the Sepolia chain config. 
+func TestCalcBlobFeeAcrossForksWithFixedExcess(t *testing.T) { + excess := uint64(40_000_000) + header := &types.Header{ExcessBlobGas: &excess, Time: 1754904516, Number: big.NewInt(1)} + cfg := params.SepoliaChainConfig + tests := []struct { + name string + blockTime uint64 + wantBF int64 + }{ + { + name: "Cancun", + blockTime: *cfg.CancunTime, + wantBF: 159773, + }, + { + name: "Prague", + blockTime: *cfg.PragueTime, + wantBF: 2944, + }, + { + name: "Osaka", + blockTime: *cfg.OsakaTime, + wantBF: 2944, + }, + { + name: "BPO1", + blockTime: *cfg.BPO1Time, + wantBF: 120, + }, + { + name: "BPO2", + blockTime: *cfg.BPO2Time, + wantBF: 30, + }, + } - header.RequestsHash = &(common.Hash{}) - pragueBlobFee := CalcBlobFeeDefault(header) - require.Equal(t, big.NewInt(617436), pragueBlobFee) + for _, tt := range tests { + header.Time = tt.blockTime + bf := eip4844.CalcBlobFee(cfg, header) + assert.Equal(t, tt.wantBF, bf.Int64()) + } } diff --git a/op-service/eth/blobs_api.go b/op-service/eth/blobs_api.go index 239f97528edeb..9b7b8f5dee66e 100644 --- a/op-service/eth/blobs_api.go +++ b/op-service/eth/blobs_api.go @@ -44,6 +44,12 @@ type APIGetBlobSidecarsResponse struct { Data []*APIBlobSidecar `json:"data"` } +type APIBeaconBlobsResponse struct { + // There are other fields but we only include the ones we're interested in. 
+ + Data []*Blob `json:"data"` +} + type ReducedGenesisData struct { GenesisTime Uint64String `json:"genesis_time"` } diff --git a/op-service/eth/block_info.go b/op-service/eth/block_info.go index 0b1c7c46c41c8..10e6f5761f369 100644 --- a/op-service/eth/block_info.go +++ b/op-service/eth/block_info.go @@ -4,7 +4,9 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc/eip4844" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" ) @@ -20,7 +22,7 @@ type BlockInfo interface { BaseFee() *big.Int // BlobBaseFee returns the result of computing the blob fee from excessDataGas, or nil if the // block isn't a Dencun (4844 capable) block - BlobBaseFee() *big.Int + BlobBaseFee(chainConfig *params.ChainConfig) *big.Int ExcessBlobGas() *uint64 ReceiptHash() common.Hash GasUsed() uint64 @@ -57,12 +59,12 @@ func ToBlockID(b NumberAndHash) BlockID { // blockInfo is a conversion type of types.Block turning it into a BlockInfo type blockInfo struct{ *types.Block } -func (b blockInfo) BlobBaseFee() *big.Int { +func (b blockInfo) BlobBaseFee(chainConfig *params.ChainConfig) *big.Int { ebg := b.ExcessBlobGas() if ebg == nil { return nil } - return CalcBlobFeeDefault(b.Header()) + return eip4844.CalcBlobFee(chainConfig, b.Header()) } func (b blockInfo) HeaderRLP() ([]byte, error) { @@ -124,11 +126,11 @@ func (h *headerBlockInfo) BaseFee() *big.Int { return h.header.BaseFee } -func (h *headerBlockInfo) BlobBaseFee() *big.Int { +func (h *headerBlockInfo) BlobBaseFee(chainConfig *params.ChainConfig) *big.Int { if h.header.ExcessBlobGas == nil { return nil } - return CalcBlobFeeDefault(h.header) + return eip4844.CalcBlobFee(chainConfig, h.header) } func (h *headerBlockInfo) ExcessBlobGas() *uint64 { diff --git a/op-service/eth/config.go b/op-service/eth/config.go new file mode 100644 index 0000000000000..6632bce12922e --- /dev/null +++ 
b/op-service/eth/config.go @@ -0,0 +1,23 @@ +package eth + +import ( + "github.com/ethereum/go-ethereum/params" +) + +// L1ChainConfigByChainID returns the chain config for the given chain ID, +// if it is in the set of known chain IDs (Mainnet, Sepolia, Holesky, Hoodi). +// If the chain ID is not known, it returns nil. +func L1ChainConfigByChainID(chainID ChainID) *params.ChainConfig { + switch chainID { + case ChainIDFromBig(params.MainnetChainConfig.ChainID): + return params.MainnetChainConfig + case ChainIDFromBig(params.SepoliaChainConfig.ChainID): + return params.SepoliaChainConfig + case ChainIDFromBig(params.HoleskyChainConfig.ChainID): + return params.HoleskyChainConfig + case ChainIDFromBig(params.HoodiChainConfig.ChainID): + return params.HoodiChainConfig + default: + return nil + } +} diff --git a/op-service/eth/config_test.go b/op-service/eth/config_test.go new file mode 100644 index 0000000000000..2ab4cd4192749 --- /dev/null +++ b/op-service/eth/config_test.go @@ -0,0 +1,31 @@ +package eth + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestL1ChainConfigByChainID(t *testing.T) { + tc := []struct { + chainID uint64 + expectedDepositContractAddress common.Address + shouldBeNil bool + }{ + {1, common.HexToAddress("0x00000000219ab540356cbb839cbe05303d7705fa"), false}, // Mainnet + {11155111, common.HexToAddress("0x7f02c3e3c98b133055b8b348b2ac625669ed295d"), false}, // Sepolia + {17000, common.HexToAddress("0x4242424242424242424242424242424242424242"), false}, // Holesky + {560048, common.HexToAddress("0x00000000219ab540356cBB839Cbe05303d7705Fa"), false}, // Hoodi + {560049, common.HexToAddress("0xdeadbeef"), true}, // Unknown + } + for _, tc := range tc { + config := L1ChainConfigByChainID(ChainIDFromUInt64(tc.chainID)) + + if tc.shouldBeNil { + require.Nil(t, config) + } else { + require.Equal(t, tc.expectedDepositContractAddress, config.DepositContractAddress) + } + } +} diff --git 
a/op-service/eth/status.go b/op-service/eth/status.go index 52e2e9c58cc4f..4626941b4e23f 100644 --- a/op-service/eth/status.go +++ b/op-service/eth/status.go @@ -22,20 +22,24 @@ func ForkchoiceUpdateErr(payloadStatus PayloadStatusV1) error { } func NewPayloadErr(payload *ExecutionPayload, payloadStatus *PayloadStatusV1) error { + vErr := "" + if payloadStatus.ValidationError != nil { + vErr = *payloadStatus.ValidationError + } switch payloadStatus.Status { case ExecutionValid: return nil case ExecutionSyncing: return fmt.Errorf("failed to execute payload %s, node is syncing", payload.ID()) case ExecutionInvalid: - return fmt.Errorf("execution payload %s was INVALID! Latest valid hash is %s, ignoring bad block: %v", payload.ID(), payloadStatus.LatestValidHash, payloadStatus.ValidationError) + return fmt.Errorf("execution payload %s was INVALID! Latest valid hash is %s, ignoring bad block: %s", payload.ID(), payloadStatus.LatestValidHash, vErr) case ExecutionInvalidBlockHash: - return fmt.Errorf("execution payload %s has INVALID BLOCKHASH! %v", payload.BlockHash, payloadStatus.ValidationError) + return fmt.Errorf("execution payload %s has INVALID BLOCKHASH! %s", payload.BlockHash, vErr) case ExecutionInvalidTerminalBlock: - return fmt.Errorf("engine is misconfigured. Received invalid-terminal-block error while engine API should be active at genesis. err: %v", payloadStatus.ValidationError) + return fmt.Errorf("engine is misconfigured. Received invalid-terminal-block error while engine API should be active at genesis. 
err: %s", vErr) case ExecutionAccepted: return fmt.Errorf("execution payload cannot be validated yet, latest valid hash is %s", payloadStatus.LatestValidHash) default: - return fmt.Errorf("unknown execution status on %s: %q, ", payload.ID(), string(payloadStatus.Status)) + return fmt.Errorf("unknown execution status on %s: %q; err: %s", payload.ID(), string(payloadStatus.Status), vErr) } } diff --git a/op-service/eth/synctester_session.go b/op-service/eth/synctester_session.go new file mode 100644 index 0000000000000..4cb4f9377702b --- /dev/null +++ b/op-service/eth/synctester_session.go @@ -0,0 +1,80 @@ +package eth + +import ( + "sync" +) + +// FCUState represents the Fork Choice Update state with Latest, Safe, and Finalized block numbers +type FCUState struct { + Latest uint64 `json:"latest"` + Safe uint64 `json:"safe"` + Finalized uint64 `json:"finalized"` +} + +type SyncTesterSession struct { + sync.Mutex + + SessionID string `json:"session_id"` + + // Non canonical view of the chain + Validated uint64 `json:"validated"` + // Canonical view of the chain + CurrentState FCUState `json:"current_state"` + // payloads + Payloads map[PayloadID]*ExecutionPayloadEnvelope `json:"-"` + + ELSyncTarget uint64 `json:"el_sync_target"` + ELSyncActive bool `json:"el_sync_active"` + + InitialState FCUState `json:"initial_state"` + InitialELSyncActive bool `json:"initial_el_sync_active"` +} + +func (s *SyncTesterSession) UpdateFCULatest(latest uint64) { + s.CurrentState.Latest = latest +} + +func (s *SyncTesterSession) UpdateFCUSafe(safe uint64) { + s.CurrentState.Safe = safe +} + +func (s *SyncTesterSession) UpdateFCUFinalized(finalized uint64) { + s.CurrentState.Finalized = finalized +} + +func (s *SyncTesterSession) FinishELSync(target uint64) { + s.ELSyncActive = false + s.Validated = target +} + +func (s *SyncTesterSession) IsELSyncFinished() bool { + return !s.ELSyncActive +} + +func (s *SyncTesterSession) ResetSession() { + s.CurrentState = s.InitialState + s.Validated = 
s.InitialState.Latest + s.Payloads = make(map[PayloadID]*ExecutionPayloadEnvelope) + s.ELSyncActive = s.InitialELSyncActive +} + +func NewSyncTesterSession(sessionID string, latest, safe, finalized, elSyncTarget uint64, elSyncActive bool) *SyncTesterSession { + return &SyncTesterSession{ + SessionID: sessionID, + Validated: latest, + CurrentState: FCUState{ + Latest: latest, + Safe: safe, + Finalized: finalized, + }, + Payloads: make(map[PayloadID]*ExecutionPayloadEnvelope), + ELSyncTarget: elSyncTarget, + ELSyncActive: elSyncActive, + InitialState: FCUState{ + Latest: latest, + Safe: safe, + Finalized: finalized, + }, + InitialELSyncActive: elSyncActive, + } +} diff --git a/op-service/eth/types.go b/op-service/eth/types.go index 98d6ff4a5c98f..ca8a89736f7b4 100644 --- a/op-service/eth/types.go +++ b/op-service/eth/types.go @@ -271,6 +271,97 @@ type ExecutionPayload struct { WithdrawalsRoot *common.Hash `json:"withdrawalsRoot,omitempty"` } +func (p *ExecutionPayload) CheckEqual(o *ExecutionPayload) error { + if p == nil || o == nil { + if p == o { + return nil + } + return fmt.Errorf("one of the payloads is nil: p=%v, o=%v", p, o) + } + if p.ParentHash != o.ParentHash { + return fmt.Errorf("ParentHash mismatch: %v != %v", p.ParentHash, o.ParentHash) + } + if p.FeeRecipient != o.FeeRecipient { + return fmt.Errorf("FeeRecipient mismatch: %v != %v", p.FeeRecipient, o.FeeRecipient) + } + if p.StateRoot != o.StateRoot { + return fmt.Errorf("StateRoot mismatch: %v != %v", p.StateRoot, o.StateRoot) + } + if p.ReceiptsRoot != o.ReceiptsRoot { + return fmt.Errorf("ReceiptsRoot mismatch: %v != %v", p.ReceiptsRoot, o.ReceiptsRoot) + } + if p.LogsBloom != o.LogsBloom { + return fmt.Errorf("LogsBloom mismatch") + } + if p.PrevRandao != o.PrevRandao { + return fmt.Errorf("PrevRandao mismatch: %v != %v", p.PrevRandao, o.PrevRandao) + } + if p.BlockNumber != o.BlockNumber { + return fmt.Errorf("BlockNumber mismatch: %v != %v", p.BlockNumber, o.BlockNumber) + } + if p.GasLimit != 
o.GasLimit { + return fmt.Errorf("GasLimit mismatch: %v != %v", p.GasLimit, o.GasLimit) + } + if p.GasUsed != o.GasUsed { + return fmt.Errorf("GasUsed mismatch: %v != %v", p.GasUsed, o.GasUsed) + } + if p.Timestamp != o.Timestamp { + return fmt.Errorf("timestamp mismatch: %v != %v", p.Timestamp, o.Timestamp) + } + if p.BaseFeePerGas != o.BaseFeePerGas { + return fmt.Errorf("BaseFeePerGas mismatch: %v != %v", p.BaseFeePerGas, o.BaseFeePerGas) + } + if p.BlockHash != o.BlockHash { + return fmt.Errorf("BlockHash mismatch: %v != %v", p.BlockHash, o.BlockHash) + } + if !bytes.Equal(p.ExtraData, o.ExtraData) { + return fmt.Errorf("ExtraData mismatch") + } + if len(p.Transactions) != len(o.Transactions) { + return fmt.Errorf("transactions length mismatch: %d != %d", len(p.Transactions), len(o.Transactions)) + } + for i := range p.Transactions { + if !bytes.Equal(p.Transactions[i], o.Transactions[i]) { + return fmt.Errorf("transaction[%d] mismatch", i) + } + } + if (p.Withdrawals == nil) != (o.Withdrawals == nil) { + return fmt.Errorf("withdrawals nil mismatch: %v != %v", p.Withdrawals == nil, o.Withdrawals == nil) + } + if p.Withdrawals != nil { + if p.Withdrawals.Len() != o.Withdrawals.Len() { + return fmt.Errorf("withdrawals length mismatch: %d != %d", p.Withdrawals.Len(), o.Withdrawals.Len()) + } + for i := range p.Withdrawals.Len() { + if ((*p.Withdrawals)[i] == nil) != ((*o.Withdrawals)[i] == nil) { + return fmt.Errorf("withdrawals[%d] nil mismatch", i) + } + if (*p.Withdrawals)[i] != nil && *(*p.Withdrawals)[i] != *(*o.Withdrawals)[i] { + return fmt.Errorf("withdrawals[%d] mismatch", i) + } + } + } + if (p.BlobGasUsed == nil) != (o.BlobGasUsed == nil) { + return fmt.Errorf("BlobGasUsed nil mismatch") + } + if p.BlobGasUsed != nil && *p.BlobGasUsed != *o.BlobGasUsed { + return fmt.Errorf("BlobGasUsed mismatch: %v != %v", *p.BlobGasUsed, *o.BlobGasUsed) + } + if (p.ExcessBlobGas == nil) != (o.ExcessBlobGas == nil) { + return fmt.Errorf("ExcessBlobGas nil mismatch") + 
} + if p.ExcessBlobGas != nil && *p.ExcessBlobGas != *o.ExcessBlobGas { + return fmt.Errorf("ExcessBlobGas mismatch: %v != %v", *p.ExcessBlobGas, *o.ExcessBlobGas) + } + if (p.WithdrawalsRoot == nil) != (o.WithdrawalsRoot == nil) { + return fmt.Errorf("WithdrawalsRoot nil mismatch") + } + if p.WithdrawalsRoot != nil && *p.WithdrawalsRoot != *o.WithdrawalsRoot { + return fmt.Errorf("WithdrawalsRoot mismatch: %v != %v", *p.WithdrawalsRoot, *o.WithdrawalsRoot) + } + return nil +} + func (payload *ExecutionPayload) ID() BlockID { return BlockID{Hash: payload.BlockHash, Number: uint64(payload.BlockNumber)} } @@ -345,6 +436,9 @@ func (envelope *ExecutionPayloadEnvelope) CheckBlockHash() (actual common.Hash, return blockHash, blockHash == payload.BlockHash } +// BlockAsPayload converts a [*types.Block] to an [ExecutionPayload]. It can only be used to convert +// OP-Stack blocks, as it follows Canyon and Isthmus rules to set the Withdrawals and +// WithdrawalsRoot fields. func BlockAsPayload(bl *types.Block, config *params.ChainConfig) (*ExecutionPayload, error) { baseFee, overflow := uint256.FromBig(bl.BaseFee()) if overflow { @@ -382,11 +476,11 @@ func BlockAsPayload(bl *types.Block, config *params.ChainConfig) (*ExecutionPayl // WithdrawalsRoot is only set starting at Isthmus } - if config.ShanghaiTime != nil && uint64(payload.Timestamp) >= *config.ShanghaiTime { + if config.IsCanyon(uint64(payload.Timestamp)) { payload.Withdrawals = &types.Withdrawals{} } - if config.IsthmusTime != nil && uint64(payload.Timestamp) >= *config.IsthmusTime { + if config.IsIsthmus(uint64(payload.Timestamp)) { payload.WithdrawalsRoot = bl.Header().WithdrawalsHash } @@ -426,6 +520,8 @@ type PayloadAttributes struct { GasLimit *Uint64Quantity `json:"gasLimit,omitempty"` // EIP-1559 parameters, to be specified only post-Holocene EIP1559Params *Bytes8 `json:"eip1559Params,omitempty"` + // MinBaseFee is the minimum base fee, to be specified only post-Jovian + MinBaseFee *uint64 
`json:"minBaseFee,omitempty"` } // IsDepositsOnly returns whether all transactions of the PayloadAttributes are of Deposit @@ -518,6 +614,8 @@ type SystemConfig struct { EIP1559Params Bytes8 `json:"eip1559Params"` // OperatorFeeParams identifies the operator fee parameters. OperatorFeeParams Bytes32 `json:"operatorFeeParams"` + // MinBaseFee identifies the minimum base fee. + MinBaseFee uint64 `json:"minBaseFee"` // More fields can be added for future SystemConfig versions. // MarshalPreHolocene indicates whether or not this struct should be diff --git a/op-service/eth/types_test.go b/op-service/eth/types_test.go index ea1d40d930fb8..02d226ac03ac0 100644 --- a/op-service/eth/types_test.go +++ b/op-service/eth/types_test.go @@ -100,12 +100,13 @@ func TestSystemConfigMarshaling(t *testing.T) { Scalar: Bytes32{0x7, 0x8, 0x9}, OperatorFeeParams: Bytes32{0x1, 0x2, 0x3}, GasLimit: 1234, + MinBaseFee: 0, // Leave EIP1559 params empty to prove that the // zero value is sent. } j, err := json.Marshal(sysConfig) require.NoError(t, err) - require.Equal(t, `{"batcherAddr":"0x4100000000000000000000000000000000000000","overhead":"0x0405060000000000000000000000000000000000000000000000000000000000","scalar":"0x0708090000000000000000000000000000000000000000000000000000000000","gasLimit":1234,"eip1559Params":"0x0000000000000000","operatorFeeParams":"0x0102030000000000000000000000000000000000000000000000000000000000"}`, string(j)) + require.Equal(t, `{"batcherAddr":"0x4100000000000000000000000000000000000000","overhead":"0x0405060000000000000000000000000000000000000000000000000000000000","scalar":"0x0708090000000000000000000000000000000000000000000000000000000000","gasLimit":1234,"eip1559Params":"0x0000000000000000","operatorFeeParams":"0x0102030000000000000000000000000000000000000000000000000000000000","minBaseFee":0}`, string(j)) sysConfig.MarshalPreHolocene = true j, err = json.Marshal(sysConfig) require.NoError(t, err) diff --git a/op-service/event/system.go 
b/op-service/event/system.go index 614940fb33e2a..df55aae3f6aea 100644 --- a/op-service/event/system.go +++ b/op-service/event/system.go @@ -3,15 +3,35 @@ package event import ( "context" "fmt" + "log/slog" + "path/filepath" + "runtime" "slices" + "strings" "sync" "sync/atomic" "testing" "time" "github.com/ethereum/go-ethereum/log" + "github.com/google/uuid" ) +type eventTraceKeyType struct{} + +var ( + ctxKeyEventTrace = eventTraceKeyType{} +) + +type eventTrace struct { + UUID string + Step int +} + +func (e eventTrace) String() string { + return fmt.Sprintf("%s:%d", e.UUID, e.Step) +} + type Registry interface { // Register registers a named event-emitter, optionally processing events itself: // deriver may be nil, not all registrants have to process events. @@ -80,6 +100,39 @@ type systemActor struct { emitPriority Priority } +func (r *systemActor) traceAndLogEventEmitted(ctx context.Context, level slog.Level, ev Event) context.Context { + _, path, line, _ := runtime.Caller(2) // find the location of the caller of Emit() + if strings.Contains(path, "limiter.go") { + _, path, line, _ = runtime.Caller(3) // go one level up the stack to get the correct location, if the caller is rate-limited + } + + file := filepath.Base(path) + dir := filepath.Base(filepath.Dir(path)) + location := fmt.Sprintf("%s/%s:%d", dir, file, line) + + var etrace eventTrace + if ctx.Value(ctxKeyEventTrace) == nil { + etrace = eventTrace{ + UUID: uuid.New().String()[:6], + Step: 0, + } + ctx = context.WithValue(ctx, ctxKeyEventTrace, etrace) + } else { + var ok bool + etrace, ok = ctx.Value(ctxKeyEventTrace).(eventTrace) + if !ok { + r.sys.log.Error("Event trace is not a eventTrace type", "ev", ev, "loc", location) + return ctx + } + etrace.Step++ + ctx = context.WithValue(ctx, ctxKeyEventTrace, etrace) + } + + r.sys.log.Log(level, "Event emitted", "euid", etrace, "ev", ev, "loc", location) + + return ctx +} + // Emit is called by the end-user func (r *systemActor) Emit(ctx context.Context, 
ev Event) { if ctx == nil { @@ -91,6 +144,12 @@ func (r *systemActor) Emit(ctx context.Context, ev Event) { ctx = context.Background() } } + + level := log.LevelTrace + if r.sys.log.Enabled(ctx, level) { + ctx = r.traceAndLogEventEmitted(ctx, level, ev) + } + if r.ctx.Err() != nil { return } diff --git a/op-service/flags/flags.go b/op-service/flags/flags.go index c9763414c6e49..2f51794b57d5d 100644 --- a/op-service/flags/flags.go +++ b/op-service/flags/flags.go @@ -22,6 +22,7 @@ const ( PectraBlobScheduleOverrideFlagName = "override.pectrablobschedule" IsthmusOverrideFlagName = "override.isthmus" InteropOverrideFlagName = "override.interop" + JovianOverrideFlagName = "override.jovian" ) func CLIFlags(envPrefix string, category string) []cli.Flag { @@ -82,6 +83,13 @@ func CLIFlags(envPrefix string, category string) []cli.Flag { Hidden: false, Category: category, }, + &cli.Uint64Flag{ + Name: JovianOverrideFlagName, + Usage: "Manually specify the Jovian fork timestamp, overriding the bundled setting", + EnvVars: opservice.PrefixEnvVar(envPrefix, "OVERRIDE_JOVIAN"), + Hidden: false, + Category: category, + }, &cli.Uint64Flag{ Name: InteropOverrideFlagName, Usage: "Manually specify the Interop fork timestamp, overriding the bundled setting", diff --git a/op-service/flags/test.go b/op-service/flags/test.go index e690a170a749e..10354541605e3 100644 --- a/op-service/flags/test.go +++ b/op-service/flags/test.go @@ -2,29 +2,20 @@ package flags import ( "flag" - "os" "github.com/ethereum-optimism/optimism/op-service/log" ) -var flLoadTest = flag.Bool("loadtest", false, "Enable load tests during test run") - type TestConfig struct { - LogConfig log.CLIConfig - EnableLoadTests bool + LogConfig log.CLIConfig } func ReadTestConfig() TestConfig { flag.Parse() - loadTest := *flLoadTest - if v := os.Getenv("NAT_LOADTEST"); v != "" { - loadTest = v == "true" - } cfg := log.ReadTestCLIConfig() return TestConfig{ - EnableLoadTests: loadTest, - LogConfig: cfg, + LogConfig: cfg, } } 
diff --git a/op-service/httputil/downloader.go b/op-service/httputil/downloader.go new file mode 100644 index 0000000000000..cd8f0f4ddcd40 --- /dev/null +++ b/op-service/httputil/downloader.go @@ -0,0 +1,60 @@ +package httputil + +import ( + "context" + "fmt" + "io" + "net/http" + + "github.com/ethereum-optimism/optimism/op-service/ioutil" +) + +type Downloader struct { + Client *http.Client + Progressor ioutil.Progressor + MaxSize int64 +} + +func (d *Downloader) Download(ctx context.Context, url string, out io.Writer) error { + if out == nil { + return fmt.Errorf("output writer is nil") + } + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return fmt.Errorf("failed to create request: %w", err) + } + + client := d.Client + if client == nil { + client = http.DefaultClient + } + resp, err := client.Do(req) + if err != nil { + return err + } + if resp.Body != nil { + defer resp.Body.Close() + } + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmt.Errorf("download failed with status code %d: %s", resp.StatusCode, resp.Status) + } + if resp.ContentLength > 0 && d.MaxSize > 0 && resp.ContentLength > d.MaxSize { + return fmt.Errorf("content length %d exceeds maximum allowed size %d", resp.ContentLength, d.MaxSize) + } + + r := io.Reader(resp.Body) + if d.MaxSize > 0 { + r = io.LimitReader(resp.Body, d.MaxSize) + } + + pr := &ioutil.ProgressReader{ + R: r, + Progressor: d.Progressor, + Total: resp.ContentLength, + } + if _, err := io.Copy(out, pr); err != nil { + return fmt.Errorf("failed to write download: %w", err) + } + return nil +} diff --git a/op-service/httputil/downloader_test.go b/op-service/httputil/downloader_test.go new file mode 100644 index 0000000000000..66cc9f52b00f3 --- /dev/null +++ b/op-service/httputil/downloader_test.go @@ -0,0 +1,61 @@ +package httputil + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" +) 
+ +func TestDownloader_Download(t *testing.T) { + t.Run("ok", func(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = fmt.Fprint(w, "test") + })) + t.Cleanup(srv.Close) + + d := new(Downloader) + out := new(bytes.Buffer) + err := d.Download(context.Background(), srv.URL, out) + require.NoError(t, err) + require.Equal(t, "test", out.String()) + }) + + t.Run("above max size", func(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = fmt.Fprint(w, "test") + })) + t.Cleanup(srv.Close) + + d := &Downloader{ + MaxSize: 2, + } + out := new(bytes.Buffer) + err := d.Download(context.Background(), srv.URL, out) + require.ErrorContains(t, err, "exceeds maximum allowed size") + }) + + t.Run("above max size with fake content length", func(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Header needs to come before WriteHeader otherwise it will be automatically corrected. 
+ w.Header().Set("Content-Length", "1") + w.WriteHeader(http.StatusOK) + _, _ = fmt.Fprint(w, "test") + })) + t.Cleanup(srv.Close) + + d := &Downloader{ + MaxSize: 2, + } + out := new(bytes.Buffer) + err := d.Download(context.Background(), srv.URL, out) + require.ErrorIs(t, err, io.ErrUnexpectedEOF) + }) +} diff --git a/op-service/ioutil/progress.go b/op-service/ioutil/progress.go new file mode 100644 index 0000000000000..680175803cce4 --- /dev/null +++ b/op-service/ioutil/progress.go @@ -0,0 +1,86 @@ +package ioutil + +import ( + "io" + "sync" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/schollz/progressbar/v3" +) + +type Progressor func(curr, total int64) + +func BarProgressor() Progressor { + var bar *progressbar.ProgressBar + var init sync.Once + return func(curr, total int64) { + init.Do(func() { + bar = progressbar.DefaultBytes(total) + }) + _ = bar.Set64(curr) + } +} + +func NoopProgressor() Progressor { + return func(curr, total int64) {} +} + +type LogProgressor struct { + L log.Logger + Msg string + Interval time.Duration + + lastLog time.Time + mu sync.Mutex +} + +func NewLogProgressor(l log.Logger, msg string) *LogProgressor { + return &LogProgressor{ + L: l, + Msg: msg, + } +} + +func (l *LogProgressor) Progressor(curr, total int64) { + if !l.calcInterval() { + return + } + + msg := l.Msg + if msg == "" { + msg = "progress" + } + l.L.Info(msg, "current", curr, "total", total) +} + +func (l *LogProgressor) calcInterval() bool { + l.mu.Lock() + defer l.mu.Unlock() + + interval := l.Interval + if interval == 0 { + interval = time.Second + } + if time.Since(l.lastLog) < interval { + return false + } + l.lastLog = time.Now() + return true +} + +type ProgressReader struct { + R io.Reader + Progressor Progressor + curr int64 + Total int64 +} + +func (pr *ProgressReader) Read(p []byte) (int, error) { + n, err := pr.R.Read(p) + pr.curr += int64(n) + if pr.Progressor != nil { + pr.Progressor(pr.curr, pr.Total) + } + return n, err +} diff 
--git a/op-service/ioutil/tar.go b/op-service/ioutil/tar.go new file mode 100644 index 0000000000000..1c952c8edb01a --- /dev/null +++ b/op-service/ioutil/tar.go @@ -0,0 +1,56 @@ +package ioutil + +import ( + "archive/tar" + "bufio" + "fmt" + "io" + "os" + "path" + "strings" +) + +func Untar(outDir string, tr *tar.Reader) error { + for { + hdr, err := tr.Next() + if err == io.EOF { + return nil + } + if err != nil { + return fmt.Errorf("failed to read tar header: %w", err) + } + + cleanedName := path.Clean(hdr.Name) + if strings.Contains(cleanedName, "..") { + return fmt.Errorf("invalid file path: %s", hdr.Name) + } + dst := path.Join(outDir, cleanedName) + if hdr.FileInfo().IsDir() { + if err := os.MkdirAll(dst, 0o755); err != nil { + return fmt.Errorf("failed to create directory: %w", err) + } + continue + } + + if err := untarFile(dst, tr); err != nil { + return fmt.Errorf("failed to untar file: %w", err) + } + } +} + +func untarFile(dst string, tr *tar.Reader) error { + f, err := os.Create(dst) + if err != nil { + return fmt.Errorf("failed to create file: %w", err) + } + defer f.Close() + + buf := bufio.NewWriter(f) + if _, err := io.Copy(buf, tr); err != nil { + return fmt.Errorf("failed to write file: %w", err) + } + if err := buf.Flush(); err != nil { + return fmt.Errorf("failed to flush buffer: %w", err) + } + return nil +} diff --git a/op-service/ioutil/tar_test.go b/op-service/ioutil/tar_test.go new file mode 100644 index 0000000000000..f6da5be3f9a73 --- /dev/null +++ b/op-service/ioutil/tar_test.go @@ -0,0 +1,31 @@ +package ioutil + +import ( + "archive/tar" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestUntar(t *testing.T) { + dir := t.TempDir() + f, err := os.Open("testdata/test.tar") + require.NoError(t, err) + defer f.Close() + + tr := tar.NewReader(f) + err = Untar(dir, tr) + require.NoError(t, err) + + rootFile := filepath.Join(dir, "test.txt") + content, err := os.ReadFile(rootFile) + 
require.NoError(t, err) + require.Equal(t, "test", string(content)) + + nestedFile := filepath.Join(dir, "test", "test.txt") + content, err = os.ReadFile(nestedFile) + require.NoError(t, err) + require.Equal(t, "test", string(content)) +} diff --git a/op-service/ioutil/testdata/test.tar b/op-service/ioutil/testdata/test.tar new file mode 100644 index 0000000000000..a304a2b3023a8 Binary files /dev/null and b/op-service/ioutil/testdata/test.tar differ diff --git a/op-service/jsonutil/json.go b/op-service/jsonutil/json.go index 8549c170d42a5..01a2da9cc5fd2 100644 --- a/op-service/jsonutil/json.go +++ b/op-service/jsonutil/json.go @@ -1,6 +1,7 @@ package jsonutil import ( + "bytes" "encoding/json" "errors" "fmt" @@ -151,3 +152,43 @@ func write[X any](value X, target ioutil.OutputTarget, enc EncoderFactory) error } return nil } + +// LoadJSONFieldStrict loads a JSON file and strictly decodes a specific top-level field into X. +// The rest of the file is ignored, but the selected field must decode without unknown fields +// and without trailing data. The input can be compressed; decompression is handled automatically. +func LoadJSONFieldStrict[X any](inputPath string, field string) (*X, error) { + if inputPath == "" { + return nil, errors.New("no path specified") + } + f, err := ioutil.OpenDecompressed(inputPath) + if err != nil { + return nil, fmt.Errorf("failed to open file %q: %w", inputPath, err) + } + defer f.Close() + + // Decode only the top-level object to extract the desired field, ignoring others. 
+ var top map[string]json.RawMessage + dec := json.NewDecoder(f) + if err := dec.Decode(&top); err != nil { + return nil, fmt.Errorf("failed to decode JSON: %w", err) + } + if _, err := dec.Token(); err != io.EOF { + return nil, errors.New("unexpected trailing data") + } + + raw, ok := top[field] + if !ok { + return nil, fmt.Errorf("missing JSON field %q", field) + } + + strict := json.NewDecoder(bytes.NewReader(raw)) + strict.DisallowUnknownFields() + var out X + if err := strict.Decode(&out); err != nil { + return nil, fmt.Errorf("failed to decode JSON field %q: %w", field, err) + } + if _, err := strict.Token(); err != io.EOF { + return nil, fmt.Errorf("unexpected trailing data in JSON field %q", field) + } + return &out, nil +} diff --git a/op-service/logpipe/go.go b/op-service/logpipe/go.go new file mode 100644 index 0000000000000..868430ae8cdfc --- /dev/null +++ b/op-service/logpipe/go.go @@ -0,0 +1,66 @@ +package logpipe + +import ( + "bytes" + "encoding/json" + "log/slog" + + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum/go-ethereum/log" +) + +type rawGoJSONLog map[string]any + +type StructuredGoLogEntry struct { + Message string + Level slog.Level + Fields map[string]any +} + +func ParseGoStructuredLogs(line []byte) LogEntry { + dec := json.NewDecoder(bytes.NewReader(line)) + dec.UseNumber() // to preserve number formatting + var e rawGoJSONLog + if err := dec.Decode(&e); err != nil { + return StructuredGoLogEntry{ + Message: "Invalid JSON", + Level: slog.LevelWarn, + Fields: map[string]any{"line": string(line)}, + } + } + lvl, err := oplog.LevelFromString(e["lvl"].(string)) + if err != nil { + lvl = log.LevelInfo + } + msg, _ := e["msg"].(string) + delete(e, "msg") + + return StructuredGoLogEntry{ + Message: msg, + Level: lvl, + Fields: e, + } +} + +func (e StructuredGoLogEntry) LogLevel() slog.Level { + return e.Level +} + +func (e StructuredGoLogEntry) LogMessage() string { + return e.Message +} + +func (e 
StructuredGoLogEntry) LogFields() []any { + attrs := make([]any, 0, len(e.Fields)) + for k, v := range e.Fields { + if x, ok := v.(json.Number); ok { + v = x.String() + } + attrs = append(attrs, slog.Any(k, v)) + } + return attrs +} + +func (e StructuredGoLogEntry) FieldValue(key string) any { + return e.Fields[key] +} diff --git a/op-service/logpipe/pipe.go b/op-service/logpipe/pipe.go new file mode 100644 index 0000000000000..64e7d1b89d37e --- /dev/null +++ b/op-service/logpipe/pipe.go @@ -0,0 +1,132 @@ +package logpipe + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "io" + "log/slog" + + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum/go-ethereum/log" +) + +type rawRustJSONLog struct { + //"timestamp" ignored + Level string `json:"level"` + Fields map[string]any `json:"fields"` + //"target" ignored" +} + +type StructuredRustLogEntry struct { + Message string + Level slog.Level + Fields map[string]any +} + +func ParseRustStructuredLogs(line []byte) LogEntry { + dec := json.NewDecoder(bytes.NewReader(line)) + dec.UseNumber() // to preserve number formatting + var e rawRustJSONLog + if err := dec.Decode(&e); err != nil { + return StructuredRustLogEntry{ + Message: "Invalid JSON", + Level: slog.LevelWarn, + Fields: map[string]any{"line": string(line)}, + } + } + lvl, err := oplog.LevelFromString(e.Level) + if err != nil { + lvl = log.LevelInfo + } + msg, _ := e.Fields["message"].(string) + delete(e.Fields, "message") + + return StructuredRustLogEntry{ + Message: msg, + Level: lvl, + Fields: e.Fields, + } +} + +func (e StructuredRustLogEntry) LogLevel() slog.Level { + return e.Level +} + +func (e StructuredRustLogEntry) LogMessage() string { + return e.Message +} + +func (e StructuredRustLogEntry) LogFields() []any { + attrs := make([]any, 0, len(e.Fields)) + for k, v := range e.Fields { + if x, ok := v.(json.Number); ok { + v = x.String() + } + attrs = append(attrs, slog.Any(k, v)) + } + return attrs +} + +func (e 
StructuredRustLogEntry) FieldValue(key string) any { + return e.Fields[key] +} + +type LogEntry interface { + LogLevel() slog.Level + LogMessage() string + LogFields() []any + FieldValue(key string) any +} + +type LogProcessor func(line []byte) + +type LogParser func(line []byte) LogEntry + +func ToLogger(logger log.Logger) func(e LogEntry) { + return func(e LogEntry) { + msg := e.LogMessage() + attrs := e.LogFields() + lvl := e.LogLevel() + + if lvl >= log.LevelCrit { + // If a sub-process has a critical error, this process can handle it + // Don't force an os.Exit, downgrade to error instead + lvl = log.LevelError + attrs = append(attrs, slog.String("innerLevel", "CRIT")) + } + logger.Log(lvl, msg, attrs...) + } +} + +// PipeLogs reads logs from the provided io.ReadCloser (e.g., subprocess stdout), +// and outputs them to the provider logger. +// +// This: +// 1. assumes each line is a JSON object +// 2. parses it +// 3. extracts the "level" and optional "msg" +// 4. treats remaining fields as structured attributes +// 5. logs the entries using the provided log.Logger +// +// Non-JSON lines are logged as warnings. +// Crit level is mapped to error-level, to prevent untrusted crit logs from stopping the process. +// This function processes until the stream ends, and closes the reader. +// This returns the first read error (If we run into EOF, nil returned is returned instead). 
+func PipeLogs(r io.ReadCloser, onLog LogProcessor) (outErr error) { + defer func() { + outErr = errors.Join(outErr, r.Close()) + }() + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + lineBytes := scanner.Bytes() + if len(lineBytes) == 0 { + continue // Skip empty lines + } + onLog(lineBytes) + } + + return scanner.Err() +} diff --git a/op-service/logpipe/pipe_test.go b/op-service/logpipe/pipe_test.go new file mode 100644 index 0000000000000..19b4a1baee405 --- /dev/null +++ b/op-service/logpipe/pipe_test.go @@ -0,0 +1,63 @@ +package logpipe + +import ( + "bytes" + "io" + "sync" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-service/testlog" +) + +func TestPipeLogs(t *testing.T) { + logger, capt := testlog.CaptureLogger(t, log.LevelTrace) + + wg := new(sync.WaitGroup) + wg.Add(2) + + r, w := io.Pipe() + // Write the log output to the pipe + go func() { + defer wg.Done() + _, err := io.Copy(w, bytes.NewReader([]byte(`{"level": "DEBUG", "fields": {"message": "hello", "foo": 1}}`+"\n"))) + require.NoError(t, err) + _, err = io.Copy(w, bytes.NewReader([]byte(`test invalid JSON`+"\n"))) + require.NoError(t, err) + _, err = io.Copy(w, bytes.NewReader([]byte(`{"fields": {"message": "world", "bar": "sunny"}, "level": "INFO"}`+"\n"))) + require.NoError(t, err) + require.NoError(t, w.Close()) + }() + // Read the log output from the pipe + go func() { + defer wg.Done() + toLogger := ToLogger(logger) + logProc := func(line []byte) { + toLogger(ParseRustStructuredLogs(line)) + } + err := PipeLogs(r, logProc) + require.NoError(t, err) + }() + wg.Wait() + + entry1 := capt.FindLog( + testlog.NewLevelFilter(log.LevelDebug), + testlog.NewAttributesContainsFilter("foo", "1")) + require.NotNil(t, entry1) + require.Equal(t, "hello", entry1.Message) + + entry2 := capt.FindLog( + testlog.NewLevelFilter(log.LevelWarn), + testlog.NewAttributesContainsFilter("line", "test invalid JSON")) 
+ require.NotNil(t, entry2) + require.Equal(t, "Invalid JSON", entry2.Message) + + entry3 := capt.FindLog( + testlog.NewLevelFilter(log.LevelInfo), + testlog.NewAttributesContainsFilter("bar", "sunny")) + require.NotNil(t, entry3) + require.Equal(t, "world", entry3.Message) +} diff --git a/op-service/plan/node.go b/op-service/plan/node.go index 7622f5a845c9e..ea8332a52b805 100644 --- a/op-service/plan/node.go +++ b/op-service/plan/node.go @@ -172,6 +172,20 @@ func (p *Lazy[V]) DependOn(dep ...upstreamDep) { p.invalidate() } +// ResetFnAndDependencies sets the Fn to nil and unregisters all existing dependencies from the value. +func (p *Lazy[V]) ResetFnAndDependencies() { + p.mu.Lock() + defer p.mu.Unlock() + p.upstream.Lock() + defer p.upstream.Unlock() + for _, d := range p.upstream.Value { + d.unregister(p) + } + p.upstream.Value = nil + p.fn = nil + p.invalidate() +} + // Set invalidates any downstream deps, and sets the value. func (p *Lazy[V]) Set(v V) { p.mu.Lock() diff --git a/op-service/plan/node_test.go b/op-service/plan/node_test.go index dea7d04067604..a242f84625d58 100644 --- a/op-service/plan/node_test.go +++ b/op-service/plan/node_test.go @@ -105,6 +105,77 @@ func TestNode(t *testing.T) { require.Equal(t, `*plan.Lazy[uint64](*plan.Lazy[uint64], *plan.Lazy[uint32])`, s) }) + t.Run("reset dependencies - no downstream invalidation", func(t *testing.T) { + x := new(plan.Lazy[int]) + y := new(plan.Lazy[int]) + z := new(plan.Lazy[int]) + x.DependOn(y, z) + y.Set(10) + z.Set(20) + x.Fn(func(ctx context.Context) (int, error) { + return y.Value() + z.Value(), nil + }) + val, err := x.Eval(context.Background()) + require.NoError(t, err) + require.Equal(t, 10+20, val) + + x.ResetFnAndDependencies() + x.Set(100) + y.Set(30) // Changing y or z no longer invalidates x + z.Set(20) + val, err = x.Eval(context.Background()) + require.NoError(t, err) + require.Equal(t, 100, val) + }) + + t.Run("reset dependencies - no upstream evaluation", func(t *testing.T) { + x := 
new(plan.Lazy[int]) + y := new(plan.Lazy[int]) + z := new(plan.Lazy[int]) + x.DependOn(y, z) + x.Fn(func(ctx context.Context) (int, error) { + return 100, nil + }) + dependencyCalls := 0 + countEvaluations := func(ctx context.Context) (int, error) { + dependencyCalls++ + return 0, nil + } + y.Fn(countEvaluations) + z.Fn(countEvaluations) + + x.ResetFnAndDependencies() + x.Fn(func(ctx context.Context) (int, error) { + return 100, nil + }) + val, err := x.Eval(context.Background()) + require.NoError(t, err) + require.Equal(t, 100, val) + require.Zero(t, dependencyCalls, "Previous dependencies should not be evaluated") + }) + + t.Run("reset dependencies - other nodes unaffected", func(t *testing.T) { + x := new(plan.Lazy[int]) + y := new(plan.Lazy[int]) + y.DependOn(x) + y.Fn(func(ctx context.Context) (int, error) { + return x.Value() + 10, nil + }) + + x.Set(5) + val, err := y.Eval(context.Background()) + require.NoError(t, err) + require.Equal(t, 15, val) + + x.ResetFnAndDependencies() + + // y should be re-evaluated even though x no longer has dependencies + x.Set(6) + val, err = y.Eval(context.Background()) + require.NoError(t, err) + require.Equal(t, 16, val) + }) + t.Run("close", func(t *testing.T) { x := new(plan.Lazy[uint64]) y := new(plan.Lazy[int32]) diff --git a/op-service/sources/batcher_admin_client.go b/op-service/sources/batcher_admin_client.go index b454c528d0200..f98b679adb1dd 100644 --- a/op-service/sources/batcher_admin_client.go +++ b/op-service/sources/batcher_admin_client.go @@ -26,3 +26,7 @@ func (cl *BatcherAdminClient) StartBatcher(ctx context.Context) error { func (cl *BatcherAdminClient) StopBatcher(ctx context.Context) error { return cl.client.CallContext(ctx, nil, "admin_stopBatcher") } + +func (cl *BatcherAdminClient) FlushBatcher(ctx context.Context) error { + return cl.client.CallContext(ctx, nil, "admin_flushBatcher") +} diff --git a/op-service/sources/batching/call.go b/op-service/sources/batching/call.go index 
aa96a5263730c..fdd42b429c5af 100644 --- a/op-service/sources/batching/call.go +++ b/op-service/sources/batching/call.go @@ -73,3 +73,7 @@ func (c *CallResult) GetBytes32Slice(i int) [][32]byte { func (c *CallResult) GetString(i int) string { return *abi.ConvertType(c.out[i], new(string)).(*string) } + +func (c *CallResult) Get(i int) interface{} { + return c.out[i] +} diff --git a/op-service/sources/eth_client.go b/op-service/sources/eth_client.go index 67800cdaa634c..da0ee65b6d646 100644 --- a/op-service/sources/eth_client.go +++ b/op-service/sources/eth_client.go @@ -524,9 +524,9 @@ func (s *EthClient) SuggestGasPrice(ctx context.Context) (*big.Int, error) { } // Call executes a message call transaction but never mined into the blockchain. -func (s *EthClient) Call(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) { +func (s *EthClient) Call(ctx context.Context, msg ethereum.CallMsg, blockNumber rpc.BlockNumber) ([]byte, error) { var hex hexutil.Bytes - err := s.client.CallContext(ctx, &hex, "eth_call", ToCallArg(msg), "pending") + err := s.client.CallContext(ctx, &hex, "eth_call", ToCallArg(msg), blockNumber) if err != nil { return nil, err } diff --git a/op-service/sources/l1_beacon_client.go b/op-service/sources/l1_beacon_client.go index 0999d279ddde7..fa4bb1c8f29c8 100644 --- a/op-service/sources/l1_beacon_client.go +++ b/op-service/sources/l1_beacon_client.go @@ -13,7 +13,7 @@ import ( "sync" "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/crypto/kzg4844" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum-optimism/optimism/op-service/apis" "github.com/ethereum-optimism/optimism/op-service/client" @@ -25,6 +25,7 @@ const ( specMethod = "eth/v1/config/spec" genesisMethod = "eth/v1/beacon/genesis" sidecarsMethodPrefix = "eth/v1/beacon/blob_sidecars/" + blobsMethodPrefix = "eth/v1/beacon/blobs/" ) type L1BeaconClientConfig struct { @@ -100,6 +101,19 @@ func (cl *BeaconHTTPClient) BeaconGenesis(ctx context.Context) 
(eth.APIGenesisRe return genesisResp, nil } +func (cl *BeaconHTTPClient) BeaconBlobs(ctx context.Context, slot uint64, hashes []eth.IndexedBlobHash) (eth.APIBeaconBlobsResponse, error) { + reqQuery := url.Values{} + for _, hash := range hashes { + reqQuery.Add("versioned_hashes", hash.Hash.Hex()) + } + reqPath := path.Join(blobsMethodPrefix, strconv.FormatUint(slot, 10)) + var blobsResp eth.APIBeaconBlobsResponse + if err := cl.apiReq(ctx, &blobsResp, reqPath, reqQuery); err != nil { + return eth.APIBeaconBlobsResponse{}, err + } + return blobsResp, nil +} + func (cl *BeaconHTTPClient) BeaconBlobSideCars(ctx context.Context, fetchAllSidecars bool, slot uint64, hashes []eth.IndexedBlobHash) (eth.APIGetBlobSidecarsResponse, error) { reqPath := path.Join(sidecarsMethodPrefix, strconv.FormatUint(slot, 10)) var reqQuery url.Values @@ -170,8 +184,8 @@ func NewL1BeaconClient(cl apis.BeaconClient, cfg L1BeaconClientConfig, fallbacks type TimeToSlotFn func(timestamp uint64) (uint64, error) -// GetTimeToSlotFn returns a function that converts a timestamp to a slot number. -func (cl *L1BeaconClient) GetTimeToSlotFn(ctx context.Context) (TimeToSlotFn, error) { +// getTimeToSlotFn returns a function that converts a timestamp to a slot number. 
+func (cl *L1BeaconClient) getTimeToSlotFn(ctx context.Context) (TimeToSlotFn, error) { cl.initLock.Lock() defer cl.initLock.Unlock() if cl.timeToSlotFn != nil { @@ -202,6 +216,18 @@ func (cl *L1BeaconClient) GetTimeToSlotFn(ctx context.Context) (TimeToSlotFn, er return cl.timeToSlotFn, nil } +func (cl *L1BeaconClient) timeToSlot(ctx context.Context, timestamp uint64) (uint64, error) { + slotFn, err := cl.getTimeToSlotFn(ctx) + if err != nil { + return 0, fmt.Errorf("get time to slot fn: %w", err) + } + slot, err := slotFn(timestamp) + if err != nil { + return 0, fmt.Errorf("convert timestamp %d to slot number: %w", timestamp, err) + } + return slot, nil +} + func (cl *L1BeaconClient) fetchSidecars(ctx context.Context, slot uint64, hashes []eth.IndexedBlobHash) (eth.APIGetBlobSidecarsResponse, error) { var errs []error for i := 0; i < cl.pool.Len(); i++ { @@ -225,18 +251,21 @@ func (cl *L1BeaconClient) GetBlobSidecars(ctx context.Context, ref eth.L1BlockRe if len(hashes) == 0 { return []*eth.BlobSidecar{}, nil } - slotFn, err := cl.GetTimeToSlotFn(ctx) + slot, err := cl.timeToSlot(ctx, ref.Time) if err != nil { - return nil, fmt.Errorf("failed to get time to slot function: %w", err) + return nil, err } - slot, err := slotFn(ref.Time) + sidecars, err := cl.getBlobSidecars(ctx, slot, hashes) if err != nil { - return nil, fmt.Errorf("error in converting ref.Time to slot: %w", err) + return nil, fmt.Errorf("get blob sidecars for block %v: %w", ref, err) } + return sidecars, nil +} +func (cl *L1BeaconClient) getBlobSidecars(ctx context.Context, slot uint64, hashes []eth.IndexedBlobHash) ([]*eth.BlobSidecar, error) { resp, err := cl.fetchSidecars(ctx, slot, hashes) if err != nil { - return nil, fmt.Errorf("failed to fetch blob sidecars for slot %v block %v: %w", slot, ref, err) + return nil, fmt.Errorf("failed to fetch blob sidecars for slot %v: %w", slot, err) } apiscs := make([]*eth.APIBlobSidecar, 0, len(hashes)) @@ -267,44 +296,74 @@ func (cl *L1BeaconClient) 
GetBlobSidecars(ctx context.Context, ref eth.L1BlockRe // blob's validity by checking its proof against the commitment, and confirming the commitment // hashes to the expected value. Returns error if any blob is found invalid. func (cl *L1BeaconClient) GetBlobs(ctx context.Context, ref eth.L1BlockRef, hashes []eth.IndexedBlobHash) ([]*eth.Blob, error) { - blobSidecars, err := cl.GetBlobSidecars(ctx, ref, hashes) + if len(hashes) == 0 { + return []*eth.Blob{}, nil + } + slot, err := cl.timeToSlot(ctx, ref.Time) if err != nil { - return nil, fmt.Errorf("failed to get blob sidecars for L1BlockRef %s: %w", ref, err) + return nil, err } - blobs, err := blobsFromSidecars(blobSidecars, hashes) + resp, err := cl.cl.BeaconBlobs(ctx, slot, hashes) if err != nil { - return nil, fmt.Errorf("failed to get blobs from sidecars for L1BlockRef %s: %w", ref, err) + // We would normally check for an explicit error like "method not found", but the Beacon + // API doesn't standardize such a response. Thus, we interpret all errors as + // "method not found" and fall back to fetching sidecars. + blobSidecars, err := cl.getBlobSidecars(ctx, slot, hashes) + if err != nil { + return nil, fmt.Errorf("failed to get blob sidecars for L1BlockRef %s: %w", ref, err) + } + blobs, err := blobsFromSidecars(blobSidecars, hashes) + if err != nil { + return nil, fmt.Errorf("failed to get blobs from sidecars for L1BlockRef %s: %w", ref, err) + } + return blobs, nil + } + if len(resp.Data) != len(hashes) { + return nil, fmt.Errorf("expected %d blobs but got %d", len(hashes), len(resp.Data)) + } + var blobs []*eth.Blob + for i, blob := range resp.Data { + if err := verifyBlob(blob, hashes[i].Hash); err != nil { + return nil, fmt.Errorf("blob %d failed verification: %w", i, err) + } + blobs = append(blobs, blob) } return blobs, nil } +// blobsFromSidecars pulls the blobs from the sidecars and verifies them against the supplied hashes. 
func blobsFromSidecars(blobSidecars []*eth.BlobSidecar, hashes []eth.IndexedBlobHash) ([]*eth.Blob, error) { if len(blobSidecars) != len(hashes) { return nil, fmt.Errorf("number of hashes and blobSidecars mismatch, %d != %d", len(hashes), len(blobSidecars)) } - out := make([]*eth.Blob, len(hashes)) for i, ih := range hashes { sidecar := blobSidecars[i] if sidx := uint64(sidecar.Index); sidx != ih.Index { return nil, fmt.Errorf("expected sidecars to be ordered by hashes, but got %d != %d", sidx, ih.Index) } - - // make sure the blob's kzg commitment hashes to the expected value - hash := eth.KZGToVersionedHash(kzg4844.Commitment(sidecar.KZGCommitment)) - if hash != ih.Hash { - return nil, fmt.Errorf("expected hash %s for blob at index %d but got %s", ih.Hash, ih.Index, hash) - } - - // confirm blob data is valid by verifying its proof against the commitment - if err := eth.VerifyBlobProof(&sidecar.Blob, kzg4844.Commitment(sidecar.KZGCommitment), kzg4844.Proof(sidecar.KZGProof)); err != nil { - return nil, fmt.Errorf("blob at index %d failed verification: %w", i, err) + if err := verifyBlob(&sidecar.Blob, ih.Hash); err != nil { + return nil, fmt.Errorf("blob %d failed verification: %w", i, err) } out[i] = &sidecar.Blob } return out, nil } +// verifyBlob verifies that the blob data corresponds to the provided commitment. +// It recomputes the commitment from the blob data and checks it matches the expected commitment hash. 
+func verifyBlob(blob *eth.Blob, expectedCommitmentHash common.Hash) error { + recomputedCommitment, err := blob.ComputeKZGCommitment() + if err != nil { + return fmt.Errorf("cannot compute KZG commitment for blob: %w", err) + } + recomputedCommitmentHash := eth.KZGToVersionedHash(recomputedCommitment) + if recomputedCommitmentHash != expectedCommitmentHash { + return fmt.Errorf("recomputed commitment %s does not match expected commitment %s", recomputedCommitmentHash, expectedCommitmentHash) + } + return nil +} + // GetVersion fetches the version of the Beacon-node. func (cl *L1BeaconClient) GetVersion(ctx context.Context) (string, error) { return cl.cl.NodeVersion(ctx) diff --git a/op-service/sources/l1_beacon_client_test.go b/op-service/sources/l1_beacon_client_test.go index 8e312f3c8eb27..009b3d82f78fe 100644 --- a/op-service/sources/l1_beacon_client_test.go +++ b/op-service/sources/l1_beacon_client_test.go @@ -13,8 +13,11 @@ import ( "strconv" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto/kzg4844" client_mocks "github.com/ethereum-optimism/optimism/op-service/client/mocks" @@ -97,6 +100,40 @@ func TestBlobsFromSidecars(t *testing.T) { hashes[2].Hash[17]++ _, err = blobsFromSidecars(sidecars, hashes) require.Error(t, err) + +} + +func KZGProofFromHex(s string) (kzg4844.Proof, error) { + var out kzg4844.Proof // underlying size is 48 bytes + b, err := hexutil.Decode(s) + if err != nil { + return out, err + } + if len(b) != 48 { + return out, fmt.Errorf("want 48 bytes, got %d", len(b)) + } + copy(out[:], b) + return out, nil +} + +var badProof, _ = KZGProofFromHex("0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000") + +func TestBlobsFromSidecars_BadProof(t *testing.T) { + indices := []uint64{5, 7, 2} + index0, sidecar0 := 
makeTestBlobSidecar(indices[0]) + index1, sidecar1 := makeTestBlobSidecar(indices[1]) + index2, sidecar2 := makeTestBlobSidecar(indices[2]) + hashes := []eth.IndexedBlobHash{index0, index1, index2} + + sidecars := []*eth.BlobSidecar{sidecar0, sidecar1, sidecar2} + + // Set proof to a bad / stubbed value + sidecars[1].KZGProof = eth.Bytes48(badProof) + + // Check that verification succeeds, the proof is not required + _, err := blobsFromSidecars(sidecars, hashes) + require.NoError(t, err) + } func TestBlobsFromSidecars_EmptySidecarList(t *testing.T) { @@ -186,7 +223,46 @@ func TestBeaconClientFallback(t *testing.T) { resp, err = c.GetBlobSidecars(ctx, eth.L1BlockRef{Time: 14}, hashes) require.Equal(t, sidecars, resp) require.NoError(t, err) +} + +func TestBeaconClientBadProof(t *testing.T) { + indices := []uint64{5, 7, 2} + index0, sidecar0 := makeTestBlobSidecar(indices[0]) + index1, sidecar1 := makeTestBlobSidecar(indices[1]) + index2, sidecar2 := makeTestBlobSidecar(indices[2]) + hashes := []eth.IndexedBlobHash{index0, index1, index2} + sidecars := []*eth.BlobSidecar{sidecar0, sidecar1, sidecar2} + blobs := []*eth.Blob{&sidecar0.Blob, &sidecar1.Blob, &sidecar2.Blob} + + // invalidate proof + sidecar1.KZGProof = eth.Bytes48(badProof) + apiSidecars := toAPISideCars(sidecars) + + t.Run("fallback to BeaconBlobSideCars", func(t *testing.T) { + ctx := context.Background() + p := mocks.NewBeaconClient(t) + p.EXPECT().BeaconGenesis(ctx).Return(eth.APIGenesisResponse{Data: eth.ReducedGenesisData{GenesisTime: 10}}, nil) + p.EXPECT().ConfigSpec(ctx).Return(eth.APIConfigResponse{Data: eth.ReducedConfigData{SecondsPerSlot: 2}}, nil) + client := NewL1BeaconClient(p, L1BeaconClientConfig{}) + ref := eth.L1BlockRef{Time: 12} + p.EXPECT().BeaconBlobs(ctx, uint64(1), hashes).Return(eth.APIBeaconBlobsResponse{}, errors.New("the sky is falling")) + p.EXPECT().BeaconBlobSideCars(ctx, false, uint64(1), hashes).Return(eth.APIGetBlobSidecarsResponse{Data: apiSidecars}, nil) + _, err := 
client.GetBlobs(ctx, ref, hashes) + assert.NoError(t, err) + }) + + t.Run("BeaconBlobs", func(t *testing.T) { + ctx := context.Background() + p := mocks.NewBeaconClient(t) + p.EXPECT().BeaconGenesis(ctx).Return(eth.APIGenesisResponse{Data: eth.ReducedGenesisData{GenesisTime: 10}}, nil) + p.EXPECT().ConfigSpec(ctx).Return(eth.APIConfigResponse{Data: eth.ReducedConfigData{SecondsPerSlot: 2}}, nil) + client := NewL1BeaconClient(p, L1BeaconClientConfig{}) + ref := eth.L1BlockRef{Time: 12} + p.EXPECT().BeaconBlobs(ctx, uint64(1), hashes).Return(eth.APIBeaconBlobsResponse{Data: blobs}, nil) + _, err := client.GetBlobs(ctx, ref, hashes) + assert.NoError(t, err) + }) } func TestBeaconHTTPClient(t *testing.T) { @@ -234,3 +310,14 @@ func TestClientPoolSeveral(t *testing.T) { p.MoveToNext() } } + +func TestVerifyBlob(t *testing.T) { + blob := eth.Blob{} + blob[0] = byte(7) + versionedHash := common.HexToHash("0x0164e32184169f11528f72aeb318f94d958aa28fba0731a52aead6df0104a98e") + require.NoError(t, verifyBlob(&blob, versionedHash)) + + differentBlob := eth.Blob{} + differentBlob[0] = byte(8) + require.Error(t, verifyBlob(&differentBlob, versionedHash)) +} diff --git a/op-service/sources/mocks/BeaconClient.go b/op-service/sources/mocks/BeaconClient.go index 89b07d0670c59..bff9f84940ac7 100644 --- a/op-service/sources/mocks/BeaconClient.go +++ b/op-service/sources/mocks/BeaconClient.go @@ -81,6 +81,64 @@ func (_c *BeaconClient_BeaconBlobSideCars_Call) RunAndReturn(run func(context.Co return _c } +// BeaconBlobs provides a mock function with given fields: ctx, slot, hashes +func (_m *BeaconClient) BeaconBlobs(ctx context.Context, slot uint64, hashes []eth.IndexedBlobHash) (eth.APIBeaconBlobsResponse, error) { + ret := _m.Called(ctx, slot, hashes) + + if len(ret) == 0 { + panic("no return value specified for BeaconBlobs") + } + + var r0 eth.APIBeaconBlobsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, uint64, []eth.IndexedBlobHash) 
(eth.APIBeaconBlobsResponse, error)); ok { + return rf(ctx, slot, hashes) + } + if rf, ok := ret.Get(0).(func(context.Context, uint64, []eth.IndexedBlobHash) eth.APIBeaconBlobsResponse); ok { + r0 = rf(ctx, slot, hashes) + } else { + r0 = ret.Get(0).(eth.APIBeaconBlobsResponse) + } + + if rf, ok := ret.Get(1).(func(context.Context, uint64, []eth.IndexedBlobHash) error); ok { + r1 = rf(ctx, slot, hashes) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BeaconClient_BeaconBlobs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BeaconBlobs' +type BeaconClient_BeaconBlobs_Call struct { + *mock.Call +} + +// BeaconBlobs is a helper method to define mock.On call +// - ctx context.Context +// - slot uint64 +// - hashes []eth.IndexedBlobHash +func (_e *BeaconClient_Expecter) BeaconBlobs(ctx interface{}, slot interface{}, hashes interface{}) *BeaconClient_BeaconBlobs_Call { + return &BeaconClient_BeaconBlobs_Call{Call: _e.mock.On("BeaconBlobs", ctx, slot, hashes)} +} + +func (_c *BeaconClient_BeaconBlobs_Call) Run(run func(ctx context.Context, slot uint64, hashes []eth.IndexedBlobHash)) *BeaconClient_BeaconBlobs_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(uint64), args[2].([]eth.IndexedBlobHash)) + }) + return _c +} + +func (_c *BeaconClient_BeaconBlobs_Call) Return(_a0 eth.APIBeaconBlobsResponse, _a1 error) *BeaconClient_BeaconBlobs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *BeaconClient_BeaconBlobs_Call) RunAndReturn(run func(context.Context, uint64, []eth.IndexedBlobHash) (eth.APIBeaconBlobsResponse, error)) *BeaconClient_BeaconBlobs_Call { + _c.Call.Return(run) + return _c +} + // BeaconGenesis provides a mock function with given fields: ctx func (_m *BeaconClient) BeaconGenesis(ctx context.Context) (eth.APIGenesisResponse, error) { ret := _m.Called(ctx) diff --git a/op-service/sources/sync_tester_client.go b/op-service/sources/sync_tester_client.go 
new file mode 100644 index 0000000000000..ba19034537193 --- /dev/null +++ b/op-service/sources/sync_tester_client.go @@ -0,0 +1,47 @@ +package sources + +import ( + "context" + + "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +type SyncTesterClient struct { + client client.RPC +} + +var _ apis.SyncTester = (*SyncTesterClient)(nil) + +func NewSyncTesterClient(client client.RPC) *SyncTesterClient { + return &SyncTesterClient{ + client: client, + } +} + +func (cl *SyncTesterClient) ChainID(ctx context.Context) (eth.ChainID, error) { + var result eth.ChainID + err := cl.client.CallContext(ctx, &result, "eth_chainId") + return result, err +} + +func (cl *SyncTesterClient) GetSession(ctx context.Context) (*eth.SyncTesterSession, error) { + var session *eth.SyncTesterSession + err := cl.client.CallContext(ctx, &session, "sync_getSession") + return session, err +} + +func (cl *SyncTesterClient) ListSessions(ctx context.Context) ([]string, error) { + var sessions []string + err := cl.client.CallContext(ctx, &sessions, "sync_listSessions") + return sessions, err +} + +func (cl *SyncTesterClient) DeleteSession(ctx context.Context) error { + return cl.client.CallContext(ctx, nil, "sync_deleteSession") +} + +func (cl *SyncTesterClient) ResetSession(ctx context.Context) error { + return cl.client.CallContext(ctx, nil, "sync_resetSession") +} diff --git a/op-service/sources/types.go b/op-service/sources/types.go index 0bf7ed4175519..efbbe78ed2c26 100644 --- a/op-service/sources/types.go +++ b/op-service/sources/types.go @@ -257,24 +257,30 @@ func (block *RPCBlock) ExecutionPayloadEnvelope(trustCache bool) (*eth.Execution } payload := ð.ExecutionPayload{ - ParentHash: block.ParentHash, - FeeRecipient: block.Coinbase, - StateRoot: eth.Bytes32(block.Root), - ReceiptsRoot: eth.Bytes32(block.ReceiptHash), - LogsBloom: block.Bloom, - PrevRandao: 
eth.Bytes32(block.MixDigest), // mix-digest field is used for prevRandao post-merge - BlockNumber: block.Number, - GasLimit: block.GasLimit, - GasUsed: block.GasUsed, - Timestamp: block.Time, - ExtraData: eth.BytesMax32(block.Extra), - BaseFeePerGas: eth.Uint256Quantity(baseFee), - BlockHash: block.Hash, - Transactions: opaqueTxs, - Withdrawals: block.Withdrawals, - BlobGasUsed: block.BlobGasUsed, - ExcessBlobGas: block.ExcessBlobGas, - WithdrawalsRoot: block.WithdrawalsRoot, + ParentHash: block.ParentHash, + FeeRecipient: block.Coinbase, + StateRoot: eth.Bytes32(block.Root), + ReceiptsRoot: eth.Bytes32(block.ReceiptHash), + LogsBloom: block.Bloom, + PrevRandao: eth.Bytes32(block.MixDigest), // mix-digest field is used for prevRandao post-merge + BlockNumber: block.Number, + GasLimit: block.GasLimit, + GasUsed: block.GasUsed, + Timestamp: block.Time, + ExtraData: eth.BytesMax32(block.Extra), + BaseFeePerGas: eth.Uint256Quantity(baseFee), + BlockHash: block.Hash, + Transactions: opaqueTxs, + Withdrawals: block.Withdrawals, + BlobGasUsed: block.BlobGasUsed, + ExcessBlobGas: block.ExcessBlobGas, + } + + // Only Isthmus execution payloads must set the withdrawals root. + // They are guaranteed to not be the empty withdrawals hash, which is set pre-Isthmus (post-Canyon). + if wr := block.WithdrawalsRoot; wr != nil && *wr != types.EmptyWithdrawalsHash { + wr := *wr + payload.WithdrawalsRoot = &wr } return ð.ExecutionPayloadEnvelope{ diff --git a/op-service/tasks/await.go b/op-service/tasks/await.go new file mode 100644 index 0000000000000..490fd2ca17437 --- /dev/null +++ b/op-service/tasks/await.go @@ -0,0 +1,15 @@ +package tasks + +import "context" + +// Await waits for a value, and sets it to the destination value. +// This returns an error if the context closes before a value is received from the channel. 
+func Await[E any](ctx context.Context, src chan E, dest *E) error { + select { + case <-ctx.Done(): + return ctx.Err() + case x := <-src: + *dest = x + return nil + } +} diff --git a/op-service/testlog/testlog.go b/op-service/testlog/testlog.go index 2363530e23712..0f03f9680c492 100644 --- a/op-service/testlog/testlog.go +++ b/op-service/testlog/testlog.go @@ -106,6 +106,8 @@ var ( ) func fileHandler(t Testing, outdir string, level slog.Level) slog.Handler { + var rootLoggerName string + rootSetup.Do(func() { f, err := os.OpenFile(path.Join(outdir, fmt.Sprintf("root-%d.log", os.Getpid())), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if err != nil { @@ -119,6 +121,7 @@ func fileHandler(t Testing, outdir string, level slog.Level) slog.Handler { rootHdlr := log.NewTerminalHandlerWithLevel(writer, level, false) oplog.SetGlobalLogHandler(rootHdlr) t.Logf("redirecting root logger to %s", f.Name()) + rootLoggerName = f.Name() }) testName := fmt.Sprintf( @@ -146,6 +149,8 @@ func fileHandler(t Testing, outdir string, level slog.Level) slog.Handler { flMtx.Unlock() }) t.Logf("writing test log to %s", logPath) + t.Logf("some tests may have written to the root logger") + t.Logf("logs from the root logger have been written to %s", rootLoggerName) h := log.NewTerminalHandlerWithLevel(dw, level, false) flHandlers[testName] = h return h diff --git a/op-service/testutils/common.go b/op-service/testutils/common.go index 55cb59ae98ca6..f1bdb4860be70 100644 --- a/op-service/testutils/common.go +++ b/op-service/testutils/common.go @@ -11,10 +11,11 @@ import ( func IsolatedTestDirWithAutoCleanup(t *testing.T) string { basePath := os.Getenv("TEST_ARTIFACTS_DIR") if basePath == "" { - basePath = "./.tests" + basePath = t.TempDir() } dir := path.Join(basePath, t.Name()) - // the dir's existence should be handled by Download as well else it should be left to break + require.NoError(t, os.MkdirAll(dir, 0755)) + t.Cleanup(func() { require.NoError(t, os.RemoveAll(dir)) }) diff --git 
a/op-service/testutils/l1info.go b/op-service/testutils/l1info.go index cc41669ad051c..39b03f4e00511 100644 --- a/op-service/testutils/l1info.go +++ b/op-service/testutils/l1info.go @@ -8,6 +8,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/params" ) var _ eth.BlockInfo = &MockBlockInfo{} @@ -66,7 +67,7 @@ func (l *MockBlockInfo) BaseFee() *big.Int { return l.InfoBaseFee } -func (l *MockBlockInfo) BlobBaseFee() *big.Int { +func (l *MockBlockInfo) BlobBaseFee(chainConfig *params.ChainConfig) *big.Int { return l.InfoBlobBaseFee } diff --git a/op-service/testutils/random.go b/op-service/testutils/random.go index 01c3b3aad42e6..87f50f666699c 100644 --- a/op-service/testutils/random.go +++ b/op-service/testutils/random.go @@ -379,7 +379,11 @@ func RandomBlock(rng *rand.Rand, txCount uint64) (*types.Block, []*types.Receipt func RandomBlockPrependTxsWithTime(rng *rand.Rand, txCount int, t uint64, ptxs ...*types.Transaction) (*types.Block, []*types.Receipt) { header := RandomHeaderWithTime(rng, t) - chainID := big.NewInt(rng.Int63n(1000)) + chainIDInt := rng.Int63n(1000) + if chainIDInt == 0 { // Zero chainID is invalid. + chainIDInt++ + } + chainID := big.NewInt(chainIDInt) signer := types.NewIsthmusSigner(chainID, false) txs := make([]*types.Transaction, 0, txCount+len(ptxs)) txs = append(txs, ptxs...) 
diff --git a/op-service/testutils/tcpproxy/proxy.go b/op-service/testutils/tcpproxy/proxy.go new file mode 100644 index 0000000000000..a86fd327b5a91 --- /dev/null +++ b/op-service/testutils/tcpproxy/proxy.go @@ -0,0 +1,142 @@ +package tcpproxy + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/ethereum-optimism/optimism/op-service/retry" + + "github.com/ethereum/go-ethereum/log" +) + +type Proxy struct { + mu sync.Mutex + conns map[net.Conn]struct{} + lis net.Listener + wg sync.WaitGroup + lgr log.Logger + upstreamAddr string + stopped atomic.Bool +} + +func New(lgr log.Logger) *Proxy { + return &Proxy{ + conns: make(map[net.Conn]struct{}), + lgr: lgr, + } +} + +func (p *Proxy) Addr() string { + return p.lis.Addr().String() +} + +func (p *Proxy) SetUpstream(addr string) { + p.mu.Lock() + p.upstreamAddr = addr + p.lgr.Info("set upstream", "addr", addr) + p.mu.Unlock() +} + +func (p *Proxy) Start() error { + lis, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return fmt.Errorf("could not listen: %w", err) + } + p.lis = lis + + p.wg.Add(1) + go func() { + defer p.wg.Done() + + for { + downConn, err := p.lis.Accept() + if p.stopped.Load() { + return + } + if err != nil { + p.lgr.Error("failed to accept downstream", "err", err) + continue + } + + p.wg.Add(1) + go func() { + defer p.wg.Done() + p.handleConn(downConn) + }() + } + }() + + return nil +} + +func (p *Proxy) handleConn(downConn net.Conn) { + defer downConn.Close() + + p.mu.Lock() + addr := p.upstreamAddr + if addr == "" { + p.mu.Unlock() + p.lgr.Error("upstream not set") + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + upConn, err := retry.Do(ctx, 3, retry.Exponential(), func() (net.Conn, error) { + return net.Dial("tcp", addr) + }) + cancel() + if err != nil { + p.mu.Unlock() + p.lgr.Error("failed to dial upstream", "err", err) + return + } + defer upConn.Close() + p.conns[downConn] = struct{}{} + 
p.conns[upConn] = struct{}{} + p.mu.Unlock() + + var wg sync.WaitGroup + wg.Add(2) + + closeBoth := func() { + downConn.Close() + upConn.Close() + wg.Done() + } + + pump := func(dst io.Writer, src io.Reader, direction string) { + defer closeBoth() + if _, err := io.Copy(dst, src); err != nil { + // ignore net.ErrClosed since it creates a huge amount of log spam + if !errors.Is(err, net.ErrClosed) { + p.lgr.Error("failed to proxy", "direction", direction, "err", err) + } + } + } + go pump(downConn, upConn, "downstream") + go pump(upConn, downConn, "upstream") + wg.Wait() + + p.mu.Lock() + delete(p.conns, downConn) + delete(p.conns, upConn) + p.mu.Unlock() +} + +func (p *Proxy) Close() error { + p.stopped.Store(true) + p.lis.Close() + p.mu.Lock() + for conn := range p.conns { + conn.Close() + } + p.mu.Unlock() + p.wg.Wait() + return nil +} diff --git a/op-service/txinclude/nonce_manager.go b/op-service/txinclude/nonce_manager.go index 3b1eb64d51c15..1bf546721a5c7 100644 --- a/op-service/txinclude/nonce_manager.go +++ b/op-service/txinclude/nonce_manager.go @@ -35,10 +35,14 @@ func (nm *nonceManager) Next() uint64 { return nonce } -// InsertGap inserts a nonce gap. It is a no-op if nonce is already a gap. +// InsertGap inserts a nonce gap. It is a no-op if nonce is already a gap or if it is ahead of the +// current nonce. 
func (nm *nonceManager) InsertGap(nonce uint64) { nm.mu.Lock() defer nm.mu.Unlock() + if nonce >= nm.nextNonce { + return + } i, exists := slices.BinarySearch(nm.gaps, nonce) if exists { return diff --git a/op-service/txinclude/nonce_manager_test.go b/op-service/txinclude/nonce_manager_test.go index 386859fe9b0d5..611f10f8d2570 100644 --- a/op-service/txinclude/nonce_manager_test.go +++ b/op-service/txinclude/nonce_manager_test.go @@ -87,4 +87,28 @@ func TestNonceManagerInsertGap(t *testing.T) { require.Equal(t, uint64(30), nm.Next()) require.Equal(t, uint64(100), nm.Next()) }) + + t.Run("future gap is a no-op", func(t *testing.T) { + nm := newNonceManager(20) + + nm.InsertGap(21) + + require.Equal(t, uint64(20), nm.Next()) + require.Equal(t, uint64(21), nm.Next()) + require.Equal(t, uint64(22), nm.Next()) + }) + + t.Run("handles multiple future gaps", func(t *testing.T) { + nm := newNonceManager(20) + + nm.InsertGap(21) + nm.InsertGap(22) + nm.InsertGap(23) + + require.Equal(t, uint64(20), nm.Next()) + require.Equal(t, uint64(21), nm.Next()) + require.Equal(t, uint64(22), nm.Next()) + require.Equal(t, uint64(23), nm.Next()) + require.Equal(t, uint64(24), nm.Next()) + }) } diff --git a/op-service/txintent/bindings/DisputeGameFactory.go b/op-service/txintent/bindings/DisputeGameFactory.go index a04a8ca0b5b31..553120b2039f4 100644 --- a/op-service/txintent/bindings/DisputeGameFactory.go +++ b/op-service/txintent/bindings/DisputeGameFactory.go @@ -30,6 +30,7 @@ type DisputeGameFactory struct { GameCount func() TypedCall[*big.Int] `sol:"gameCount"` GameAtIndex func(index *big.Int) TypedCall[DisputeGame] `sol:"gameAtIndex"` GameImpls func(gameType uint32) TypedCall[common.Address] `sol:"gameImpls"` + GameArgs func(gameType uint32) TypedCall[[]byte] `sol:"gameArgs"` Games func(gameType uint32, rootClaim common.Hash, extraData []byte) TypedCall[struct { Proxy common.Address Timestamp uint64 diff --git a/op-service/txintent/bindings/GasPriceOracle.go 
b/op-service/txintent/bindings/GasPriceOracle.go new file mode 100644 index 0000000000000..8f08620451b92 --- /dev/null +++ b/op-service/txintent/bindings/GasPriceOracle.go @@ -0,0 +1,24 @@ +package bindings + +import ( + "math/big" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +type GasPriceOracle struct { + // Read-only functions + BaseFeeScalar func() TypedCall[uint32] `sol:"baseFeeScalar"` + BlobBaseFeeScalar func() TypedCall[uint32] `sol:"blobBaseFeeScalar"` + L1BaseFee func() TypedCall[*eth.ETH] `sol:"l1BaseFee"` + BlobBaseFee func() TypedCall[*eth.ETH] `sol:"blobBaseFee"` + IsFjord func() TypedCall[bool] `sol:"isFjord"` + GetL1Fee func(data []byte) TypedCall[eth.ETH] `sol:"getL1Fee"` + GetL1GasUsed func(data []byte) TypedCall[uint64] `sol:"getL1GasUsed"` + GetL1FeeUpperBound func(unsignedTxSize *big.Int) TypedCall[eth.ETH] `sol:"getL1FeeUpperBound"` +} + +func NewGasPriceOracle(opts ...CallFactoryOption) *GasPriceOracle { + gpo := NewBindings[GasPriceOracle](opts...) 
+ return &gpo +} diff --git a/op-service/txintent/contractio/batch.go b/op-service/txintent/contractio/batch.go new file mode 100644 index 0000000000000..9c622476c3076 --- /dev/null +++ b/op-service/txintent/contractio/batch.go @@ -0,0 +1,73 @@ +package contractio + +import ( + "fmt" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/rpc" + + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" + "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" +) + +// BatchableCall represents a contract read call (eth_call) that can be executed as part of a larger batch of requests +// using `batching.MultiCaller` +type BatchableCall[T any] struct { + typedCall bindings.TypedCall[T] +} + +var _ batching.Call = (*BatchableCall[any])(nil) + +func NewBatchableCall[T any](typedCall bindings.TypedCall[T]) *BatchableCall[T] { + return &BatchableCall[T]{ + typedCall: typedCall, + } +} + +func (c *BatchableCall[T]) ToBatchElemCreator() (batching.BatchElementCreator, error) { + args, err := c.callArgs() + if err != nil { + return nil, err + } + f := func(block rpcblock.Block) (any, rpc.BatchElem) { + out := new(hexutil.Bytes) + return out, rpc.BatchElem{ + Method: "eth_call", + Args: []interface{}{args, block.ArgValue()}, + Result: &out, + } + } + return f, nil +} + +func (c *BatchableCall[T]) HandleResult(result interface{}) (*batching.CallResult, error) { + hex := *result.(*hexutil.Bytes) + out, err := c.typedCall.DecodeOutput(hex) + if err != nil { + return nil, fmt.Errorf("failed to decode output: %w", err) + } + return batching.NewCallResult([]any{out}), nil +} + +func (c *BatchableCall[T]) input() ([]byte, error) { + return c.typedCall.EncodeInputLambda() +} + +func (c *BatchableCall[T]) callArgs() (interface{}, error) { + data, err := c.input() + if err != nil { + return nil, fmt.Errorf("failed to encode input data: %w", err) + } + + 
to, err := c.typedCall.To() + if err != nil { + return nil, fmt.Errorf("failed to determine contract address: %w", err) + } + + arg := map[string]interface{}{ + "to": to, + "input": hexutil.Bytes(data), + } + return arg, nil +} diff --git a/op-service/txintent/contractio/call.go b/op-service/txintent/contractio/call.go index 99bbdd555b812..0aa6df383b61e 100644 --- a/op-service/txintent/contractio/call.go +++ b/op-service/txintent/contractio/call.go @@ -2,7 +2,11 @@ package contractio import ( "context" + "fmt" + "math/big" + "github.com/ethereum-optimism/optimism/op-service/sources/batching" + "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" "github.com/ethereum-optimism/optimism/op-service/txplan" "github.com/ethereum/go-ethereum/common" @@ -49,6 +53,40 @@ func Read[O any](view bindings.TypedCall[O], ctx context.Context, opts ...txplan return decoded, nil } +// ReadArray uses batch calls to load all entries from an array. +func ReadArray[T any](ctx context.Context, caller *batching.MultiCaller, countCall bindings.TypedCall[*big.Int], elemCall func(i *big.Int) bindings.TypedCall[T]) ([]T, error) { + block := rpcblock.Latest + + countResult, err := Read(countCall, ctx) + if err != nil { + return nil, fmt.Errorf("error reading array size: %w", err) + } + + count := countResult.Uint64() + calls := make([]batching.Call, count) + for i := uint64(0); i < count; i++ { + typedCall := elemCall(new(big.Int).SetUint64(i)) + calls[i] = NewBatchableCall(typedCall) + } + + callResults, err := caller.Call(ctx, block, calls...) 
+ if err != nil { + return nil, fmt.Errorf("failed to fetch array data: %w", err) + } + + // Convert results to expected type + var results []T + for _, callResult := range callResults { + result, ok := callResult.Get(0).(T) + if !ok { + return nil, fmt.Errorf("failed to cast result: %v", callResult) + } + results = append(results, result) + } + + return results, nil +} + func Plan[O any](call bindings.TypedCall[O]) (txplan.Option, error) { target, err := call.To() if err != nil { diff --git a/op-service/txmgr/cli.go b/op-service/txmgr/cli.go index 7c384a5a2020b..fa222ce5e0e60 100644 --- a/op-service/txmgr/cli.go +++ b/op-service/txmgr/cli.go @@ -44,6 +44,7 @@ const ( TxNotInMempoolTimeoutFlagName = "txmgr.not-in-mempool-timeout" ReceiptQueryIntervalFlagName = "txmgr.receipt-query-interval" AlreadyPublishedCustomErrsFlagName = "txmgr.already-published-custom-errs" + EnableCellProofsFlagName = "txmgr.enable-cell-proofs" ) var ( @@ -76,6 +77,7 @@ type DefaultFlagValues struct { TxSendTimeout time.Duration TxNotInMempoolTimeout time.Duration ReceiptQueryInterval time.Duration + EnableCellProofs bool } var ( @@ -94,6 +96,7 @@ var ( TxSendTimeout: 0, // Try sending txs indefinitely, to preserve tx ordering for Holocene TxNotInMempoolTimeout: 2 * time.Minute, ReceiptQueryInterval: 12 * time.Second, + EnableCellProofs: false, // Ater Osaka activates on L1, this should be set to true } DefaultChallengerFlagValues = DefaultFlagValues{ NumConfirmations: uint64(3), @@ -238,6 +241,12 @@ func CLIFlagsWithDefaults(envPrefix string, defaults DefaultFlagValues) []cli.Fl Usage: "List of custom RPC error messages that indicate that a transaction has already been published.", EnvVars: prefixEnvVars("TXMGR_ALREADY_PUBLISHED_CUSTOM_ERRS"), }, + &cli.BoolFlag{ + Name: EnableCellProofsFlagName, + Usage: "Enable cell proofs in blob transactions for Fusaka (EIP-7742) compatibility", + Value: false, + EnvVars: prefixEnvVars("TXMGR_ENABLE_CELL_PROOFS"), + }, }, opsigner.CLIFlags(envPrefix, 
"")...) } @@ -266,6 +275,7 @@ type CLIConfig struct { TxSendTimeout time.Duration TxNotInMempoolTimeout time.Duration AlreadyPublishedCustomErrs []string + EnableCellProofs bool } func NewCLIConfig(l1RPCURL string, defaults DefaultFlagValues) CLIConfig { @@ -285,6 +295,7 @@ func NewCLIConfig(l1RPCURL string, defaults DefaultFlagValues) CLIConfig { TxSendTimeout: defaults.TxSendTimeout, TxNotInMempoolTimeout: defaults.TxNotInMempoolTimeout, ReceiptQueryInterval: defaults.ReceiptQueryInterval, + EnableCellProofs: defaults.EnableCellProofs, SignerCLIConfig: opsigner.NewCLIConfig(), } } @@ -367,6 +378,7 @@ func ReadCLIConfig(ctx *cli.Context) CLIConfig { TxSendTimeout: ctx.Duration(TxSendTimeoutFlagName), TxNotInMempoolTimeout: ctx.Duration(TxNotInMempoolTimeoutFlagName), AlreadyPublishedCustomErrs: ctx.StringSlice(AlreadyPublishedCustomErrsFlagName), + EnableCellProofs: ctx.Bool(EnableCellProofsFlagName), } } @@ -460,6 +472,7 @@ func NewConfig(cfg CLIConfig, l log.Logger) (*Config, error) { res.MinTipCap.Store(minTipCap) res.MaxTipCap.Store(maxTipCap) res.MinBlobTxFee.Store(defaultMinBlobTxFee) + res.EnableCellProofs = cfg.EnableCellProofs return &res, nil } @@ -498,6 +511,10 @@ type Config struct { MinBlobTxFee atomic.Pointer[big.Int] + // EnableCellProofs determines whether to use cell proofs (Version1 sidecars) + // for Fusaka (EIP-7742) compatibility. If false, uses legacy blob proofs (Version0). + EnableCellProofs bool + // ChainID is the chain ID of the L1 chain. 
ChainID *big.Int diff --git a/op-service/txmgr/estimator.go b/op-service/txmgr/estimator.go index 627e69910443b..0a3aa02694054 100644 --- a/op-service/txmgr/estimator.go +++ b/op-service/txmgr/estimator.go @@ -4,8 +4,6 @@ import ( "context" "errors" "math/big" - - "github.com/ethereum-optimism/optimism/op-service/eth" ) type GasPriceEstimatorFn func(ctx context.Context, backend ETHBackend) (*big.Int, *big.Int, *big.Int, error) @@ -24,9 +22,9 @@ func DefaultGasPriceEstimatorFn(ctx context.Context, backend ETHBackend) (*big.I return nil, nil, nil, errors.New("txmgr does not support pre-london blocks that do not have a base fee") } - var blobFee *big.Int - if head.ExcessBlobGas != nil { - blobFee = eth.CalcBlobFeeDefault(head) + blobFee, err := backend.BlobBaseFee(ctx) + if err != nil { + return nil, nil, nil, err } return tip, head.BaseFee, blobFee, nil diff --git a/op-service/txmgr/queue_test.go b/op-service/txmgr/queue_test.go index 29c372365faed..81fd87aeaa6f3 100644 --- a/op-service/txmgr/queue_test.go +++ b/op-service/txmgr/queue_test.go @@ -56,6 +56,10 @@ func (b *mockBackendWithNonce) NonceAt(ctx context.Context, account common.Addre return uint64(len(b.minedTxs)), nil } +func (b *mockBackendWithNonce) BlobBaseFee(ctx context.Context) (*big.Int, error) { + return big.NewInt(0), nil +} + func TestQueue_Send(t *testing.T) { testCases := []struct { name string // name of the test @@ -281,6 +285,8 @@ func newMockBackendWithConfirmationDelay(g *gasPricer, wg *sync.WaitGroup) *mock b.g = g sendTx := func(ctx context.Context, tx *types.Transaction) error { + b.mu.Lock() + defer b.mu.Unlock() _, exists := b.cachedTxs[tx.Hash()] if !exists { b.cachedTxs[tx.Hash()] = tx @@ -300,6 +306,10 @@ func (b *mockBackendWithConfirmationDelay) MineAll() { } } +func (b *mockBackendWithConfirmationDelay) BlobBaseFee(ctx context.Context) (*big.Int, error) { + return big.NewInt(0), nil +} + // Simple test that we can call q.Send() up to the maxPending limit without blocking. 
func TestQueue_Send_MaxPendingMetrics(t *testing.T) { maxPending := 5 diff --git a/op-service/txmgr/test_txmgr.go b/op-service/txmgr/test_txmgr.go index 9c4dc5e0f335d..6d5c00d88341d 100644 --- a/op-service/txmgr/test_txmgr.go +++ b/op-service/txmgr/test_txmgr.go @@ -52,7 +52,7 @@ func (m *TestTxManager) makeStuckTx(ctx context.Context, candidate TxCandidate) var sidecar *types.BlobTxSidecar var blobHashes []common.Hash if len(candidate.Blobs) > 0 { - if sidecar, blobHashes, err = MakeSidecar(candidate.Blobs); err != nil { + if sidecar, blobHashes, err = MakeSidecar(candidate.Blobs, m.cfg.EnableCellProofs); err != nil { return nil, err } } diff --git a/op-service/txmgr/txmgr.go b/op-service/txmgr/txmgr.go index 74400cba47568..35ee2aaacce67 100644 --- a/op-service/txmgr/txmgr.go +++ b/op-service/txmgr/txmgr.go @@ -119,6 +119,7 @@ type ETHBackend interface { // TODO: Maybe need a generic interface to support different RPC providers HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) SuggestGasTipCap(ctx context.Context) (*big.Int, error) + BlobBaseFee(ctx context.Context) (*big.Int, error) // NonceAt returns the account nonce of the given account. // The block number can be nil, in which case the nonce is taken from the latest known block. 
NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) @@ -208,7 +209,7 @@ func (m *SimpleTxManager) Close() { } func (m *SimpleTxManager) txLogger(tx *types.Transaction, logGas bool) log.Logger { - fields := []any{"tx", tx.Hash(), "nonce", tx.Nonce()} + fields := []any{"tx", tx.Hash().Hex(), "nonce", tx.Nonce()} if logGas { fields = append(fields, "gasTipCap", tx.GasTipCap(), "gasFeeCap", tx.GasFeeCap(), "gasLimit", tx.Gas()) } @@ -363,7 +364,8 @@ func (m *SimpleTxManager) craftTx(ctx context.Context, candidate TxCandidate) (* if candidate.To == nil { return nil, errors.New("blob txs cannot deploy contracts") } - if sidecar, blobHashes, err = MakeSidecar(candidate.Blobs); err != nil { + // Use configuration to determine whether to enable cell proofs + if sidecar, blobHashes, err = MakeSidecar(candidate.Blobs, m.cfg.EnableCellProofs); err != nil { return nil, fmt.Errorf("failed to make sidecar: %w", err) } } @@ -491,10 +493,23 @@ func (m *SimpleTxManager) SetBumpFeeRetryTime(val time.Duration) { } // MakeSidecar builds & returns the BlobTxSidecar and corresponding blob hashes from the raw blob -// data. -func MakeSidecar(blobs []*eth.Blob) (*types.BlobTxSidecar, []common.Hash, error) { - sidecar := &types.BlobTxSidecar{} +// data with configurable cell proof support. 
+func MakeSidecar(blobs []*eth.Blob, enableCellProofs bool) (*types.BlobTxSidecar, []common.Hash, error) { + var sidecar *types.BlobTxSidecar + if enableCellProofs { + sidecar = &types.BlobTxSidecar{ + Proofs: make([]kzg4844.Proof, 0, len(blobs)*kzg4844.CellProofsPerBlob), + Version: types.BlobSidecarVersion1, // Use Version1 for cell proofs (Fusaka compatibility) + } + } else { + sidecar = &types.BlobTxSidecar{ + Proofs: make([]kzg4844.Proof, 0, len(blobs)), + Version: types.BlobSidecarVersion0, // Use Version0 for legacy blob proofs + } + } + blobHashes := make([]common.Hash, 0, len(blobs)) + for i, blob := range blobs { rawBlob := blob.KZGBlob() sidecar.Blobs = append(sidecar.Blobs, *rawBlob) @@ -503,13 +518,24 @@ func MakeSidecar(blobs []*eth.Blob) (*types.BlobTxSidecar, []common.Hash, error) return nil, nil, fmt.Errorf("cannot compute KZG commitment of blob %d in tx candidate: %w", i, err) } sidecar.Commitments = append(sidecar.Commitments, commitment) - proof, err := kzg4844.ComputeBlobProof(rawBlob, commitment) - if err != nil { - return nil, nil, fmt.Errorf("cannot compute KZG proof for fast commitment verification of blob %d in tx candidate: %w", i, err) - } - sidecar.Proofs = append(sidecar.Proofs, proof) blobHashes = append(blobHashes, eth.KZGToVersionedHash(commitment)) + if enableCellProofs { + // Version1: Use cell proofs for Fusaka compatibility + cellProofs, err := kzg4844.ComputeCellProofs(rawBlob) + if err != nil { + return nil, nil, fmt.Errorf("cannot compute KZG cell proofs for blob %d in tx candidate: %w", i, err) + } + sidecar.Proofs = append(sidecar.Proofs, cellProofs...) 
+ } else { + // Version0: Use legacy blob proofs + proof, err := kzg4844.ComputeBlobProof(rawBlob, sidecar.Commitments[i]) + if err != nil { + return nil, nil, fmt.Errorf("cannot compute KZG proof for fast commitment verification of blob %d in tx candidate: %w", i, err) + } + sidecar.Proofs = append(sidecar.Proofs, proof) + } } + return sidecar, blobHashes, nil } @@ -815,8 +841,11 @@ func (m *SimpleTxManager) queryReceipt(ctx context.Context, txHash common.Hash, } m.metr.RecordBaseFee(tip.BaseFee) - if tip.ExcessBlobGas != nil { - blobFee := eth.CalcBlobFeeDefault(tip) + + if blobFee, err := m.backend.BlobBaseFee(ctx); err != nil { + m.metr.RPCError() + m.l.Warn("Unable to fetch blob base fee", "err", err) + } else { m.metr.RecordBlobBaseFee(blobFee) } diff --git a/op-service/txmgr/txmgr_test.go b/op-service/txmgr/txmgr_test.go index f025c11a321e7..84747bf436eaf 100644 --- a/op-service/txmgr/txmgr_test.go +++ b/op-service/txmgr/txmgr_test.go @@ -131,7 +131,7 @@ type gasPricer struct { mineAtEpoch int64 baseGasTipFee *big.Int baseBaseFee *big.Int - excessBlobGas uint64 + blobBaseFee *big.Int err error mu sync.Mutex } @@ -146,9 +146,7 @@ func newGasPricer(mineAtEpoch int64) *gasPricer { mineAtEpoch: mineAtEpoch, baseGasTipFee: big.NewInt(baseGasTipFee), baseBaseFee: big.NewInt(baseBaseFee), - // Simulate 100 excess blobs, which results in a blobBaseFee of 50 wei. This default means - // blob txs will be subject to the geth minimum blobgas fee of 1 gwei. - excessBlobGas: 100 * (params.BlobTxBlobGasPerBlob), + blobBaseFee: big.NewInt(50), } } @@ -158,9 +156,8 @@ func (g *gasPricer) expGasFeeCap() *big.Int { } func (g *gasPricer) expBlobFeeCap() *big.Int { - _, _, excessBlobGas := g.feesForEpoch(g.mineAtEpoch) - // Needs to be adjusted when Prague gas pricing is needed. 
- return eth.CalcBlobFeeCancun(excessBlobGas) + _, _, blobBaseFee := g.feesForEpoch(g.mineAtEpoch) + return blobBaseFee } func (g *gasPricer) shouldMine(gasFeeCap *big.Int) bool { @@ -171,13 +168,14 @@ func (g *gasPricer) shouldMineBlobTx(gasFeeCap, blobFeeCap *big.Int) bool { return g.shouldMine(gasFeeCap) && g.expBlobFeeCap().Cmp(blobFeeCap) <= 0 } -func (g *gasPricer) feesForEpoch(epoch int64) (*big.Int, *big.Int, uint64) { +func (g *gasPricer) feesForEpoch(epoch int64) (*big.Int, *big.Int, *big.Int) { e := big.NewInt(epoch) epochBaseFee := new(big.Int).Mul(g.baseBaseFee, e) epochGasTipCap := new(big.Int).Mul(g.baseGasTipFee, e) epochGasFeeCap := calcGasFeeCap(epochBaseFee, epochGasTipCap) - epochExcessBlobGas := g.excessBlobGas * uint64(epoch) - return epochGasTipCap, epochGasFeeCap, epochExcessBlobGas + epochBlobBaseFee := new(big.Int).Mul(g.blobBaseFee, new(big.Int).Exp(big.NewInt(2), e, nil)) + + return epochGasTipCap, epochGasFeeCap, epochBlobBaseFee } func (g *gasPricer) baseFee() *big.Int { @@ -186,20 +184,14 @@ func (g *gasPricer) baseFee() *big.Int { return new(big.Int).Mul(g.baseBaseFee, big.NewInt(g.epoch)) } -func (g *gasPricer) excessblobgas() uint64 { - g.mu.Lock() - defer g.mu.Unlock() - return g.excessBlobGas * uint64(g.epoch) -} - -func (g *gasPricer) sample() (*big.Int, *big.Int, uint64) { +func (g *gasPricer) sample() (*big.Int, *big.Int, *big.Int) { g.mu.Lock() defer g.mu.Unlock() g.epoch++ - epochGasTipCap, epochGasFeeCap, epochExcessBlobGas := g.feesForEpoch(g.epoch) + epochGasTipCap, epochGasFeeCap, epochBlobBaseFee := g.feesForEpoch(g.epoch) - return epochGasTipCap, epochGasFeeCap, epochExcessBlobGas + return epochGasTipCap, epochGasFeeCap, epochBlobBaseFee } type minedTxInfo struct { @@ -274,11 +266,9 @@ func (b *mockBackend) HeaderByNumber(ctx context.Context, number *big.Int) (*typ if number != nil { num.Set(number) } - bg := b.g.excessblobgas() return &types.Header{ - Number: num, - BaseFee: b.g.baseFee(), - ExcessBlobGas: &bg, + 
Number: num, + BaseFee: b.g.baseFee(), }, nil } @@ -346,6 +336,10 @@ func (b *mockBackend) TransactionReceipt(ctx context.Context, txHash common.Hash func (b *mockBackend) Close() { } +func (b *mockBackend) BlobBaseFee(ctx context.Context) (*big.Int, error) { + return big.NewInt(0), nil +} + type testSendVariantsFn func(ctx context.Context, h *testHarness, tx TxCandidate) (*types.Receipt, error) func testSendVariants(t *testing.T, testFn func(t *testing.T, send testSendVariantsFn)) { @@ -511,9 +505,7 @@ func TestTxMgrConfirmsBlobTxAtHigherGasPrice(t *testing.T) { h := newTestHarness(t) - gasTipCap, gasFeeCap, excessBlobGas := h.gasPricer.sample() - // Needs to be adjusted when testing with Prague activated on L1. - blobFeeCap := eth.CalcBlobFeeCancun(excessBlobGas) + gasTipCap, gasFeeCap, blobFeeCap := h.gasPricer.sample() t.Log("Blob fee cap:", blobFeeCap, "gasFeeCap:", gasFeeCap) tx := types.NewTx(&types.BlobTx{ @@ -1016,11 +1008,10 @@ func TestManagerErrsOnZeroConfs(t *testing.T) { // first call but a success on the second call. This allows us to test that the // inner loop of WaitMined properly handles this case. 
type failingBackend struct { - returnSuccessBlockNumber bool - returnSuccessHeader bool - returnSuccessReceipt bool - baseFee, gasTip *big.Int - excessBlobGas *uint64 + returnSuccessBlockNumber bool + returnSuccessHeader bool + returnSuccessReceipt bool + baseFee, gasTip, blobBaseFee *big.Int } // BlockNumber for the failingBackend returns errRpcFailure on the first @@ -1057,9 +1048,8 @@ func (b *failingBackend) HeaderByNumber(ctx context.Context, _ *big.Int) (*types } return &types.Header{ - Number: big.NewInt(1), - BaseFee: b.baseFee, - ExcessBlobGas: b.excessBlobGas, + Number: big.NewInt(1), + BaseFee: b.baseFee, }, nil } @@ -1094,6 +1084,10 @@ func (b *failingBackend) ChainID(ctx context.Context) (*big.Int, error) { func (b *failingBackend) Close() { } +func (b *failingBackend) BlobBaseFee(ctx context.Context) (*big.Int, error) { + return b.blobBaseFee, nil +} + // TestWaitMinedReturnsReceiptAfterFailure asserts that WaitMined is able to // recover from failed calls to the backend. It uses the failedBackend to // simulate an rpc call failure, followed by the successful return of a receipt. 
@@ -1315,12 +1309,10 @@ func testIncreaseGasPriceLimit(t *testing.T, lt gasPriceLimitTest) { borkedTip := int64(10) borkedFee := int64(45) - // simulate 100 excess blobs which yields a 50 wei blob base fee - borkedExcessBlobGas := uint64(100 * params.BlobTxBlobGasPerBlob) borkedBackend := failingBackend{ gasTip: big.NewInt(borkedTip), baseFee: big.NewInt(borkedFee), - excessBlobGas: &borkedExcessBlobGas, + blobBaseFee: big.NewInt(50), returnSuccessHeader: true, } @@ -1736,7 +1728,9 @@ func TestMakeSidecar(t *testing.T) { for i := 0; i < 4096; i++ { blob[32*i] &= 0b0011_1111 } - sidecar, hashes, err := MakeSidecar([]*eth.Blob{&blob}) + + // Pre Fusaka, blob proof sidecar is Version0 + sidecar, hashes, err := MakeSidecar([]*eth.Blob{&blob}, false) require.NoError(t, err) require.Equal(t, len(hashes), 1) require.Equal(t, len(sidecar.Blobs), len(hashes)) @@ -1747,6 +1741,19 @@ func TestMakeSidecar(t *testing.T) { require.NoError(t, eth.VerifyBlobProof((*eth.Blob)(&sidecar.Blobs[i]), commit, sidecar.Proofs[i]), "proof must be valid") require.Equal(t, hashes[i], eth.KZGToVersionedHash(commit)) } + + // Post Fusaka, blob proof sidecar is Version1 + sidecar, hashes, err = MakeSidecar([]*eth.Blob{&blob}, true) + require.NoError(t, err) + require.Equal(t, len(hashes), 1) + require.Equal(t, len(sidecar.Blobs), len(hashes)) + require.Equal(t, len(sidecar.Proofs), len(hashes)*kzg4844.CellProofsPerBlob) + require.Equal(t, len(sidecar.Commitments), len(hashes)) + + require.NoError(t, kzg4844.VerifyCellProofs(sidecar.Blobs, sidecar.Commitments, sidecar.Proofs), "cell proof must be valid") + for i, commit := range sidecar.Commitments { + require.Equal(t, hashes[i], eth.KZGToVersionedHash(commit)) + } } func TestSendAsyncUnbufferedChan(t *testing.T) { diff --git a/op-service/txplan/txplan.go b/op-service/txplan/txplan.go index 67d293a3353d4..d4151c03dfc1c 100644 --- a/op-service/txplan/txplan.go +++ b/op-service/txplan/txplan.go @@ -8,6 +8,7 @@ import ( "math/big" 
"github.com/ethereum-optimism/optimism/op-service/retry" + "github.com/ethereum-optimism/optimism/op-service/txmgr" "github.com/holiman/uint256" "github.com/ethereum/go-ethereum" @@ -16,6 +17,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/plan" @@ -53,6 +55,9 @@ type PlannedTx struct { Value plan.Lazy[*big.Int] AccessList plan.Lazy[types.AccessList] // resolves to nil if not an attribute AuthList plan.Lazy[[]types.SetCodeAuthorization] // resolves to nil if not a 7702 tx + BlobFeeCap plan.Lazy[*uint256.Int] // resolves to nil if not a blob tx + BlobHashes plan.Lazy[[]common.Hash] // resolves to nil if not a blob tx + Sidecar plan.Lazy[*types.BlobTxSidecar] // resolves to nil if not a blob tx } func (ptx *PlannedTx) String() string { @@ -83,9 +88,9 @@ func WithTo(to *common.Address) Option { } } -func WithValue(val *big.Int) Option { +func WithValue(val eth.ETH) Option { return func(tx *PlannedTx) { - tx.Value.Set(val) + tx.Value.Set(val.ToBig()) } } @@ -134,6 +139,24 @@ func WithAuthorizations(auths []types.SetCodeAuthorization) Option { } } +func WithAuthorizationTo(codeAddr common.Address) Option { + return func(tx *PlannedTx) { + tx.AuthList.DependOn(&tx.Nonce, &tx.ChainID, &tx.Priv) + tx.AuthList.Fn(func(ctx context.Context) ([]types.SetCodeAuthorization, error) { + auth1, err := types.SignSetCode(tx.Priv.Value(), types.SetCodeAuthorization{ + ChainID: *uint256.MustFromBig(tx.ChainID.Value().ToBig()), + Address: codeAddr, + // before the nonce is compared with the authorization in the EVM, it is incremented by 1 + Nonce: tx.Nonce.Value() + 1, + }) + if err != nil { + return nil, fmt.Errorf("failed to sign 7702 authorization: %w", err) + } + return []types.SetCodeAuthorization{auth1}, nil + }) + } +} + func WithType(t uint8) 
Option { return func(tx *PlannedTx) { tx.Type.Set(t) @@ -142,6 +165,9 @@ func WithType(t uint8) Option { func WithGasLimit(limit uint64) Option { return func(tx *PlannedTx) { + // The gas limit is explicitly set so remove any dependencies which may have been added by a previous call + // to WithEstimator. + tx.Gas.ResetFnAndDependencies() tx.Gas.Set(limit) } } @@ -314,7 +340,7 @@ func WithAgainstLatestBlock(cl AgainstLatestBlock) Option { // Reader uses eth_call to view(read) the blockchain, and does not write persistent changes to the chain. // A call will return a byte string (that may be ABI-decoded), and does not have a receipt, as it was only simulated and not a persistent transaction. type Reader interface { - Call(ctx context.Context, msg ethereum.CallMsg) ([]byte, error) + Call(ctx context.Context, msg ethereum.CallMsg, blockNumber rpc.BlockNumber) ([]byte, error) } func WithReader(cl Reader) Option { @@ -327,6 +353,7 @@ func WithReader(cl Reader) Option { &tx.Value, &tx.Data, &tx.AccessList, + &tx.AgainstBlock, ) tx.Read.Fn(func(ctx context.Context) ([]byte, error) { msg := ethereum.CallMsg{ @@ -340,7 +367,7 @@ func WithReader(cl Reader) Option { Data: tx.Data.Value(), AccessList: tx.AccessList.Value(), } - return cl.Call(ctx, msg) + return cl.Call(ctx, msg, rpc.BlockNumber(tx.AgainstBlock.Value().NumberU64())) }) } } @@ -361,6 +388,29 @@ func WithChainID(cl ChainID) Option { } } +func WithBlobs(blobs []*eth.Blob, config *params.ChainConfig) Option { + return func(tx *PlannedTx) { + tx.Type.Set(types.BlobTxType) + tx.BlobFeeCap.DependOn(&tx.AgainstBlock) + tx.BlobFeeCap.Fn(func(_ context.Context) (*uint256.Int, error) { + return uint256.MustFromBig(tx.AgainstBlock.Value().BlobBaseFee(config)), nil + }) + var blobHashes []common.Hash + tx.Sidecar.Fn(func(_ context.Context) (*types.BlobTxSidecar, error) { + sidecar, hashes, err := txmgr.MakeSidecar(blobs, true) + if err != nil { + return nil, fmt.Errorf("make blob tx sidecar: %w", err) + } + blobHashes = 
hashes + return sidecar, nil + }) + tx.BlobHashes.DependOn(&tx.Sidecar) + tx.BlobHashes.Fn(func(_ context.Context) ([]common.Hash, error) { + return blobHashes, nil + }) + } +} + func (tx *PlannedTx) Defaults() { tx.Type.Set(types.DynamicFeeTxType) tx.To.Set(nil) @@ -398,6 +448,10 @@ func (tx *PlannedTx) Defaults() { return crypto.PubkeyToAddress(tx.Priv.Value().PublicKey), nil }) + tx.BlobFeeCap.Set(nil) + tx.BlobHashes.Set(nil) + tx.Sidecar.Set(nil) + // Automatically build tx from the individual attributes tx.Unsigned.DependOn( &tx.Sender, @@ -412,6 +466,9 @@ func (tx *PlannedTx) Defaults() { &tx.Value, &tx.AccessList, &tx.AuthList, + &tx.BlobFeeCap, + &tx.BlobHashes, + &tx.Sidecar, ) tx.Unsigned.Fn(func(ctx context.Context) (types.TxData, error) { chainID := tx.ChainID.Value() @@ -478,7 +535,23 @@ func (tx *PlannedTx) Defaults() { S: nil, }, nil case types.BlobTxType: - return nil, errors.New("blob tx not supported") + return &types.BlobTx{ + ChainID: uint256.MustFromBig(chainID.ToBig()), + Nonce: tx.Nonce.Value(), + GasTipCap: uint256.MustFromBig(tx.GasTipCap.Value()), + GasFeeCap: uint256.MustFromBig(tx.GasFeeCap.Value()), + Gas: tx.Gas.Value(), + To: *tx.To.Value(), + Value: uint256.MustFromBig(tx.Value.Value()), + Data: tx.Data.Value(), + AccessList: tx.AccessList.Value(), + BlobFeeCap: tx.BlobFeeCap.Value(), + BlobHashes: tx.BlobHashes.Value(), + Sidecar: tx.Sidecar.Value(), + V: nil, + R: nil, + S: nil, + }, nil case types.DepositTxType: return nil, errors.New("deposit tx not supported") default: @@ -503,7 +576,7 @@ func (tx *PlannedTx) Defaults() { if rec.Status == types.ReceiptStatusSuccessful { return struct{}{}, nil } else { - return struct{}{}, errors.New("tx failed") + return struct{}{}, fmt.Errorf("tx failed with status %v (%v of %v gas used)", rec.Status, rec.GasUsed, tx.Gas.Value()) } }) } diff --git a/op-service/txplan/txplan_test.go b/op-service/txplan/txplan_test.go index ab889a3868b12..b1a3e510b4a1f 100644 --- 
a/op-service/txplan/txplan_test.go +++ b/op-service/txplan/txplan_test.go @@ -16,7 +16,7 @@ func TestPlannedTx_Defaults(t *testing.T) { key, err := crypto.GenerateKey() require.NoError(t, err) - ptx := NewPlannedTx(WithPrivateKey(key), WithValue(big.NewInt(123))) + ptx := NewPlannedTx(WithPrivateKey(key), WithValue(eth.WeiU64(123))) t.Log("tx", ptx.Signed.String()) block := types.NewBlock(&types.Header{BaseFee: big.NewInt(7e9)}, nil, nil, nil, types.DefaultBlockConfig) diff --git a/op-supervisor/supervisor/backend/backend.go b/op-supervisor/supervisor/backend/backend.go index 0a4c3d4ffbdad..50deaf133a0be 100644 --- a/op-supervisor/supervisor/backend/backend.go +++ b/op-supervisor/supervisor/backend/backend.go @@ -204,10 +204,13 @@ func (su *SupervisorBackend) OnEvent(ctx context.Context, ev event.Event) bool { // don't process events of the activation block return true } - su.emitter.Emit(ctx, superevents.ChainProcessEvent{ - ChainID: x.ChainID, - Target: x.NewLocalUnsafe.Number, - }) + + cp, ok := su.chainProcessors.Get(x.ChainID) + if !ok { + su.logger.Error("chain processor not found", "chainID", x.ChainID) + return false + } + cp.ProcessChain(x.NewLocalUnsafe.Number) case superevents.LocalUnsafeUpdateEvent: su.emitter.Emit(ctx, superevents.UpdateCrossUnsafeRequestEvent{ ChainID: x.ChainID, @@ -227,10 +230,12 @@ func (su *SupervisorBackend) OnEvent(ctx context.Context, ev event.Event) bool { }) } case superevents.LocalSafeUpdateEvent: - su.emitter.Emit(ctx, superevents.ChainProcessEvent{ - ChainID: x.ChainID, - Target: x.NewLocalSafe.Derived.Number, - }) + cp, ok := su.chainProcessors.Get(x.ChainID) + if !ok { + su.logger.Error("chain processor not found", "chainID", x.ChainID) + return false + } + cp.ProcessChain(x.NewLocalSafe.Derived.Number) su.emitter.Emit(ctx, superevents.UpdateCrossSafeRequestEvent{ ChainID: x.ChainID, }) diff --git a/op-supervisor/supervisor/backend/backend_test.go b/op-supervisor/supervisor/backend/backend_test.go index 
c1e7bcf68363a..ca5035bc7c09c 100644 --- a/op-supervisor/supervisor/backend/backend_test.go +++ b/op-supervisor/supervisor/backend/backend_test.go @@ -340,11 +340,13 @@ func TestBackendCallsMetrics(t *testing.T) { fullCfgSet := fullConfigSet(t, 1) cfg := &config.Config{ - Version: "test", - LogConfig: oplog.CLIConfig{}, - MetricsConfig: opmetrics.CLIConfig{}, - PprofConfig: oppprof.CLIConfig{}, - RPC: oprpc.CLIConfig{}, + Version: "test", + LogConfig: oplog.CLIConfig{}, + MetricsConfig: opmetrics.CLIConfig{}, + PprofConfig: oppprof.CLIConfig{}, + RPC: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + }, FullConfigSetSource: fullCfgSet, SynchronousProcessors: true, MockRun: false, @@ -535,11 +537,13 @@ func TestAsyncVerifyAccessWithRPC(t *testing.T) { // Initialize backend with mock metrics cfg := &config.Config{ - Version: "test", - LogConfig: oplog.CLIConfig{}, - MetricsConfig: opmetrics.CLIConfig{}, - PprofConfig: oppprof.CLIConfig{}, - RPC: oprpc.CLIConfig{}, + Version: "test", + LogConfig: oplog.CLIConfig{}, + MetricsConfig: opmetrics.CLIConfig{}, + PprofConfig: oppprof.CLIConfig{}, + RPC: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + }, FullConfigSetSource: fullCfgSet, SynchronousProcessors: true, MockRun: false, diff --git a/op-supervisor/supervisor/backend/processors/chain_processor.go b/op-supervisor/supervisor/backend/processors/chain_processor.go index 92f31ceff725b..05164a604a430 100644 --- a/op-supervisor/supervisor/backend/processors/chain_processor.go +++ b/op-supervisor/supervisor/backend/processors/chain_processor.go @@ -107,20 +107,15 @@ func (s *ChainProcessor) nextNum() uint64 { return headNum + 1 } +func (s *ChainProcessor) ProcessChain(target uint64) { + s.UpdateTarget(target) + if s.running.CompareAndSwap(false, true) { + s.index() + } +} + func (s *ChainProcessor) OnEvent(ctx context.Context, ev event.Event) bool { switch x := ev.(type) { - case superevents.ChainProcessEvent: - if x.ChainID != s.chain { - return false - } - // always update the 
target - s.UpdateTarget(x.Target) - - // and if not already running, begin indexing - if s.running.CompareAndSwap(false, true) { - s.index() - } - case superevents.ChainIndexingContinueEvent: if x.ChainID != s.chain { return false diff --git a/op-supervisor/supervisor/backend/superevents/events.go b/op-supervisor/supervisor/backend/superevents/events.go index 62a89f8795330..2104a2301667f 100644 --- a/op-supervisor/supervisor/backend/superevents/events.go +++ b/op-supervisor/supervisor/backend/superevents/events.go @@ -5,15 +5,6 @@ import ( "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) -type ChainProcessEvent struct { - ChainID eth.ChainID - Target uint64 -} - -func (ev ChainProcessEvent) String() string { - return "chain-process" -} - type UpdateCrossUnsafeRequestEvent struct { ChainID eth.ChainID } diff --git a/op-supervisor/supervisor/service_test.go b/op-supervisor/supervisor/service_test.go index 4c9ad00e47796..c82694ee794db 100644 --- a/op-supervisor/supervisor/service_test.go +++ b/op-supervisor/supervisor/service_test.go @@ -12,7 +12,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum-optimism/optimism/op-service/dial" + opclient "github.com/ethereum-optimism/optimism/op-service/client" + oplog "github.com/ethereum-optimism/optimism/op-service/log" opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" "github.com/ethereum-optimism/optimism/op-service/oppprof" @@ -66,7 +67,7 @@ func TestSupervisorService(t *testing.T) { { endpoint := "http://" + supervisor.rpcServer.Endpoint() t.Logf("dialing %s", endpoint) - cl, err := dial.DialRPCClientWithTimeout(context.Background(), time.Second*5, logger, endpoint) + cl, err := opclient.NewRPC(context.Background(), logger, endpoint, opclient.WithConnectTimeout(5*time.Second)) require.NoError(t, err) ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) err = cl.CallContext(ctx, nil, 
"supervisor_checkAccessList", diff --git a/op-sync-tester/example_config.yaml b/op-sync-tester/example_config.yaml index 121fd6a60bb0b..a690bcb994417 100644 --- a/op-sync-tester/example_config.yaml +++ b/op-sync-tester/example_config.yaml @@ -1,7 +1,10 @@ synctesters: local: chain_id: 2151908 - el_rpc: http://localhost:32988/ + el_rpc: http://localhost:65293/ sepolia: chain_id: 11155420 - el_rpc: https://sepolia.optimism.io + el_rpc: https://sepolia.optimism.io + mainnet: + chain_id: 10 + el_rpc: https://mainnet.optimism.io diff --git a/op-sync-tester/synctester/backend/backend.go b/op-sync-tester/synctester/backend/backend.go index 80ae7b04b0e9c..3743231519a2e 100644 --- a/op-sync-tester/synctester/backend/backend.go +++ b/op-sync-tester/synctester/backend/backend.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "sort" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" @@ -18,28 +17,6 @@ import ( sttypes "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/backend/types" ) -type sessionKeyType struct{} - -var ctxKeySession = sessionKeyType{} - -// WithSession returns a new context with the given Session. -func WithSession(ctx context.Context, s *Session) context.Context { - return context.WithValue(ctx, ctxKeySession, s) -} - -// SessionFromContext retrieves the Session from the context, if present. 
-func SessionFromContext(ctx context.Context) (*Session, bool) { - s, ok := ctx.Value(ctxKeySession).(*Session) - return s, ok -} - -type Session struct { - SessionID string - Latest uint64 - Safe uint64 - Finalized uint64 -} - type APIRouter interface { AddRPC(route string) error AddAPIToRPC(route string, api rpc.API) error @@ -63,20 +40,14 @@ func FromConfig(log log.Logger, m metrics.Metricer, cfg *config.Config, router A log: log, m: m, } - var syncTesterIDs []sttypes.SyncTesterID + for stID, stCfg := range cfg.SyncTesters { st, err := SyncTesterFromConfig(log, m, stID, stCfg) if err != nil { return nil, fmt.Errorf("failed to setup sync tester %q: %w", stID, err) } b.syncTesters.Set(stID, st) - syncTesterIDs = append(syncTesterIDs, stID) } - // Infer defaults for chains that were not explicitly mentioned. - // Always use the lowest sync tester ID, so map-iteration doesn't affect defaults. - sort.Slice(syncTesterIDs, func(i, j int) bool { - return syncTesterIDs[i] < syncTesterIDs[j] - }) // Set up the sync tester routes var syncTesterErr error b.syncTesters.Range(func(id sttypes.SyncTesterID, st *SyncTester) bool { diff --git a/op-sync-tester/synctester/backend/backend_test.go b/op-sync-tester/synctester/backend/backend_test.go index b309ad78b59fc..142b82f032c10 100644 --- a/op-sync-tester/synctester/backend/backend_test.go +++ b/op-sync-tester/synctester/backend/backend_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-service/endpoint" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" @@ -49,12 +50,14 @@ func TestBackend(t *testing.T) { logger := testlog.Logger(t, log.LevelInfo) syncTesterCfgA := &stconf.SyncTesterEntry{ - ELRPC: endpoint.MustRPC{Value: endpoint.URL("http://" + srv.Endpoint())}, + ELRPC: endpoint.MustRPC{Value: endpoint.URL("http://" + srv.Endpoint())}, + ChainID: 
eth.ChainIDFromUInt64(1), } syncTesterA := sttypes.SyncTesterID("syncTesterA") syncTesterCfgB := &stconf.SyncTesterEntry{ - ELRPC: endpoint.MustRPC{Value: endpoint.URL("http://" + srv.Endpoint())}, + ELRPC: endpoint.MustRPC{Value: endpoint.URL("http://" + srv.Endpoint())}, + ChainID: eth.ChainIDFromUInt64(2), } syncTesterB := sttypes.SyncTesterID("syncTesterB") diff --git a/op-sync-tester/synctester/backend/el_reader.go b/op-sync-tester/synctester/backend/el_reader.go new file mode 100644 index 0000000000000..ee7ad6e8a442e --- /dev/null +++ b/op-sync-tester/synctester/backend/el_reader.go @@ -0,0 +1,82 @@ +package backend + +import ( + "context" + "encoding/json" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/rpc" +) + +// ReadOnlyELBackend defines the minimal, read-only execution layer +// interface used by the sync tester and its mock backends. +// The interface exposes two flavors of block accessors: +// - JSON-returning methods (GetBlockByNumberJSON, GetBlockByHashJSON) +// which return the raw RPC payload exactly as delivered by the EL. +// These are useful for relaying the response from read-only exec layer directly +// - Typed methods (GetBlockByNumber, GetBlockByHash) which decode +// the RPC response into geth *types.Block for structured +// inspection in code. +// - Additional helpers include GetBlockReceipts and ChainId +// +// Implementation wraps ethclient.Client to forward RPC +// calls. For testing, a mock implementation can be provided to return +// deterministic values without requiring a live execution layer node. 
+type ReadOnlyELBackend interface { + GetBlockByNumberJSON(ctx context.Context, number rpc.BlockNumber, fullTx bool) (json.RawMessage, error) + GetBlockByHashJSON(ctx context.Context, hash common.Hash, fullTx bool) (json.RawMessage, error) + GetBlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) + GetBlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) + GetBlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.Receipt, error) + ChainId(ctx context.Context) (hexutil.Big, error) +} + +var _ ReadOnlyELBackend = (*ELReader)(nil) + +type ELReader struct { + c *ethclient.Client +} + +func NewELReader(c *ethclient.Client) *ELReader { + return &ELReader{c: c} +} + +func (g *ELReader) GetBlockByNumberJSON(ctx context.Context, number rpc.BlockNumber, fullTx bool) (json.RawMessage, error) { + var raw json.RawMessage + if err := g.c.Client().CallContext(ctx, &raw, "eth_getBlockByNumber", number, fullTx); err != nil { + return nil, err + } + return raw, nil +} + +func (g *ELReader) GetBlockByHashJSON(ctx context.Context, hash common.Hash, fullTx bool) (json.RawMessage, error) { + var raw json.RawMessage + if err := g.c.Client().CallContext(ctx, &raw, "eth_getBlockByHash", hash, fullTx); err != nil { + return nil, err + } + return raw, nil +} + +func (g *ELReader) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { + return g.c.BlockByNumber(ctx, big.NewInt(number.Int64())) +} + +func (g *ELReader) GetBlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + return g.c.BlockByHash(ctx, hash) +} + +func (g *ELReader) GetBlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.Receipt, error) { + return g.c.BlockReceipts(ctx, blockNrOrHash) +} + +func (g *ELReader) ChainId(ctx context.Context) (hexutil.Big, error) { + chainID, err := g.c.ChainID(ctx) + if err != nil { + return hexutil.Big{}, err + } + return 
hexutil.Big(*chainID), nil +} diff --git a/op-sync-tester/synctester/backend/session/session.go b/op-sync-tester/synctester/backend/session/session.go new file mode 100644 index 0000000000000..10da7b97e0be5 --- /dev/null +++ b/op-sync-tester/synctester/backend/session/session.go @@ -0,0 +1,112 @@ +package session + +import ( + "context" + "fmt" + "sort" + "sync" + + "github.com/ethereum-optimism/optimism/op-service/eth" + + "github.com/ethereum/go-ethereum/log" +) + +type SessionManager struct { + sync.Mutex + sessions map[string]*eth.SyncTesterSession + deletedSessionIDs map[string]struct{} + + log log.Logger +} + +type sessionKeyType struct{} + +var ctxKeySession = sessionKeyType{} + +// WithSyncTesterSession returns a new context with the given Session. +func WithSyncTesterSession(ctx context.Context, s *eth.SyncTesterSession) context.Context { + return context.WithValue(ctx, ctxKeySession, s) +} + +// SyncTesterSessionFromContext retrieves the Session from the context, if present. +func SyncTesterSessionFromContext(ctx context.Context) (*eth.SyncTesterSession, bool) { + s, ok := ctx.Value(ctxKeySession).(*eth.SyncTesterSession) + return s, ok +} + +func NewSessionManager(logger log.Logger) *SessionManager { + return &SessionManager{log: logger, + sessions: make(map[string]*eth.SyncTesterSession), + deletedSessionIDs: make(map[string]struct{}), + } +} + +func (s *SessionManager) SessionIDs() []string { + s.Lock() + defer s.Unlock() + keys := make([]string, 0) + for key := range s.sessions { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +func (s *SessionManager) DeleteSession(sessionID string) error { + s.Lock() + defer s.Unlock() + if _, ok := s.sessions[sessionID]; !ok { + return fmt.Errorf("attempted to delete non-existent session: %s", sessionID) + } + s.deletedSessionIDs[sessionID] = struct{}{} + delete(s.sessions, sessionID) + s.log.Info("Deleted session", "sessionID", sessionID) + return nil +} + +func (s *SessionManager) get(given 
*eth.SyncTesterSession) (*eth.SyncTesterSession, error) { + if given == nil { + s.log.Warn("No initial session value provided") + return nil, fmt.Errorf("no initial session value") + } + id := given.SessionID + s.Lock() + defer s.Unlock() + if _, ok := s.deletedSessionIDs[id]; ok { + s.log.Warn("Attempted to use deleted session", "sessionID", id) + return nil, fmt.Errorf("session already deleted: %s", id) + } + var sess *eth.SyncTesterSession + sess, ok := s.sessions[id] + if ok { + s.log.Trace("Using existing session", "sessionID", id) + } else { + s.sessions[id] = given + sess = given + s.log.Info("Initialized new session", "sessionID", id) + } + return sess, nil +} + +func WithSession[T any]( + mgr *SessionManager, + ctx context.Context, + logger log.Logger, + fn func(*eth.SyncTesterSession, log.Logger) (T, error), +) (T, error) { + var zero T + given, ok := SyncTesterSessionFromContext(ctx) + if !ok || given == nil { + return zero, fmt.Errorf("no session found in context") + } + session, err := mgr.get(given) + if err != nil { + return zero, err + } + // blocking + session.Lock() + defer session.Unlock() + // Bind session ID and starting fcu state + logger = logger.With("id", session.SessionID, "start_fcu", session.CurrentState) + return fn(session, logger) +} diff --git a/op-sync-tester/synctester/backend/sync_tester.go b/op-sync-tester/synctester/backend/sync_tester.go index 0ac5f9523f5dd..3dc2e18ea44a4 100644 --- a/op-sync-tester/synctester/backend/sync_tester.go +++ b/op-sync-tester/synctester/backend/sync_tester.go @@ -1,42 +1,51 @@ package backend import ( + "bytes" "context" + "encoding/hex" + "encoding/json" "errors" "fmt" - "math/big" - "sync" + "time" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-sync-tester/metrics" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + 
"github.com/ethereum/go-ethereum/consensus/misc/eip1559" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/miner" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/backend/config" + "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/backend/session" sttypes "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/backend/types" "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/frontend" ) -var ( - ErrNoSession = errors.New("no session") - ErrNoReceipts = errors.New("no receipts") -) - type SyncTester struct { - mu sync.RWMutex - log log.Logger m metrics.Metricer - id sttypes.SyncTesterID - chainID eth.ChainID - elClient *ethclient.Client + id sttypes.SyncTesterID + chainID eth.ChainID + + elReader ReadOnlyELBackend + + sessMgr *session.SessionManager +} - sessions map[string]*Session +// HeaderNumberOnly is a lightweight header type that only contains the +// block number field. It is useful in contexts where the full Ethereum +// block header is not needed, and only the block number is required. 
+type HeaderNumberOnly struct { + Number *hexutil.Big `json:"number" gencodec:"required"` } var _ frontend.SyncBackend = (*SyncTester)(nil) @@ -49,105 +58,720 @@ func SyncTesterFromConfig(logger log.Logger, m metrics.Metricer, stID sttypes.Sy if err != nil { return nil, fmt.Errorf("failed to dial EL client: %w", err) } + elReader := NewELReader(elClient) + logger.Info("Initialized sync tester from config", "syncTester", stID) + return NewSyncTester(logger, m, stID, stCfg.ChainID, elReader), nil +} + +func NewSyncTester(logger log.Logger, m metrics.Metricer, stID sttypes.SyncTesterID, chainID eth.ChainID, elReader ReadOnlyELBackend) *SyncTester { return &SyncTester{ log: logger, m: m, id: stID, - chainID: stCfg.ChainID, - elClient: elClient, - sessions: make(map[string]*Session), - }, nil -} - -func (s *SyncTester) fetchSession(ctx context.Context) (*Session, error) { - session, ok := SessionFromContext(ctx) - if !ok || session == nil { - return nil, fmt.Errorf("no session found in context") - } - s.mu.Lock() - defer s.mu.Unlock() - if existing, ok := s.sessions[session.SessionID]; ok { - s.log.Info("Using existing session", "session", existing) - } else { - s.sessions[session.SessionID] = session - s.log.Info("Initialized new session", "session", session) + chainID: chainID, + elReader: elReader, + sessMgr: session.NewSessionManager(logger), } - return session, nil } -func (s *SyncTester) GetSession(ctx context.Context) error { - // example session logic - _, err := s.fetchSession(ctx) - if err != nil { - return ErrNoSession - } - return nil +func (s *SyncTester) GetSession(ctx context.Context) (*eth.SyncTesterSession, error) { + return session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) (*eth.SyncTesterSession, error) { + logger.Debug("GetSession") + return session, nil + }) } func (s *SyncTester) DeleteSession(ctx context.Context) error { - return nil + _, err := session.WithSession(s.sessMgr, ctx, s.log, 
func(session *eth.SyncTesterSession, logger log.Logger) (any, error) { + logger.Debug("DeleteSession") + return struct{}{}, s.sessMgr.DeleteSession(session.SessionID) + }) + return err +} + +func (s *SyncTester) ResetSession(ctx context.Context) error { + _, err := session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) (any, error) { + logger.Debug("ResetSession") + session.ResetSession() + return struct{}{}, nil + }) + return err } func (s *SyncTester) ListSessions(ctx context.Context) ([]string, error) { - return []string{}, nil + ids := s.sessMgr.SessionIDs() + s.log.Debug("ListSessions", "count", len(ids)) + return ids, nil } func (s *SyncTester) GetBlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.Receipt, error) { - return nil, nil + return session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) ([]*types.Receipt, error) { + logger.Debug("GetBlockReceipts", "blockNrOrHash", blockNrOrHash) + number, isNumber := blockNrOrHash.Number() + var err error + var receipts []*types.Receipt + if !isNumber { + // hash + receipts, err = s.elReader.GetBlockReceipts(ctx, blockNrOrHash) + if err != nil { + return nil, err + } + } else { + var target uint64 + if target, err = s.checkBlockNumber(number, session, logger); err != nil { + return nil, err + } + receipts, err = s.elReader.GetBlockReceipts(ctx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(target))) + if err != nil { + return nil, err + } + } + if len(receipts) == 0 { + // Should never happen since every block except genesis has at least one deposit tx + logger.Warn("L2 Block has zero receipts", "blockNrHash", blockNrOrHash) + return nil, errors.New("no receipts") + } + target := receipts[0].BlockNumber.Uint64() + if target > session.CurrentState.Latest { + logger.Warn("Requested block is ahead of sync tester state", "requested", target) + return nil, ethereum.NotFound + } + return receipts, nil + 
}) } -func (s *SyncTester) GetBlockByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { - return nil, nil +func (s *SyncTester) GetBlockByHash(ctx context.Context, hash common.Hash, fullTx bool) (json.RawMessage, error) { + return session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) (json.RawMessage, error) { + logger.Debug("GetBlockByHash", "hash", hash, "fullTx", fullTx) + var err error + var raw json.RawMessage + if raw, err = s.elReader.GetBlockByHashJSON(ctx, hash, fullTx); err != nil { + return nil, err + } + var header HeaderNumberOnly + if err := json.Unmarshal(raw, &header); err != nil { + return nil, err + } + target := header.Number.ToInt().Uint64() + if target > session.CurrentState.Latest { + logger.Warn("Requested block is ahead of sync tester state", "requested", target) + return nil, ethereum.NotFound + } + return raw, nil + }) } -func (s *SyncTester) GetBlockByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { - return nil, nil +func (s *SyncTester) checkBlockNumber(number rpc.BlockNumber, session *eth.SyncTesterSession, logger log.Logger) (uint64, error) { + var target uint64 + switch number { + case rpc.LatestBlockNumber: + target = session.CurrentState.Latest + case rpc.SafeBlockNumber: + target = session.CurrentState.Safe + case rpc.FinalizedBlockNumber: + target = session.CurrentState.Finalized + case rpc.PendingBlockNumber, rpc.EarliestBlockNumber: + // pending, earliest block label not supported + return 0, ethereum.NotFound + default: + if number.Int64() < 0 { + // safety guard for overflow + return 0, ethereum.NotFound + } + target = uint64(number.Int64()) + // Short circuit for numeric request beyond sync tester canonical head + if target > session.CurrentState.Latest { + logger.Warn("Requested block is ahead of sync tester state", "requested", target) + return 0, ethereum.NotFound + } + } + return target, nil } -func (s *SyncTester) ChainId(ctx 
context.Context) (eth.ChainID, error) { - return s.chainID, nil +func (s *SyncTester) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (json.RawMessage, error) { + return session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) (json.RawMessage, error) { + logger.Debug("GetBlockByNumber", "number", number, "fullTx", fullTx) + var err error + var target uint64 + if target, err = s.checkBlockNumber(number, session, logger); err != nil { + return nil, err + } + var raw json.RawMessage + if raw, err = s.elReader.GetBlockByNumberJSON(ctx, rpc.BlockNumber(target), fullTx); err != nil { + return nil, err + } + return raw, nil + }) } -func (s *SyncTester) GetPayloadV1(ctx context.Context, payloadID eth.PayloadID) (*eth.ExecutionPayload, error) { - return nil, nil +func (s *SyncTester) ChainId(ctx context.Context) (hexutil.Big, error) { + return session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) (hexutil.Big, error) { + logger.Debug("ChainId") + chainID, err := s.elReader.ChainId(ctx) + if err != nil { + return hexutil.Big{}, err + } + if chainID.ToInt().Cmp(s.chainID.ToBig()) != 0 { + logger.Error("ChainId mismatch", "config", s.chainID, "backend", chainID.ToInt()) + return hexutil.Big{}, fmt.Errorf("chainID mismatch: config: %s, backend: %s", s.chainID, chainID.ToInt()) + } + return hexutil.Big(*s.chainID.ToBig()), nil + }) +} + +// GetPayloadV1 only supports V1 payloads. 
+func (s *SyncTester) GetPayloadV1(ctx context.Context, payloadID eth.PayloadID) (*eth.ExecutionPayloadEnvelope, error) { + return session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) (*eth.ExecutionPayloadEnvelope, error) { + logger.Debug("GetPayloadV1", "payloadID", payloadID) + if !payloadID.Is(engine.PayloadV1) { + return nil, engine.UnsupportedFork + } + return s.getPayload(session, logger, payloadID) + }) } +// GetPayloadV2 supports V1, V2 payloads. func (s *SyncTester) GetPayloadV2(ctx context.Context, payloadID eth.PayloadID) (*eth.ExecutionPayloadEnvelope, error) { - return nil, nil + return session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) (*eth.ExecutionPayloadEnvelope, error) { + logger.Debug("GetPayloadV2", "payloadID", payloadID) + if !payloadID.Is(engine.PayloadV1, engine.PayloadV2) { + return nil, engine.UnsupportedFork + } + return s.getPayload(session, logger, payloadID) + }) } +// GetPayloadV3 must be only called when Ecotone activated. func (s *SyncTester) GetPayloadV3(ctx context.Context, payloadID eth.PayloadID) (*eth.ExecutionPayloadEnvelope, error) { - return nil, nil + return session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) (*eth.ExecutionPayloadEnvelope, error) { + logger.Debug("GetPayloadV3", "payloadID", payloadID) + if !payloadID.Is(engine.PayloadV3) { + return nil, engine.UnsupportedFork + } + return s.getPayload(session, logger, payloadID) + }) } +// GetPayloadV4 must be only called when Isthmus activated. 
func (s *SyncTester) GetPayloadV4(ctx context.Context, payloadID eth.PayloadID) (*eth.ExecutionPayloadEnvelope, error) { - return nil, nil + return session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) (*eth.ExecutionPayloadEnvelope, error) { + logger.Debug("GetPayloadV4", "payloadID", payloadID) + if !payloadID.Is(engine.PayloadV3) { + return nil, engine.UnsupportedFork + } + return s.getPayload(session, logger, payloadID) + }) } +// getPayload retrieves an execution payload previously initialized by +// ForkchoiceUpdated engine APIs when valid payload attributes were provided. +// Retrieved payloads are deleted from the session after being served to +// emulate one-time consumption by the consensus layer. +func (s *SyncTester) getPayload(session *eth.SyncTesterSession, logger log.Logger, payloadID eth.PayloadID) (*eth.ExecutionPayloadEnvelope, error) { + payloadEnv, ok := session.Payloads[payloadID] + if !ok { + return nil, engine.UnknownPayload + } + // Clean up payload + delete(session.Payloads, payloadID) + logger.Trace("Deleted payload", "payloadID", payloadID) + return payloadEnv, nil +} + +// ForkchoiceUpdatedV1 is called for processing V1 attributes func (s *SyncTester) ForkchoiceUpdatedV1(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) { - return nil, nil + return session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) (*eth.ForkchoiceUpdatedResult, error) { + logger.Debug("ForkchoiceUpdatedV1", "state", state, "attr", attr) + return s.forkchoiceUpdated(ctx, session, logger, state, attr, engine.PayloadV1, false, false) + }) } +// ForkchoiceUpdatedV2 is called for processing V2 attributes func (s *SyncTester) ForkchoiceUpdatedV2(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) { - return nil, nil + return session.WithSession(s.sessMgr, 
ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) (*eth.ForkchoiceUpdatedResult, error) { + logger.Debug("ForkchoiceUpdatedV2", "state", state, "attr", attr) + return s.forkchoiceUpdated(ctx, session, logger, state, attr, engine.PayloadV2, true, false) + }) } +// ForkchoiceUpdatedV3 must be only called with Ecotone attributes func (s *SyncTester) ForkchoiceUpdatedV3(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) { - return nil, nil + return session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) (*eth.ForkchoiceUpdatedResult, error) { + logger.Debug("ForkchoiceUpdatedV3", "state", state, "attr", attr) + return s.forkchoiceUpdated(ctx, session, logger, state, attr, engine.PayloadV3, true, true) + }) +} + +// forkchoiceUpdated processes a forkchoice state update from the consensus +// layer, validates the request against the current execution layer state, and +// optionally initializes a new payload build process if payload attributes are +// provided. When payload attributes are not nil and validation succeeds, the +// derived payload is stored for later retrieval via GetPayload. +// +// Return values: +// - {status: VALID, latestValidHash: headBlockHash, payloadId: id} when the +// forkchoice state is applied successfully and payload attributes were +// provided and validated. +// - {status: VALID, latestValidHash: headBlockHash, payloadId: null} when the +// forkchoice state is applied successfully but no payload build was started +// (attr was not provided). +// - {status: INVALID, latestValidHash: null, validationError: err} when payload +// attributes are malformed or finalized/safe blocks are not canonical. +// - {status: SYNCING} when the head block is unknown or not yet validated, or +// when block data cannot be retrieved from the execution layer. 
+func (s *SyncTester) forkchoiceUpdated(ctx context.Context, session *eth.SyncTesterSession, logger log.Logger, state *eth.ForkchoiceState, attr *eth.PayloadAttributes, payloadVersion engine.PayloadVersion, + isCanyon, isEcotone bool, +) (*eth.ForkchoiceUpdatedResult, error) { + // Validate attributes shape + if attr != nil { + if isEcotone { + // https://github.com/ethereum/execution-apis/blob/bc5a37ee69a64769bd8d0a2056672361ef5f3839/src/engine/cancun.md#engine_forkchoiceupdatedv3 + // Spec: payloadAttributes matches the PayloadAttributesV3 structure, return -38003: Invalid payload attributes on failure. + // Ecotone activated Cancun + if attr.ParentBeaconBlockRoot == nil { + return ð.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionInvalid}, PayloadID: nil}, engine.InvalidPayloadAttributes.With(errors.New("missing beacon root")) + } + if attr.Withdrawals == nil { + return ð.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionInvalid}, PayloadID: nil}, engine.InvalidPayloadAttributes.With(errors.New("missing withdrawals")) + } + } else if isCanyon { + if attr.ParentBeaconBlockRoot != nil { + return ð.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionInvalid}, PayloadID: nil}, engine.InvalidPayloadAttributes.With(errors.New("unexpected beacon root")) + } + // Canyon activated Shanghai + if attr.Withdrawals == nil { + return ð.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionInvalid}, PayloadID: nil}, engine.InvalidPayloadAttributes.With(errors.New("missing withdrawals")) + } + } else { + // Bedrock + if attr.Withdrawals != nil || attr.ParentBeaconBlockRoot != nil { + return ð.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionInvalid}, PayloadID: nil}, engine.InvalidParams.With(errors.New("withdrawals and beacon root not supported")) + } + } + } + // Simulate head block hash check + candLatest, err := 
s.elReader.GetBlockByHash(ctx, state.HeadBlockHash) + // https://github.com/ethereum/execution-apis/blob/584905270d8ad665718058060267061ecfd79ca5/src/engine/paris.md#specification-1 + // Spec: {payloadStatus: {status: SYNCING, latestValidHash: null, validationError: null}, payloadId: null} if forkchoiceState.headBlockHash references an unknown payload or a payload that can't be validated because requisite data for the validation is missing + if err != nil { + // Consider as sync error if read only EL interaction fails because we cannot validate + return ð.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionSyncing}, PayloadID: nil}, nil + } + if candLatest.NumberU64() > session.Validated { + // Let CL backfill via newPayload + return ð.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionSyncing}, PayloadID: nil}, nil + } + // Equivalent to SetCanonical + session.UpdateFCULatest(candLatest.NumberU64()) + logger.Debug("Updated FCU State", "latest", session.CurrentState.Latest) + // Simulate db check for finalized head + if state.FinalizedBlockHash != (common.Hash{}) { + // https://github.com/ethereum/execution-apis/blob/584905270d8ad665718058060267061ecfd79ca5/src/engine/paris.md#specification-1 + // Spec: MUST return -38002: Invalid forkchoice state error if the payload referenced by forkchoiceState.headBlockHash is VALID and a payload referenced by either forkchoiceState.finalizedBlockHash or forkchoiceState.safeBlockHash does not belong to the chain defined by forkchoiceState.headBlockHash. 
+ candFinalized, err := s.elReader.GetBlockByHash(ctx, state.FinalizedBlockHash) + if err != nil { + return &eth.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionInvalid}, PayloadID: nil}, engine.InvalidForkChoiceState.With(errors.New("finalized block not available")) + } + finalizedNum := candFinalized.NumberU64() + if session.CurrentState.Latest < finalizedNum { + return &eth.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionInvalid}, PayloadID: nil}, engine.InvalidForkChoiceState.With(errors.New("finalized block not canonical")) + } + // Equivalent to SetFinalized + session.UpdateFCUFinalized(finalizedNum) + logger.Debug("Updated FCU State", "finalized", session.CurrentState.Finalized) + } + // Simulate db check for safe head + if state.SafeBlockHash != (common.Hash{}) { + // https://github.com/ethereum/execution-apis/blob/584905270d8ad665718058060267061ecfd79ca5/src/engine/paris.md#specification-1 + // Spec: MUST return -38002: Invalid forkchoice state error if the payload referenced by forkchoiceState.headBlockHash is VALID and a payload referenced by either forkchoiceState.finalizedBlockHash or forkchoiceState.safeBlockHash does not belong to the chain defined by forkchoiceState.headBlockHash. 
+ candSafe, err := s.elReader.GetBlockByHash(ctx, state.SafeBlockHash) + if err != nil { + return ð.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionInvalid}, PayloadID: nil}, engine.InvalidForkChoiceState.With(errors.New("safe block not available")) + } + safeNum := candSafe.NumberU64() + if session.CurrentState.Latest < safeNum { + return ð.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionInvalid}, PayloadID: nil}, engine.InvalidForkChoiceState.With(errors.New("safe block not canonical")) + } + // Equivalent to SetSafe + session.UpdateFCUSafe(safeNum) + logger.Debug("Updated FCU State", "safe", session.CurrentState.Safe) + } + var id *engine.PayloadID + if attr != nil { + // attr is the ingredient for the block built after the head block + candNum := int64(candLatest.NumberU64()) + // Query read only EL to fetch block which is desired to be produced from attr + newBlock, err := s.elReader.GetBlockByNumber(ctx, rpc.BlockNumber(candNum+1)) + if err != nil { + // Consider as sync error if read only EL interaction fails because we cannot validate + return ð.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionSyncing}, PayloadID: nil}, nil + } + // https://github.com/ethereum-optimism/specs/blob/972dec7c7c967800513c354b2f8e5b79340de1c3/specs/protocol/holocene/exec-engine.md#eip-1559-parameters-in-block-header + // Implicitly determine whether holocene is enabled by inspecting extraData from read only EL data + isHolocene := eip1559.ValidateHoloceneExtraData(newBlock.Header().Extra) == nil + // Sanity check attr comparing with newBlock + if err := s.validateAttributesForBlock(attr, newBlock, isHolocene); err != nil { + // https://github.com/ethereum/execution-apis/blob/584905270d8ad665718058060267061ecfd79ca5/src/engine/paris.md#specification-1 + // Client software MUST respond to this method call in the following way: {error: {code: -38003, message: "Invalid payload attributes"}} 
if the payload is deemed VALID and forkchoiceState has been applied successfully, but no build process has been started due to invalid payloadAttributes. + return ð.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionInvalid}, PayloadID: nil}, engine.InvalidPayloadAttributes.With(err) + } + // https://github.com/ethereum-optimism/specs/blob/7b39adb0bea3b0a56d6d3a7d61feef5c33e49b73/specs/protocol/isthmus/exec-engine.md#header-validity-rules + // Implicitly determine whether isthmus is enabled by inspecting withdrawalsRoot from read only EL data + isIsthmus := newBlock.WithdrawalsRoot() != nil && len(*newBlock.WithdrawalsRoot()) == 32 + // Initialize payload args for sane payload ID + // All attr fields already sanity checked + args := miner.BuildPayloadArgs{ + Parent: state.HeadBlockHash, + Timestamp: uint64(attr.Timestamp), + FeeRecipient: attr.SuggestedFeeRecipient, + Random: common.Hash(attr.PrevRandao), + BeaconRoot: attr.ParentBeaconBlockRoot, + NoTxPool: attr.NoTxPool, + Transactions: newBlock.Transactions(), + GasLimit: &newBlock.Header().GasLimit, + Version: payloadVersion, + } + config := ¶ms.ChainConfig{} + if isCanyon { + args.Withdrawals = *attr.Withdrawals + config.CanyonTime = new(uint64) + } + if isHolocene { + args.EIP1559Params = (*attr.EIP1559Params)[:] + } + if isIsthmus { + config.IsthmusTime = new(uint64) + } + payloadID := args.Id() + id = &payloadID + payloadEnv, err := eth.BlockAsPayloadEnv(newBlock, config) + if err != nil { + // The failure is from the EL processing so consider as a server error and make CL retry + return ð.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionInvalid}, PayloadID: nil}, engine.GenericServerError.With(err) + } + // Store payload and payloadID. 
This will be processed using GetPayload engine API + logger.Debug("Store payload", "payloadID", payloadID) + session.Payloads[payloadID] = payloadEnv + } + // https://github.com/ethereum/execution-apis/blob/584905270d8ad665718058060267061ecfd79ca5/src/engine/paris.md#specification-1 + // Spec: Client software MUST respond to this method call in the following way: {payloadStatus: {status: VALID, latestValidHash: forkchoiceState.headBlockHash, validationError: null}, payloadId: buildProcessId} if the payload is deemed VALID and the build process has begun + return ð.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionValid, LatestValidHash: &state.HeadBlockHash}, PayloadID: id}, nil +} + +// validateAttributesForBlock verifies that a given block matches the expected +// execution payload attributes. It ensures consistency between the provided +// PayloadAttributes and the block header and body. +// +// OP Stack additions: +// - Transaction count and raw transaction bytes must match exactly. +// - NoTxPool must be always true, since sync tester only runs in verifier mode. +// - Gas limit must match. +// - If Holocene is active: Extra data must be exactly 9 bytes, the version byte must equal to 0, +// the remaining 8 bytes must match the EIP-1559 parameters. +// +// Returns an error if any mismatch or invalid condition is found, otherwise nil. 
+func (s *SyncTester) validateAttributesForBlock(attr *eth.PayloadAttributes, block *types.Block, isHolocene bool) error { + h := block.Header() + if h.Time != uint64(attr.Timestamp) { + return fmt.Errorf("timestamp mismatch: header=%d, attr=%d", h.Time, attr.Timestamp) + } + if h.MixDigest != common.Hash(attr.PrevRandao) { + return fmt.Errorf("prevRandao mismatch: header=%s, attr=%s", h.MixDigest, attr.PrevRandao) + } + if h.Coinbase != attr.SuggestedFeeRecipient { + return fmt.Errorf("coinbase mismatch: header=%s, attr=%s", h.Coinbase, attr.SuggestedFeeRecipient) + } + if attr.Withdrawals != nil && len(*attr.Withdrawals) != 0 { + return errors.New("withdrawals must be nil or empty") + } + if (attr.ParentBeaconBlockRoot == nil) != (h.ParentBeaconRoot == nil) { + return fmt.Errorf("parentBeaconBlockRoot mismatch: attr=%v, header=%v", attr.ParentBeaconBlockRoot, h.ParentBeaconRoot) + } + if h.ParentBeaconRoot != nil && (*attr.ParentBeaconBlockRoot).Cmp(*h.ParentBeaconRoot) != 0 { + return fmt.Errorf("parentBeaconBlockRoot mismatch: attr=%s, header=%s", *attr.ParentBeaconBlockRoot, *h.ParentBeaconRoot) + } + // OP Stack additions + if len(attr.Transactions) != len(block.Transactions()) { + return fmt.Errorf("tx count mismatch: attr=%d, block=%d", len(attr.Transactions), len(block.Transactions())) + } + for idx := range len(attr.Transactions) { + blockTx := block.Transactions()[idx] + blockTxRaw, err := blockTx.MarshalBinary() + if err != nil { + return fmt.Errorf("failed to marshal block tx: %w", err) + } + if !bytes.Equal([]byte(attr.Transactions[idx]), blockTxRaw) { + return fmt.Errorf("tx mismatch: tx=%s, idx=%d", attr.Transactions[idx], idx) + } + } + if !attr.NoTxPool { + // Sync Tester only supports verifier sync + return errors.New("txpool cannot be enabled yet") + } + if *attr.GasLimit != eth.Uint64Quantity(h.GasLimit) { + return fmt.Errorf("gaslimit mismatch: attr=%d, header=%d", *attr.GasLimit, h.GasLimit) + } + if isHolocene { + // 
https://github.com/ethereum-optimism/specs/blob/972dec7c7c967800513c354b2f8e5b79340de1c3/specs/protocol/holocene/exec-engine.md#encoding + // Spec: At and after Holocene activation, eip1559Parameters in PayloadAttributeV3 must be exactly 8 bytes with the following format + if attr.EIP1559Params == nil { + return errors.New("holocene enabled but EIP1559Params nil") + } + if err := eip1559.ValidateHolocene1559Params((*attr.EIP1559Params)[:]); err != nil { + return fmt.Errorf("invalid eip1559Params: %w", err) + } + denominator, elasticity := eip1559.DecodeHolocene1559Params((*attr.EIP1559Params)[:]) + if denominator == 0 && elasticity == 0 { + // https://github.com/ethereum-optimism/specs/blob/972dec7c7c967800513c354b2f8e5b79340de1c3/specs/protocol/holocene/exec-engine.md#payload-attributes-processing + // Spec: The denominator and elasticity values within this extraData must correspond to those in eip1559Parameters, unless both are 0. When both are 0, the prior EIP-1559 constants must be used to populate extraData instead. + // Cannot validate since EL will fall back to prior eip1559 constants + return nil + } + if !bytes.Equal(block.Extra()[1:], (*attr.EIP1559Params)[:]) { + return fmt.Errorf("eip1559Params mismatch: %s != 0x%s", *attr.EIP1559Params, hex.EncodeToString(block.Extra()[1:])) + } + } else { + // https://github.com/ethereum-optimism/specs/blob/972dec7c7c967800513c354b2f8e5b79340de1c3/specs/protocol/holocene/exec-engine.md#payload-attributes-processing + // Spec: Prior to Holocene activation, eip1559Parameters in PayloadAttributesV3 must be null and is otherwise considered invalid. + if attr.EIP1559Params != nil { + return fmt.Errorf("holocene disabled but EIP1559Params not nil. 
eip1559Params: %s", attr.EIP1559Params) + } + } + return nil } +// NewPayloadV1 must be only called with Bedrock Payload func (s *SyncTester) NewPayloadV1(ctx context.Context, payload *eth.ExecutionPayload) (*eth.PayloadStatusV1, error) { - return nil, nil + return session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) (*eth.PayloadStatusV1, error) { + logger.Debug("NewPayloadV1", "payload", payload) + return s.newPayload(ctx, session, logger, payload, nil, nil, nil, false, false) + }) } +// NewPayloadV2 must be only called with Bedrock, Canyon, Delta Payload func (s *SyncTester) NewPayloadV2(ctx context.Context, payload *eth.ExecutionPayload) (*eth.PayloadStatusV1, error) { - return nil, nil + return session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) (*eth.PayloadStatusV1, error) { + logger.Debug("NewPayloadV2", "payload", payload) + return s.newPayload(ctx, session, logger, payload, nil, nil, nil, false, false) + }) } +// NewPayloadV3 must be only called with Ecotone Payload func (s *SyncTester) NewPayloadV3(ctx context.Context, payload *eth.ExecutionPayload, versionedHashes []common.Hash, beaconRoot *common.Hash) (*eth.PayloadStatusV1, error) { - return nil, nil + return session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger log.Logger) (*eth.PayloadStatusV1, error) { + logger.Debug("NewPayloadV3", "payload", payload, "versionedHashes", versionedHashes, "beaconRoot", beaconRoot) + return s.newPayload(ctx, session, logger, payload, versionedHashes, beaconRoot, nil, true, false) + }) } +// NewPayloadV4 must be only called with Isthmus payload func (s *SyncTester) NewPayloadV4(ctx context.Context, payload *eth.ExecutionPayload, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes) (*eth.PayloadStatusV1, error) { + return session.WithSession(s.sessMgr, ctx, s.log, func(session *eth.SyncTesterSession, logger 
log.Logger) (*eth.PayloadStatusV1, error) { + logger.Debug("NewPayloadV4", "payload", payload, "versionedHashes", versionedHashes, "beaconRoot", beaconRoot, "executionRequests", executionRequests) + return s.newPayload(ctx, session, logger, payload, versionedHashes, beaconRoot, executionRequests, true, true) + }) +} + +func (s *SyncTester) validatePayload(logger log.Logger, isCanyon, isIsthmus bool, block *types.Block, payload *eth.ExecutionPayload, beaconRoot *common.Hash) (*eth.PayloadStatusV1, error) { + // Already have the block locally or advance single block without setting the head + // https://github.com/ethereum/execution-apis/blob/584905270d8ad665718058060267061ecfd79ca5/src/engine/shanghai.md#specification + // Spec: MUST return {status: INVALID, latestValidHash: null, validationError: errorMessage | null} if the blockHash validation has failed. + blockHash := block.Hash() + config := &params.ChainConfig{} + if isCanyon { + config.CanyonTime = new(uint64) + } + if isIsthmus { + config.IsthmusTime = new(uint64) + } + correctPayload, err := eth.BlockAsPayload(block, config) + if err != nil { + // The failure is from the EL processing so consider as a server error and make CL retry + return &eth.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.GenericServerError.With(wrapSyncTesterError("failed to convert block to payload", err)) + } + // Sanity check parent beacon block root and block hash by recomputation + if !isIsthmus { + // Depopulate withdrawal root field for block hash recomputation + if payload.WithdrawalsRoot != nil { + logger.Warn("Isthmus disabled but withdrawal roots included in payload not nil", "root", payload.WithdrawalsRoot) + } + payload.WithdrawalsRoot = nil + } + // Check given payload matches the payload derived using the read only EL block + if err := correctPayload.CheckEqual(payload); err != nil { + // Consider as block hash validation error when payload mismatch + return s.newPayloadInvalid(fmt.Errorf("payload check mismatch: %w", 
err), nil), nil + } + execEnvelope := eth.ExecutionPayloadEnvelope{ParentBeaconBlockRoot: beaconRoot, ExecutionPayload: payload} + actual, ok := execEnvelope.CheckBlockHash() + if blockHash != payload.BlockHash || !ok { + return s.newPayloadInvalid(fmt.Errorf("block hash check from execution envelope failed. %s != %s", blockHash, actual), nil), nil + } return nil, nil } + +// newPayload validates and processes a new execution payload according to the +// Engine API rules to simulate consensus-layer to execution-layer interactions +// without advancing canonical chain state. +// +// The method enforces mandatory post-fork fields, including withdrawals, excessBlobGas, +// blobGasUsed, versionedHashes, beaconRoot, executionRequests, and withdrawalsRoot, +// returning an InvalidParams error if any are missing or improperly shaped. +// +// Return values: +// - {status: VALID, latestValidHash: payload.blockHash} if validation succeeds. +// - {status: INVALID, latestValidHash: null, validationError: err} on mismatch +// or malformed payloads. +// - {status: SYNCING} when the block cannot be executed because its parent is missing. +// - Errors surfaced as engine.InvalidParams or engine.GenericServerError to +// trigger appropriate consensus-layer retries. +func (s *SyncTester) newPayload(ctx context.Context, session *eth.SyncTesterSession, logger log.Logger, payload *eth.ExecutionPayload, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes, + isEcotone, isIsthmus bool, +) (*eth.PayloadStatusV1, error) { + // Validate request shape, fork required fields + // https://github.com/ethereum/execution-apis/blob/584905270d8ad665718058060267061ecfd79ca5/src/engine/shanghai.md#engine_newpayloadv2 + // Spec: Client software MUST return -32602: Invalid params error if the wrong version of the structure is used in the method call. 
+ if isEcotone { + if payload.ExcessBlobGas == nil { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("nil excessBlobGas post-cancun")) + } + if payload.BlobGasUsed == nil { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("nil blobGasUsed post-cancun")) + } + if versionedHashes == nil { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("nil versionedHashes post-cancun")) + } + if beaconRoot == nil { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("nil beaconRoot post-cancun")) + } + } else { + if payload.ExcessBlobGas != nil { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("non-nil excessBlobGas pre-cancun")) + } + if payload.BlobGasUsed != nil { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("non-nil blobGasUsed pre-cancun")) + } + } + if isIsthmus { + if executionRequests == nil { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("nil executionRequests post-prague")) + } + } + // OP Stack specific request shape validation + if isEcotone { + if len(versionedHashes) != 0 { + // https://github.com/ethereum-optimism/specs/blob/a773587fca6756f8468164613daa79fcee7bbbe4/specs/protocol/exec-engine.md#engine_newpayloadv3 + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(fmt.Errorf("versionedHashes length non-zero: %d", len(versionedHashes))) + } + } + if isIsthmus { + if payload.WithdrawalsRoot == nil { + // https://github.com/ethereum-optimism/specs/blob/7b39adb0bea3b0a56d6d3a7d61feef5c33e49b73/specs/protocol/isthmus/exec-engine.md#update-to-executionpayload + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("nil withdrawalsRoot post-isthmus")) + } + if 
len(executionRequests) != 0 { + // https://github.com/ethereum-optimism/specs/blob/a773587fca6756f8468164613daa79fcee7bbbe4/specs/protocol/exec-engine.md#engine_newpayloadv4 + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(fmt.Errorf("executionRequests must be empty array but got %d", len(executionRequests))) + } + } + // Look up canonical block for relay comparison + block, err := s.elReader.GetBlockByHash(ctx, payload.BlockHash) + if err != nil { + if !errors.Is(err, ethereum.NotFound) { + // Do not retry when error did not occur because of Not found error + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.GenericServerError.With(wrapSyncTesterError("failed to fetch block", err)) + } + // Not found error may be recovered when given payload is near the sequencer tip. + // Read only EL may not be ready yet. In this case, retry once more after waiting block time (2 seconds) + logger.Warn("Block not found while validating new payload. Retrying", "number", payload.BlockNumber, "hash", payload.BlockHash) + select { + case <-time.After(2 * time.Second): + case <-ctx.Done(): + // Handle case when context cancelled while waiting. 
+ return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.GenericServerError.With(fmt.Errorf("context done: %w", ctx.Err())) + } + block, err = s.elReader.GetBlockByHash(ctx, payload.BlockHash) + if err != nil { + if errors.Is(err, ethereum.NotFound) { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.GenericServerError.With(wrapSyncTesterError("block not found after retry", err)) + } + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.GenericServerError.With(wrapSyncTesterError("failed to fetch block after retry", err)) + } + // Use block info fetched by retrying + } + // https://github.com/ethereum-optimism/specs/blob/972dec7c7c967800513c354b2f8e5b79340de1c3/specs/protocol/derivation.md#building-individual-payload-attributes + // Implicitly determine whether canyon is enabled by inspecting withdrawals from read only EL data + isCanyon := block.Withdrawals() != nil + if isCanyon { + if payload.Withdrawals == nil { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("nil withdrawals post-shanghai")) + } + } else { + if payload.Withdrawals != nil { + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid}, engine.InvalidParams.With(errors.New("non-nil withdrawals pre-shanghai")) + } + } + blockHash := block.Hash() + blockNumber := block.NumberU64() + // We only attempt to advance non-canonical view of the chain, following the read only EL + if blockNumber <= session.Validated+1 { + if status, err := s.validatePayload(logger, isCanyon, isIsthmus, block, payload, beaconRoot); status != nil { + return status, err + } + if blockNumber == session.Validated+1 { + // Advance single block without setting the head, equivalent to geth InsertBlockWithoutSetHead + session.Validated += 1 + logger.Debug("Advanced non canonical chain", "validated", session.Validated) + } + if !session.IsELSyncFinished() && session.Validated == session.ELSyncTarget { + // Can reach here when not doing EL Sync on CL side 
but session is configured for EL Sync + logger.Debug("Non canonical chain reached EL Sync target", "validated", session.Validated) + session.FinishELSync(session.Validated) + } + // https://github.com/ethereum/execution-apis/blob/584905270d8ad665718058060267061ecfd79ca5/src/engine/paris.md#payload-validation + // Spec: If validation succeeds, the response MUST contain {status: VALID, latestValidHash: payload.blockHash} + return ð.PayloadStatusV1{Status: eth.ExecutionValid, LatestValidHash: &blockHash}, nil + } else if !session.IsELSyncFinished() { + if blockNumber == session.ELSyncTarget { + logger.Debug("Attempting to finish EL Sync on non canonical chain", "target", session.ELSyncTarget) + if status, err := s.validatePayload(logger, isCanyon, isIsthmus, block, payload, beaconRoot); status != nil { + return status, err + } + session.FinishELSync(blockNumber) + logger.Debug("Finished EL Sync by advancing non canonical chain", "validated", session.Validated) + // https://github.com/ethereum/execution-apis/blob/584905270d8ad665718058060267061ecfd79ca5/src/engine/paris.md#payload-validation + // Spec: If validation succeeds, the response MUST contain {status: VALID, latestValidHash: payload.blockHash} + return ð.PayloadStatusV1{Status: eth.ExecutionValid, LatestValidHash: &blockHash}, nil + } else if blockNumber < session.ELSyncTarget { + logger.Trace("EL Sync on progress", "target", blockNumber) + } else if blockNumber > session.ELSyncTarget { + // L2CL may never reach the EL Sync Target because the current number may keep increasing + logger.Warn("Received payload which has larger block number than EL Sync target", "current", blockNumber, "target", session.ELSyncTarget) + } + } + // Block not available so mark as syncing + return ð.PayloadStatusV1{Status: eth.ExecutionSyncing}, nil +} + +func wrapSyncTesterError(msg string, err error) error { + if err == nil { + return fmt.Errorf("sync tester: %s", msg) + } + return fmt.Errorf("sync tester: %s: %w", msg, err) +} + 
+func (s *SyncTester) newPayloadInvalid(err error, latestValid *types.Header) *eth.PayloadStatusV1 { + var currentHash *common.Hash + if latestValid != nil { + if latestValid.Difficulty.BitLen() != 0 { + // Set latest valid hash to 0x0 if parent is PoW block + currentHash = &common.Hash{} + } else { + // Otherwise set latest valid hash to parent hash + h := latestValid.Hash() + currentHash = &h + } + } + errorMsg := err.Error() + return ð.PayloadStatusV1{Status: eth.ExecutionInvalid, LatestValidHash: currentHash, ValidationError: &errorMsg} +} diff --git a/op-sync-tester/synctester/backend/sync_tester_test.go b/op-sync-tester/synctester/backend/sync_tester_test.go new file mode 100644 index 0000000000000..4caafae7a8ffb --- /dev/null +++ b/op-sync-tester/synctester/backend/sync_tester_test.go @@ -0,0 +1,480 @@ +package backend + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/backend/session" + sttypes "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/backend/types" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +var _ ReadOnlyELBackend = (*MockELReader)(nil) + +type MockELReader struct { + ChainID hexutil.Big + + BlocksByHash map[common.Hash]*json.RawMessage + BlocksByNumber map[rpc.BlockNumber]*json.RawMessage + + ReceiptsByHash map[common.Hash][]*types.Receipt + ReceiptsByNumber map[rpc.BlockNumber][]*types.Receipt + + Latest *json.RawMessage + Safe *json.RawMessage + Finalized *json.RawMessage +} + +func NewMockELReader(chainID eth.ChainID) *MockELReader { + 
return &MockELReader{ + ChainID: hexutil.Big(*chainID.ToBig()), + BlocksByHash: make(map[common.Hash]*json.RawMessage), + BlocksByNumber: make(map[rpc.BlockNumber]*json.RawMessage), + ReceiptsByHash: make(map[common.Hash][]*types.Receipt), + ReceiptsByNumber: make(map[rpc.BlockNumber][]*types.Receipt), + } +} + +func (m *MockELReader) ChainId(ctx context.Context) (hexutil.Big, error) { + return m.ChainID, nil +} + +func (m *MockELReader) GetBlockByNumberJSON(ctx context.Context, number rpc.BlockNumber, fullTx bool) (json.RawMessage, error) { + raw, ok := m.BlocksByNumber[number] + if !ok { + return nil, ethereum.NotFound + } + return *raw, nil +} + +func (m *MockELReader) GetBlockByHashJSON(ctx context.Context, hash common.Hash, fullTx bool) (json.RawMessage, error) { + raw, ok := m.BlocksByHash[hash] + if !ok { + return nil, ethereum.NotFound + } + return *raw, nil +} + +func (m *MockELReader) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { + return nil, nil +} + +func (m *MockELReader) GetBlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + return nil, nil +} + +func (m *MockELReader) GetBlockReceipts(ctx context.Context, bnh rpc.BlockNumberOrHash) ([]*types.Receipt, error) { + hash, isHash := bnh.Hash() + if isHash { + receipts, ok := m.ReceiptsByHash[hash] + if !ok { + return nil, ethereum.NotFound + } + return receipts, nil + } + number, isNumber := bnh.Number() + if !isNumber { + // bnh is not a number and not a hash so return not found + return nil, ethereum.NotFound + } + receipts, ok := m.ReceiptsByNumber[number] + if !ok { + return nil, ethereum.NotFound + } + return receipts, nil +} + +func initTestSyncTester(t *testing.T, chainID eth.ChainID, elReader ReadOnlyELBackend) *SyncTester { + syncTester := NewSyncTester(testlog.Logger(t, log.LevelInfo), nil, sttypes.SyncTesterID("test"), chainID, elReader) + return syncTester +} + +func TestSyncTester_ChainId(t *testing.T) { + dummySession := 
ð.SyncTesterSession{SessionID: uuid.New().String()} + tests := []struct { + name string + cfgID eth.ChainID + elID eth.ChainID + session *eth.SyncTesterSession + wantErrContains string + }{ + { + name: "no session", + cfgID: eth.ChainIDFromUInt64(1), + elID: eth.ChainIDFromUInt64(1), + wantErrContains: "no session", + }, + { + name: "happy path", + cfgID: eth.ChainIDFromUInt64(11155111), + elID: eth.ChainIDFromUInt64(11155111), + session: dummySession, + }, + { + name: "mismatch", + cfgID: eth.ChainIDFromUInt64(1), + elID: eth.ChainIDFromUInt64(11155111), + session: dummySession, + wantErrContains: "chainID mismatch", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + mock := NewMockELReader(tc.elID) + st := initTestSyncTester(t, tc.cfgID, mock) + ctx := context.Background() + if tc.session != nil { + ctx = session.WithSyncTesterSession(ctx, tc.session) + } + got, err := st.ChainId(ctx) + if tc.wantErrContains != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.wantErrContains) + return + } + require.NoError(t, err) + require.Equal(t, hexutil.Big(*tc.cfgID.ToBig()), got) + }) + } +} + +func makeBlockRaw(num uint64) *json.RawMessage { + raw := json.RawMessage(fmt.Sprintf(`{"number":"0x%x"}`, num)) + return &raw +} + +func TestSyncTester_GetBlockByHash(t *testing.T) { + hash := common.HexToHash("0xdeadbeef") + tests := []struct { + name string + sessionLatest uint64 + rawNumber uint64 // block.number returned by EL + session *eth.SyncTesterSession + wantErrContains string + }{ + { + name: "no session", + sessionLatest: 0, + rawNumber: 0, + session: nil, + wantErrContains: "no session", + }, + { + name: "block number greater than latest", + sessionLatest: 100, + rawNumber: 101, // greater than Latest + session: ð.SyncTesterSession{SessionID: uuid.New().String(), CurrentState: eth.FCUState{Latest: 100}}, + wantErrContains: "not found", + }, + { + name: "happy path", + sessionLatest: 100, + rawNumber: 99, + session: 
ð.SyncTesterSession{SessionID: uuid.New().String(), CurrentState: eth.FCUState{Latest: 100}}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + el := NewMockELReader(eth.ChainIDFromUInt64(1)) + block := makeBlockRaw(tc.rawNumber) + el.BlocksByHash[hash] = block + st := initTestSyncTester(t, eth.ChainIDFromUInt64(1), el) + ctx := context.Background() + if tc.session != nil { + ctx = session.WithSyncTesterSession(ctx, tc.session) + } + raw, err := st.GetBlockByHash(ctx, hash, false) + if tc.wantErrContains != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.wantErrContains) + return + } + require.NoError(t, err) + require.NotNil(t, raw) + + var header HeaderNumberOnly + require.NoError(t, json.Unmarshal(raw, &header)) + require.EqualValues(t, tc.rawNumber, header.Number.ToInt().Uint64()) + }) + } +} + +func TestSyncTester_GetBlockByNumber(t *testing.T) { + type testCase struct { + name string + session *eth.SyncTesterSession + inNumber rpc.BlockNumber + wantNum uint64 + wantErrContains string + } + + tests := []testCase{ + { + name: "no session", + session: nil, + wantErrContains: "no session", + }, + { + name: "happy path: numeric less than latest", + session: ð.SyncTesterSession{ + SessionID: uuid.New().String(), + CurrentState: eth.FCUState{ + Latest: 100, + Safe: 95, + Finalized: 90, + }, + }, + inNumber: rpc.BlockNumber(99), + wantNum: 99, + }, + { + name: "happy path: label latest returns latest", + session: ð.SyncTesterSession{ + SessionID: uuid.New().String(), + CurrentState: eth.FCUState{ + Latest: 100, + Safe: 95, + Finalized: 90, + }, + }, + inNumber: rpc.LatestBlockNumber, + wantNum: 100, + }, + { + name: "happy path: label safe returns safe", + session: ð.SyncTesterSession{ + SessionID: uuid.New().String(), + CurrentState: eth.FCUState{ + Latest: 100, + Safe: 97, + Finalized: 90, + }, + }, + inNumber: rpc.SafeBlockNumber, + wantNum: 97, + }, + { + name: "happy path: label finalized returns finalized", + 
session: ð.SyncTesterSession{ + SessionID: uuid.New().String(), + CurrentState: eth.FCUState{ + Latest: 100, + Safe: 97, + Finalized: 92, + }, + }, + inNumber: rpc.FinalizedBlockNumber, + wantNum: 92, + }, + { + name: "pending returns not found", + session: ð.SyncTesterSession{ + SessionID: uuid.New().String(), + CurrentState: eth.FCUState{Latest: 100, Safe: 97, Finalized: 92}, + }, + inNumber: rpc.PendingBlockNumber, + wantErrContains: "not found", + }, + { + name: "earliest label returns not found", + session: ð.SyncTesterSession{ + SessionID: uuid.New().String(), + CurrentState: eth.FCUState{Latest: 100, Safe: 97, Finalized: 92}, + }, + inNumber: rpc.EarliestBlockNumber, + wantErrContains: "not found", + }, + { + name: "numeric greater than latest returns not found", + session: ð.SyncTesterSession{ + SessionID: uuid.New().String(), + CurrentState: eth.FCUState{Latest: 100, Safe: 97, Finalized: 92}, + }, + inNumber: rpc.BlockNumber(101), + wantErrContains: "not found", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + el := NewMockELReader(eth.ChainIDFromUInt64(1)) + if tc.session != nil { + el.BlocksByNumber[rpc.BlockNumber(tc.session.CurrentState.Latest)] = makeBlockRaw(tc.session.CurrentState.Latest) + el.BlocksByNumber[rpc.BlockNumber(tc.session.CurrentState.Safe)] = makeBlockRaw(tc.session.CurrentState.Safe) + el.BlocksByNumber[rpc.BlockNumber(tc.session.CurrentState.Finalized)] = makeBlockRaw(tc.session.CurrentState.Finalized) + } + el.BlocksByNumber[tc.inNumber] = makeBlockRaw(uint64(tc.inNumber.Int64())) + st := initTestSyncTester(t, eth.ChainIDFromUInt64(1), el) + ctx := context.Background() + if tc.session != nil { + ctx = session.WithSyncTesterSession(ctx, tc.session) + } + raw, err := st.GetBlockByNumber(ctx, tc.inNumber, false) + if tc.wantErrContains != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.wantErrContains) + return + } + require.NoError(t, err) + require.NotNil(t, raw) + var header 
HeaderNumberOnly + require.NoError(t, json.Unmarshal(raw, &header)) + require.EqualValues(t, tc.wantNum, header.Number.ToInt().Uint64()) + }) + } +} + +func TestSyncTester_GetBlockReceipts(t *testing.T) { + makeReceipts := func(n uint64) []*types.Receipt { + r := new(types.Receipt) + r.BlockNumber = new(big.Int).SetUint64(n) + return []*types.Receipt{r} + } + type testCase struct { + name string + session *eth.SyncTesterSession + arg rpc.BlockNumberOrHash + seedFn func(el *MockELReader, s *eth.SyncTesterSession) + wantFirstBN uint64 + wantErrContains string + } + hashGood := common.HexToHash("0xabc1") + hashTooNew := common.HexToHash("0xabc2") + tests := []testCase{ + { + name: "no session", + session: nil, + arg: rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), + wantErrContains: "no session", + }, + { + name: "happy: via hash, blockNumber less than latest", + session: ð.SyncTesterSession{ + SessionID: uuid.New().String(), + CurrentState: eth.FCUState{ + Latest: 100, + Safe: 95, + Finalized: 90, + }, + }, + arg: rpc.BlockNumberOrHashWithHash(hashGood, false), + seedFn: func(el *MockELReader, s *eth.SyncTesterSession) { + el.ReceiptsByHash[hashGood] = makeReceipts(s.CurrentState.Latest - 1) + }, + wantFirstBN: 99, + }, + { + name: "bad: via hash, blockNumber >= latest returns not found", + session: ð.SyncTesterSession{ + SessionID: uuid.New().String(), + CurrentState: eth.FCUState{ + Latest: 100, + Safe: 95, + Finalized: 90, + }, + }, + arg: rpc.BlockNumberOrHashWithHash(hashTooNew, false), + seedFn: func(el *MockELReader, s *eth.SyncTesterSession) { + // strictly greater than Latest so the post-check triggers NotFound + el.ReceiptsByHash[hashTooNew] = makeReceipts(s.CurrentState.Latest + 1) + }, + wantErrContains: "not found", + }, + { + name: "happy: label latest returns latest", + session: ð.SyncTesterSession{ + SessionID: uuid.New().String(), + CurrentState: eth.FCUState{Latest: 100, Safe: 95, Finalized: 90}, + }, + arg: 
rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), + seedFn: func(el *MockELReader, s *eth.SyncTesterSession) { + el.ReceiptsByNumber[rpc.BlockNumber(s.CurrentState.Latest)] = makeReceipts(s.CurrentState.Latest) + }, + wantFirstBN: 100, + }, + { + name: "happy: label safe returns safe", + session: ð.SyncTesterSession{ + SessionID: uuid.New().String(), + CurrentState: eth.FCUState{Latest: 100, Safe: 97, Finalized: 90}, + }, + arg: rpc.BlockNumberOrHashWithNumber(rpc.SafeBlockNumber), + seedFn: func(el *MockELReader, s *eth.SyncTesterSession) { + el.ReceiptsByNumber[rpc.BlockNumber(s.CurrentState.Safe)] = makeReceipts(s.CurrentState.Safe) + }, + wantFirstBN: 97, + }, + { + name: "happy: label finalized returns finalized", + session: ð.SyncTesterSession{ + SessionID: uuid.New().String(), + CurrentState: eth.FCUState{Latest: 100, Safe: 97, Finalized: 92}, + }, + arg: rpc.BlockNumberOrHashWithNumber(rpc.FinalizedBlockNumber), + seedFn: func(el *MockELReader, s *eth.SyncTesterSession) { + el.ReceiptsByNumber[rpc.BlockNumber(s.CurrentState.Finalized)] = makeReceipts(s.CurrentState.Finalized) + }, + wantFirstBN: 92, + }, + { + name: "happy: numeric less than latest", + session: ð.SyncTesterSession{ + SessionID: uuid.New().String(), + CurrentState: eth.FCUState{Latest: 100, Safe: 97, Finalized: 92}, + }, + arg: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(99)), + seedFn: func(el *MockELReader, _ *eth.SyncTesterSession) { + el.ReceiptsByNumber[rpc.BlockNumber(99)] = makeReceipts(99) + }, + wantFirstBN: 99, + }, + { + name: "bad: numeric greater than latest returns not found", + session: ð.SyncTesterSession{ + SessionID: uuid.New().String(), + CurrentState: eth.FCUState{Latest: 100, Safe: 97, Finalized: 92}, + }, + arg: rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(101)), + wantErrContains: "not found", + // No seeding needed: checkBlockNumber should fail before EL call + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + el := 
NewMockELReader(eth.ChainIDFromUInt64(1)) + if tc.seedFn != nil && tc.session != nil { + tc.seedFn(el, tc.session) + } + st := initTestSyncTester(t, eth.ChainIDFromUInt64(1), el) + ctx := context.Background() + if tc.session != nil { + ctx = session.WithSyncTesterSession(ctx, tc.session) + } + recs, err := st.GetBlockReceipts(ctx, tc.arg) + if tc.wantErrContains != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tc.wantErrContains) + return + } + require.NoError(t, err) + require.NotNil(t, recs) + require.GreaterOrEqual(t, len(recs), 1) + require.EqualValues(t, tc.wantFirstBN, recs[0].BlockNumber.Uint64()) + }) + } +} diff --git a/op-sync-tester/synctester/frontend/engine.go b/op-sync-tester/synctester/frontend/engine.go index d34af81b0c200..eefdcd4a6b6be 100644 --- a/op-sync-tester/synctester/frontend/engine.go +++ b/op-sync-tester/synctester/frontend/engine.go @@ -3,25 +3,14 @@ package frontend import ( "context" + "github.com/ethereum-optimism/optimism/op-service/apis" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" ) type EngineBackend interface { - GetPayloadV1(ctx context.Context, payloadID eth.PayloadID) (*eth.ExecutionPayload, error) - GetPayloadV2(ctx context.Context, payloadID eth.PayloadID) (*eth.ExecutionPayloadEnvelope, error) - GetPayloadV3(ctx context.Context, payloadID eth.PayloadID) (*eth.ExecutionPayloadEnvelope, error) - GetPayloadV4(ctx context.Context, payloadID eth.PayloadID) (*eth.ExecutionPayloadEnvelope, error) - - ForkchoiceUpdatedV1(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) - ForkchoiceUpdatedV2(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) - ForkchoiceUpdatedV3(ctx context.Context, state *eth.ForkchoiceState, attr *eth.PayloadAttributes) (*eth.ForkchoiceUpdatedResult, error) - - 
NewPayloadV1(ctx context.Context, payload *eth.ExecutionPayload) (*eth.PayloadStatusV1, error) - NewPayloadV2(ctx context.Context, payload *eth.ExecutionPayload) (*eth.PayloadStatusV1, error) - NewPayloadV3(ctx context.Context, payload *eth.ExecutionPayload, versionedHashes []common.Hash, beaconRoot *common.Hash) (*eth.PayloadStatusV1, error) - NewPayloadV4(ctx context.Context, payload *eth.ExecutionPayload, versionedHashes []common.Hash, beaconRoot *common.Hash, executionRequests []hexutil.Bytes) (*eth.PayloadStatusV1, error) + apis.EngineAPI } type EngineFrontend struct { @@ -32,7 +21,7 @@ func NewEngineFrontend(b EngineBackend) *EngineFrontend { return &EngineFrontend{b: b} } -func (e *EngineFrontend) GetPayloadV1(ctx context.Context, payloadID eth.PayloadID) (*eth.ExecutionPayload, error) { +func (e *EngineFrontend) GetPayloadV1(ctx context.Context, payloadID eth.PayloadID) (*eth.ExecutionPayloadEnvelope, error) { return e.b.GetPayloadV1(ctx, payloadID) } diff --git a/op-sync-tester/synctester/frontend/eth.go b/op-sync-tester/synctester/frontend/eth.go index bf9bf1eaac93b..98a0001fbd83a 100644 --- a/op-sync-tester/synctester/frontend/eth.go +++ b/op-sync-tester/synctester/frontend/eth.go @@ -2,21 +2,19 @@ package frontend import ( "context" - "math/big" + "encoding/json" + + "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/rpc" ) type EthBackend interface { - GetBlockByNumber(ctx context.Context, number *big.Int) (*types.Header, error) - GetBlockByHash(ctx context.Context, hash common.Hash) (*types.Header, error) - GetBlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.Receipt, error) - ChainId(ctx context.Context) (eth.ChainID, error) + apis.EthAPI } - type EthFrontend struct { b EthBackend } @@ 
-25,18 +23,18 @@ func NewEthFrontend(b EthBackend) *EthFrontend { return &EthFrontend{b: b} } -func (e *EthFrontend) GetBlockByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { - return e.b.GetBlockByNumber(ctx, number) +func (e *EthFrontend) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (json.RawMessage, error) { + return e.b.GetBlockByNumber(ctx, number, fullTx) } -func (e *EthFrontend) GetBlockByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { - return e.b.GetBlockByHash(ctx, hash) +func (e *EthFrontend) GetBlockByHash(ctx context.Context, hash common.Hash, fullTx bool) (json.RawMessage, error) { + return e.b.GetBlockByHash(ctx, hash, fullTx) } func (e *EthFrontend) GetBlockReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]*types.Receipt, error) { return e.b.GetBlockReceipts(ctx, blockNrOrHash) } -func (e *EthFrontend) ChainId(ctx context.Context) (eth.ChainID, error) { +func (e *EthFrontend) ChainId(ctx context.Context) (hexutil.Big, error) { return e.b.ChainId(ctx) } diff --git a/op-sync-tester/synctester/frontend/sync.go b/op-sync-tester/synctester/frontend/sync.go index 6e780c42bd0a1..e4e8f7beb2fad 100644 --- a/op-sync-tester/synctester/frontend/sync.go +++ b/op-sync-tester/synctester/frontend/sync.go @@ -2,14 +2,14 @@ package frontend import ( "context" + + "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/eth" ) type SyncBackend interface { - GetSession(ctx context.Context) error - DeleteSession(ctx context.Context) error - ListSessions(ctx context.Context) ([]string, error) + apis.SyncAPI } - type SyncFrontend struct { b SyncBackend } @@ -18,7 +18,7 @@ func NewSyncFrontend(b SyncBackend) *SyncFrontend { return &SyncFrontend{b: b} } -func (s *SyncFrontend) GetSession(ctx context.Context) error { +func (s *SyncFrontend) GetSession(ctx context.Context) (*eth.SyncTesterSession, error) { return s.b.GetSession(ctx) } 
@@ -29,3 +29,7 @@ func (s *SyncFrontend) DeleteSession(ctx context.Context) error { func (s *SyncFrontend) ListSessions(ctx context.Context) ([]string, error) { return s.b.ListSessions(ctx) } + +func (s *SyncFrontend) ResetSession(ctx context.Context) error { + return s.b.ResetSession(ctx) +} diff --git a/op-sync-tester/synctester/middleware.go b/op-sync-tester/synctester/middleware.go index 314baf461a375..522e37c002227 100644 --- a/op-sync-tester/synctester/middleware.go +++ b/op-sync-tester/synctester/middleware.go @@ -8,17 +8,31 @@ import ( "strings" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/backend" - "github.com/ethereum/go-ethereum/log" + "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/backend/session" "github.com/google/uuid" ) var ErrInvalidSessionIDFormat = errors.New("invalid UUID") var ErrInvalidParams = errors.New("invalid param") +var ErrInvalidELSyncTarget = errors.New("invalid el sync target") + +const ELSyncTargetKey = "el_sync_target" + +func IsValidSessionID(sessionID string) error { + u, err := uuid.Parse(sessionID) + if err != nil { + return fmt.Errorf("invalid session id format: %w", err) + } + if u.Version() == 4 { + return nil + } + return errors.New("session format must satisfy uuid4 format") +} // parseSession inspects the incoming request to determine if it targets a session-specific route. // If the request path matches the pattern `/chain/{chain_id}/synctest/{uuid}`, it attempts to parse -// the UUID and optional query parameters (`latest`, `safe`, `finalized`) used to initialize the session. +// the UUID and optional query parameters (`latest`, `safe`, `finalized`, `el_sync_target`) used to +// initialize the session. // // If parsing succeeds, a backend.Session is attached to the request context, and the URL path is // rewritten to `/chain/{chain_id}/synctest` to enable consistent routing downstream. 
@@ -30,18 +44,17 @@ var ErrInvalidParams = errors.New("invalid param") // /chain/{chain_id}/synctest/{session_uuid} // // Returns an error if the session UUID is invalid or any query parameter is malformed. -func parseSession(r *http.Request, log log.Logger) (*http.Request, error) { +func parseSession(r *http.Request) (*http.Request, error) { segments := strings.Split(strings.Trim(r.URL.Path, "/"), "/") if len(segments) == 4 && segments[0] == "chain" && segments[2] == "synctest" { sessionID := segments[3] - if _, err := uuid.Parse(sessionID); err != nil { - return r, ErrInvalidSessionIDFormat + if err := IsValidSessionID(sessionID); err != nil { + return r, errors.Join(ErrInvalidSessionIDFormat, err) } query := r.URL.Query() parseParam := func(name string) (uint64, error) { raw := query.Get(name) if raw == "" { - log.Warn("Parameter not provided. Defaulting to 0", "param", name) return 0, nil } val, err := strconv.ParseUint(raw, 10, 64) @@ -62,13 +75,19 @@ func parseSession(r *http.Request, log log.Logger) (*http.Request, error) { if err != nil { return r, err } - session := &backend.Session{ - SessionID: sessionID, - Latest: latest, - Safe: safe, - Finalized: finalized, + elSyncTarget, err := parseParam(ELSyncTargetKey) + if err != nil { + return r, err + } + elSyncActive := false + if elSyncTarget != 0 { + if elSyncTarget < latest { + return r, ErrInvalidELSyncTarget + } + elSyncActive = true } - ctx := backend.WithSession(r.Context(), session) + sess := eth.NewSyncTesterSession(sessionID, latest, safe, finalized, elSyncTarget, elSyncActive) + ctx := session.WithSyncTesterSession(r.Context(), sess) // remove uuid path for routing r.URL.Path = "/" + strings.Join(segments[:3], "/") r = r.WithContext(ctx) diff --git a/op-sync-tester/synctester/middleware_test.go b/op-sync-tester/synctester/middleware_test.go index f02330b676b6e..f37968ce2b960 100644 --- a/op-sync-tester/synctester/middleware_test.go +++ b/op-sync-tester/synctester/middleware_test.go @@ -3,11 +3,11 
@@ package synctester import ( "net/http" "net/url" + "strconv" "testing" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/backend" - "github.com/ethereum/go-ethereum/log" + "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/backend/session" "github.com/google/uuid" "github.com/stretchr/testify/require" ) @@ -29,17 +29,19 @@ func TestParseSession_Valid(t *testing.T) { query.Set(eth.Finalized, "80") req := newRequest("/chain/1/synctest/"+id, query) - newReq, err := parseSession(req, log.New()) + newReq, err := parseSession(req) require.NoError(t, err) require.NotNil(t, newReq) - session, ok := backend.SessionFromContext(newReq.Context()) + session, ok := session.SyncTesterSessionFromContext(newReq.Context()) require.True(t, ok) require.NotNil(t, session) require.Equal(t, id, session.SessionID) - require.Equal(t, uint64(100), session.Latest) - require.Equal(t, uint64(90), session.Safe) - require.Equal(t, uint64(80), session.Finalized) + require.Equal(t, uint64(100), session.InitialState.Latest) + require.Equal(t, uint64(90), session.InitialState.Safe) + require.Equal(t, uint64(80), session.InitialState.Finalized) + require.Equal(t, session.InitialState.Latest, session.Validated) + require.Equal(t, session.InitialState, session.CurrentState) require.Equal(t, "/chain/1/synctest", newReq.URL.Path) } @@ -47,33 +49,60 @@ func TestParseSession_DefaultsToZero(t *testing.T) { id := uuid.New().String() req := newRequest("/chain/1/synctest/"+id, nil) - newReq, err := parseSession(req, log.New()) + newReq, err := parseSession(req) require.NoError(t, err) require.NotNil(t, newReq) - session, ok := backend.SessionFromContext(newReq.Context()) + session, ok := session.SyncTesterSessionFromContext(newReq.Context()) require.True(t, ok) require.NotNil(t, session) require.Equal(t, id, session.SessionID) - require.Equal(t, uint64(0), session.Latest) - require.Equal(t, uint64(0), session.Safe) - 
require.Equal(t, uint64(0), session.Finalized) + require.Equal(t, uint64(0), session.InitialState.Latest) + require.Equal(t, uint64(0), session.InitialState.Safe) + require.Equal(t, uint64(0), session.InitialState.Finalized) + require.Equal(t, session.InitialState.Latest, session.Validated) + require.Equal(t, session.InitialState, session.CurrentState) +} + +func TestParseSession_ELSyncTarget(t *testing.T) { + id := uuid.New().String() + query := url.Values{} + elSyncTarget := uint64(4) + query.Set(ELSyncTargetKey, strconv.Itoa(int(elSyncTarget))) + + req := newRequest("/chain/1/synctest/"+id, query) + + newReq, err := parseSession(req) + require.NoError(t, err) + require.NotNil(t, newReq) + + session, ok := session.SyncTesterSessionFromContext(newReq.Context()) + require.True(t, ok) + require.NotNil(t, session) + require.Equal(t, id, session.SessionID) + require.Equal(t, uint64(0), session.InitialState.Latest) + require.Equal(t, uint64(0), session.InitialState.Safe) + require.Equal(t, uint64(0), session.InitialState.Finalized) + require.Equal(t, session.InitialState.Latest, session.Validated) + require.Equal(t, session.InitialState, session.CurrentState) + require.True(t, session.ELSyncActive) + require.Equal(t, session.ELSyncTarget, elSyncTarget) } func TestParseSession_NoSessionInitialized(t *testing.T) { req := newRequest("/chain/1/synctest", nil) - newReq, err := parseSession(req, log.New()) + newReq, err := parseSession(req) require.NoError(t, err) require.Same(t, req, newReq) - _, ok := backend.SessionFromContext(newReq.Context()) + _, ok := session.SyncTesterSessionFromContext(newReq.Context()) require.False(t, ok) } func TestParseSession_InvalidSessionIDFormat(t *testing.T) { req := newRequest("/chain/1/synctest/not-a-uuid", nil) - _, err := parseSession(req, log.New()) + _, err := parseSession(req) require.ErrorIs(t, err, ErrInvalidSessionIDFormat) } @@ -83,6 +112,19 @@ func TestParseSession_InvalidQueryParam(t *testing.T) { query.Set(eth.Unsafe, 
"not-a-number") // invalid uint64 req := newRequest("/chain/1/synctest/"+id, query) - _, err := parseSession(req, log.New()) + _, err := parseSession(req) require.ErrorIs(t, err, ErrInvalidParams) } + +func TestParseSession_InvalidELSyncTarget(t *testing.T) { + id := uuid.New().String() + query := url.Values{} + latest := 4 + elSyncTarget := latest - 1 + query.Set(eth.Unsafe, strconv.Itoa(latest)) + query.Set(ELSyncTargetKey, strconv.Itoa(elSyncTarget)) + + req := newRequest("/chain/1/synctest/"+id, query) + _, err := parseSession(req) + require.ErrorIs(t, err, ErrInvalidELSyncTarget) +} diff --git a/op-sync-tester/synctester/service.go b/op-sync-tester/synctester/service.go index 530f3d754564b..7417785606c40 100644 --- a/op-sync-tester/synctester/service.go +++ b/op-sync-tester/synctester/service.go @@ -147,8 +147,8 @@ func (s *Service) initHTTPServer(cfg *config.Config) error { endpoint := net.JoinHostPort(cfg.RPC.ListenAddr, strconv.Itoa(cfg.RPC.ListenPort)) // middleware to initialize session handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - r, err := parseSession(r, s.log) - if errors.Is(err, ErrInvalidSessionIDFormat) || errors.Is(err, ErrInvalidParams) { + r, err := parseSession(r) + if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -213,9 +213,16 @@ func (s *Service) RPC() string { return s.httpServer.HTTPEndpoint() } -func (s *Service) SyncTesterEndpoint(chainID eth.ChainID) string { - uuid := uuid.New() - return fmt.Sprintf("%s/chain/%s/synctest/%s", s.RPC(), chainID, uuid) +func (s *Service) SyncTesterRPC(chainID eth.ChainID, withSessionID bool) string { + return s.RPC() + s.SyncTesterRPCPath(chainID, withSessionID) +} + +func (s *Service) SyncTesterRPCPath(chainID eth.ChainID, withSessionID bool) string { + path := fmt.Sprintf("/chain/%s/synctest", chainID) + if withSessionID { + path = fmt.Sprintf("%s/%s", path, uuid.New()) + } + return path } func (s *Service) SyncTesters() 
map[sttypes.SyncTesterID]eth.ChainID { diff --git a/op-test-sequencer/sequencer/backend/work/builders/fakepos/builder.go b/op-test-sequencer/sequencer/backend/work/builders/fakepos/builder.go index e18c8e389bf21..e6ac58b1035d1 100644 --- a/op-test-sequencer/sequencer/backend/work/builders/fakepos/builder.go +++ b/op-test-sequencer/sequencer/backend/work/builders/fakepos/builder.go @@ -2,15 +2,16 @@ package fakepos import ( "context" + "fmt" + "math/big" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work" "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth/catalyst" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/params" ) type Beacon interface { @@ -18,22 +19,22 @@ type Beacon interface { } type Blockchain interface { - CurrentBlock() *types.Header - GetHeaderByNumber(number uint64) *types.Header - GetHeaderByHash(hash common.Hash) *types.Header - CurrentFinalBlock() *types.Header - CurrentSafeBlock() *types.Header - Genesis() *types.Block - Config() *params.ChainConfig + // All methods are assumed to have identical behavior to the corresponding methods on + // go-ethereum/ethclient.Client. 
+ + HeaderByNumber(context.Context, *big.Int) (*types.Header, error) + HeaderByHash(context.Context, common.Hash) (*types.Header, error) } type Builder struct { id seqtypes.BuilderID log log.Logger - engine *catalyst.ConsensusAPI + engine geth.EngineAPI beacon Beacon blockchain Blockchain + genesis *types.Header + config types.BlockType registry work.Jobs @@ -48,13 +49,19 @@ type Builder struct { var _ work.Builder = (*Builder)(nil) func NewBuilder(ctx context.Context, id seqtypes.BuilderID, opts *work.ServiceOpts, config *Config) (work.Builder, error) { + genesis, err := config.Backend.HeaderByNumber(context.Background(), new(big.Int)) + if err != nil { + return nil, fmt.Errorf("get genesis header: %w", err) + } return &Builder{ id: id, log: opts.Log, + genesis: genesis, + config: config.ChainConfig, registry: opts.Jobs, - engine: catalyst.NewConsensusAPI(config.GethBackend), + engine: config.EngineAPI, beacon: config.Beacon, - blockchain: config.GethBackend.BlockChain(), + blockchain: config.Backend, withdrawalsIndex: 1001, envelopes: make(map[common.Hash]*engine.ExecutionPayloadEnvelope), finalizedDistance: config.FinalizedDistance, diff --git a/op-test-sequencer/sequencer/backend/work/builders/fakepos/config.go b/op-test-sequencer/sequencer/backend/work/builders/fakepos/config.go index 7f5151129eafc..f4f09040f3ac1 100644 --- a/op-test-sequencer/sequencer/backend/work/builders/fakepos/config.go +++ b/op-test-sequencer/sequencer/backend/work/builders/fakepos/config.go @@ -1,9 +1,14 @@ package fakepos -import "github.com/ethereum/go-ethereum/eth" +import ( + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" + "github.com/ethereum/go-ethereum/core/types" +) type Config struct { - GethBackend *eth.Ethereum + ChainConfig types.BlockType + Backend Blockchain + EngineAPI geth.EngineAPI Beacon Beacon FinalizedDistance uint64 SafeDistance uint64 diff --git a/op-test-sequencer/sequencer/backend/work/builders/fakepos/job.go 
b/op-test-sequencer/sequencer/backend/work/builders/fakepos/job.go index 272f21cdb49f7..b22f8cef20ed3 100644 --- a/op-test-sequencer/sequencer/backend/work/builders/fakepos/job.go +++ b/op-test-sequencer/sequencer/backend/work/builders/fakepos/job.go @@ -4,6 +4,7 @@ import ( "context" "encoding/binary" "errors" + "math/big" "math/rand" "sync" "time" @@ -19,6 +20,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" ) type FakePoSEnvelope struct { @@ -59,25 +61,38 @@ func (j *Job) Cancel(ctx context.Context) error { } func (j *Job) setHeadSafeAndFinalized() { - j.head = j.b.blockchain.CurrentBlock() // default head + var err error + j.head, err = j.b.blockchain.HeaderByNumber(context.Background(), nil) + if err != nil { + panic("chain head not found") + } if j.parent != (common.Hash{}) { - j.head = j.b.blockchain.GetHeaderByHash(j.parent) // override head if parent is set + j.head, err = j.b.blockchain.HeaderByHash(context.Background(), j.parent) // override head if parent is set + if err != nil { + panic("chain head's parent not found") + } } - j.finalized = j.b.blockchain.CurrentFinalBlock() - if j.finalized == nil { // fallback to genesis if nothing is finalized - j.finalized = j.b.blockchain.Genesis().Header() + j.finalized, err = j.b.blockchain.HeaderByNumber(context.Background(), new(big.Int).SetInt64(int64(rpc.FinalizedBlockNumber))) + if err != nil { // fallback to genesis if nothing is finalized + j.finalized = j.b.genesis } - j.safe = j.b.blockchain.CurrentSafeBlock() - if j.safe == nil { // fallback to finalized if nothing is safe + j.safe, err = j.b.blockchain.HeaderByNumber(context.Background(), new(big.Int).SetInt64(int64(rpc.SafeBlockNumber))) + if err != nil { // fallback to finalized if nothing is safe j.safe = j.finalized } if j.head.Number.Uint64() > j.b.finalizedDistance { // progress finalized block, if we can - j.finalized 
= j.b.blockchain.GetHeaderByNumber(j.head.Number.Uint64() - j.b.finalizedDistance) + j.finalized, err = j.b.blockchain.HeaderByNumber(context.Background(), new(big.Int).SetUint64(j.head.Number.Uint64()-j.b.finalizedDistance)) + if err != nil { + panic("no block found finalizedDistance behind head") + } } if j.head.Number.Uint64() > j.b.safeDistance { // progress safe block, if we can - j.safe = j.b.blockchain.GetHeaderByNumber(j.head.Number.Uint64() - j.b.safeDistance) + j.safe, err = j.b.blockchain.HeaderByNumber(context.Background(), new(big.Int).SetUint64(j.head.Number.Uint64()-j.b.safeDistance)) + if err != nil { + panic("no block found safeDistance behind head") + } } j.parentBeaconBlockRoot = fakeBeaconBlockRoot(j.head.Time) // parent beacon block root @@ -139,7 +154,7 @@ func (j *Job) Open(ctx context.Context) error { // modify gas limit so that we get a different block envelope.ExecutionPayload.GasLimit = envelope.ExecutionPayload.GasLimit + 100 - block, err := engine.ExecutableDataToBlockNoHash(*envelope.ExecutionPayload, make([]common.Hash, 0), &j.parentBeaconBlockRoot, make([][]byte, 0), j.b.blockchain.Config()) + block, err := engine.ExecutableDataToBlockNoHash(*envelope.ExecutionPayload, make([]common.Hash, 0), &j.parentBeaconBlockRoot, make([][]byte, 0), j.b.config) if err != nil { j.logger.Error("failed to convert executable data to block", "err", err) return err @@ -187,7 +202,7 @@ func (j *Job) Seal(ctx context.Context) (work.Block, error) { } if envelope.BlobsBundle != nil { - slot := (envelope.ExecutionPayload.Timestamp - j.b.blockchain.Genesis().Time()) / j.b.blockTime + slot := (envelope.ExecutionPayload.Timestamp - j.b.genesis.Time) / j.b.blockTime if j.b.beacon == nil { j.logger.Error("no blobs storage available") return nil, errors.New("no blobs storage available") diff --git a/op-test-sequencer/sequencer/backend/work/builders/standardbuilder/config.go b/op-test-sequencer/sequencer/backend/work/builders/standardbuilder/config.go index 
1da2cbb4cfcef..3804eda73a2c4 100644 --- a/op-test-sequencer/sequencer/backend/work/builders/standardbuilder/config.go +++ b/op-test-sequencer/sequencer/backend/work/builders/standardbuilder/config.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work" "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" + "github.com/ethereum/go-ethereum/params" ) type Config struct { @@ -23,6 +24,8 @@ type Config struct { L2EL endpoint.MustRPC `yaml:"l2EL"` // L2 consensus-layer RPC endpoint L2CL endpoint.MustRPC `yaml:"l2CL"` + + L1ChainConfig *params.ChainConfig } func (c *Config) Start(ctx context.Context, id seqtypes.BuilderID, opts *work.ServiceOpts) (work.Builder, error) { @@ -87,7 +90,7 @@ func (c *Config) Start(ctx context.Context, id seqtypes.BuilderID, opts *work.Se if err != nil { return nil, err } - fb := derive.NewFetchingAttributesBuilder(cfg, depSet, l1Cl, l2Cl) + fb := derive.NewFetchingAttributesBuilder(cfg, c.L1ChainConfig, depSet, l1Cl, l2Cl) fb.TestSkipL1OriginCheck() diff --git a/op-up/.gitignore b/op-up/.gitignore new file mode 100644 index 0000000000000..ba077a4031add --- /dev/null +++ b/op-up/.gitignore @@ -0,0 +1 @@ +bin diff --git a/op-up/.goreleaser.yaml b/op-up/.goreleaser.yaml new file mode 100644 index 0000000000000..c7cb05c73f85f --- /dev/null +++ b/op-up/.goreleaser.yaml @@ -0,0 +1,63 @@ +# yaml-language-server: $schema=https://goreleaser.com/static/schema-pro.json +# vim: set ts=2 sw=2 tw=0 fo=cnqoj + +version: 2 + +project_name: op-up + +before: + hooks: + - go mod tidy + +builds: + - id: main + main: . 
+ binary: "{{ .ProjectName }}" + env: + - CGO_ENABLED=0 + goos: + - linux + - darwin + goarch: + - amd64 + - arm64 + ignore: + - goos: linux + goarch: arm64 + mod_timestamp: "{{ .CommitTimestamp }}" + ldflags: + - -X main.GitCommit={{ .FullCommit }} + - -X main.GitDate={{ .CommitDate }} + - -X main.Version={{ .Version }} + - -X main.VersionMeta= + +archives: + - format: tar.gz + # this name template makes the OS and Arch compatible with the results of `uname`. + name_template: "{{ .ProjectName }}-{{ .Version }}-{{ tolower .Os }}-{{ .Arch }}" + wrap_in_directory: true + files: [] + +dockers: + - goos: linux + goarch: amd64 + dockerfile: Dockerfile + image_templates: + - "us-docker.pkg.dev/oplabs-tools-artifacts/images/{{ .ProjectName }}:{{ .Tag }}" + +changelog: + sort: asc + filters: + exclude: + - "^docs:" + - "^test:" + +release: + github: + owner: ethereum-optimism + name: optimism + make_latest: false + +monorepo: + tag_prefix: op-up/ + dir: op-up diff --git a/op-deployer/Dockerfile.default b/op-up/Dockerfile similarity index 61% rename from op-deployer/Dockerfile.default rename to op-up/Dockerfile index 0821bc0c48ee4..4bf4c311bfc0c 100644 --- a/op-deployer/Dockerfile.default +++ b/op-up/Dockerfile @@ -1,9 +1,9 @@ FROM debian:bookworm-20240812-slim -ENTRYPOINT ["/op-deployer"] -COPY op-deployer /op-deployer +ENTRYPOINT ["/op-up"] +COPY op-up /op-up # Install ca-certificates so that HTTPS requests work RUN apt-get update && apt-get install -y ca-certificates # Symlink onto the PATH -RUN ln -s /op-deployer /usr/local/bin/op-deployer \ No newline at end of file +RUN ln -s /op-up /usr/local/bin/op-up diff --git a/op-up/install.sh b/op-up/install.sh new file mode 100755 index 0000000000000..8ff37ccf60db6 --- /dev/null +++ b/op-up/install.sh @@ -0,0 +1,193 @@ +#!/bin/sh + +# Credit where it's due: much of this script is copied from or inspired by [rustup] and [foundryup]. 
+# +# [rustup]: https://sh.rustup.rs +# [foundrup]: https://foundry.paradigm.xyz + +# All configs are here. +# If you modify the configs in any way, please also update the help text below. +OP_UP_VERSION="${OP_UP_VERSION:-0.2.0}" # The default version is hardcoded for now. +OP_UP_REPO="${OP_UP_REPO:-ethereum-optimism/optimism}" +OP_UP_DIR="${OP_UP_DIR:-"${HOME}/.op-up"}" + +if [ "$#" != 0 ]; then + echo "The op-up installer. + +When no parameters are passed, the op-up command is installed from GitHub. +Anything else causes this help text to be printed. + +The installation is configured via environment variables: + +OP_UP_REPO: + The GitHub repo from which to download op-up. + (default: ${OP_UP_REPO}) + +OP_UP_VERSION: + The semver-formatted version of the op-up command to install. + (default: ${OP_UP_VERSION}) + +OP_UP_DIR: + The main directory for the op-up command. The install directory is \"\${OP_UP_DIR}/bin\". + (default: ${OP_UP_DIR}) + +NO_COLOR: + Disables pretty text when set and nonempty. https://no-color.org/ + +The script only understands the GitHub releases API. +On error, the script exits with status code 1." + exit +fi + +# Use pretty text when the user's environment most likely supports it. +_text_bold='' +_text_red='' +_text_reset='' +# Ensure the script is running in a terminal, NO_COLOR is unset or empty, and tput is available. +if [ -t 1 ] && [ -z "${NO_COLOR-}" ] && command -v tput >/dev/null 2>&1; then + ncolors=$(tput colors 2>/dev/null || printf 0) + # Checking for 8 colors helps avoid weird edge cases on legacy or misconfigured systems. + if [ "$ncolors" -ge 8 ]; then + _text_bold=$(tput bold) + _text_red=$(tput setaf 1) + _text_reset=$(tput sgr0) + fi +fi + +say() { + printf "op-up-installer: %s\n" "$1" +} + +shout() { + printf '%s' "${_text_bold}${_text_red}" + say "${1}${_text_reset}" +} + +err() { + say "error: ${1}" >&2 + exit 1 +} + +ensure() { + if ! "$@"; then err "command failed: ${*}"; fi +} + +# Get the os type. 
+
+_ostype="$(uname -s)"
+
+case "$_ostype" in
+    Linux)
+        _ostype=linux
+        ;;
+    Darwin)
+        _ostype=darwin
+        ;;
+    *)
+        err "os type is not Linux or Darwin: $_ostype"
+        ;;
+esac
+
+# Get the cpu type.
+
+_cputype="$(uname -m)"
+
+# Working around Mac idiosyncrasies.
+if [ "$_ostype" = darwin ]; then
+    # Darwin `uname -m` can lie due to Rosetta shenanigans. If you manage to
+    # invoke a native shell binary and then a native uname binary, you can
+    # get the real answer, but that's hard to ensure, so instead we use
+    # `sysctl` (which doesn't lie) to check for the actual architecture.
+    if [ "$_cputype" = i386 ]; then
+        # Handling i386 compatibility mode in older macOS versions (<10.15)
+        # running on x86_64-based Macs.
+        # Starting from 10.15, macOS explicitly bans all i386 binaries from running.
+        # See:
+
+        # Avoid `sysctl: unknown oid` stderr output and/or non-zero exit code.
+        if sysctl hw.optional.x86_64 2> /dev/null | grep -q ': 1'; then
+            _cputype=amd64
+        fi
+    elif [ "$_cputype" = x86_64 ]; then
+        # Handling x86-64 compatibility mode (a.k.a. Rosetta 2)
+        # in newer macOS versions (>=11) running on arm64-based Macs.
+        # Rosetta 2 is built exclusively for x86-64 and cannot run i386 binaries.
+
+        # Avoid `sysctl: unknown oid` stderr output and/or non-zero exit code.
+        if sysctl hw.optional.arm64 2> /dev/null | grep -q ': 1'; then
+            _cputype=arm64
+        fi
+    fi
+fi
+
+case "$_cputype" in
+    aarch64 | arm64)
+        _cputype=arm64
+        ;;
+    x86_64 | x86-64 | x64 | amd64)
+        _cputype=amd64
+        ;;
+    *)
+        err "unsupported cpu type: $_cputype"
+esac
+
+# Download the binary.
+
+_binary_name="op-up"
+
+_target="${_ostype}-${_cputype}"
+say "downloading for target ${_target}..."
+_file_without_ext="${_binary_name}-${OP_UP_VERSION}-${_target}"
+_url="https://github.com/${OP_UP_REPO}/releases/download/${_binary_name}/v${OP_UP_VERSION}/${_file_without_ext}.tar.gz"
+_archive=$(mktemp) || err "create temporary file"
+ensure curl --location --proto '=https' --tlsv1.2 --silent --show-error --fail "$_url" --output "$_archive"
+say 'downloaded'
+
+# Extract to the destination.
+
+say "installing..."
+_install_dir="${OP_UP_DIR}/bin"
+ensure mkdir -p "$_install_dir"
+ensure tar --verbose --extract --file "$_archive" --directory "$_install_dir" --strip-components 1
+ensure chmod +x "${_install_dir}/${_binary_name}"
+say 'installed'
+
+# Update the PATH if necessary.
+
+case ":${PATH}:" in
+
+    *":${_install_dir}:"*)
+        ;;
+
+    *)
+
+        say 'updating PATH...'
+        say "finding shell profile for shell ${SHELL}..."
+        case "$SHELL" in
+            */zsh)
+                _profile="${ZDOTDIR-"$HOME"}/.zshenv"
+                ;;
+            */bash)
+                _profile="${HOME}/.bashrc"
+                ;;
+            */fish)
+                _profile="${HOME}/.config/fish/config.fish"
+                ;;
+            */ash)
+                _profile="${HOME}/.profile"
+                ;;
+            *)
+                err "could not detect shell, manually add ${_install_dir} to your PATH."
+        esac
+        say "shell profile found at ${_profile}"
+
+        echo >> "$_profile"
+        if [ "${SHELL##*/}" = fish ]; then
+            echo "fish_add_path -a ${_install_dir}" >> "$_profile"
+        else
+            echo "export PATH=\"\${PATH}:${_install_dir}\"" >> "$_profile"
+        fi
+        say 'updated PATH'
+        shout "run 'source ${_profile}' or start a new terminal session to use op-up"
+
+esac
diff --git a/op-up/justfile b/op-up/justfile
new file mode 100644
index 0000000000000..af964d4935767
--- /dev/null
+++ b/op-up/justfile
@@ -0,0 +1,20 @@
+import '../justfiles/go.just'
+
+_LDFLAGSSTRING := "'" + trim(
+    "-X main.Version=" + VERSION + " " + \
+    "-X main.VersionMeta=" + VERSION_META + " " + \
+    "-X main.GitCommit=" + GITCOMMIT + " " + \
+    "-X main.GitDate=" + GITDATE + " " + \
+    "") + "'"
+
+BINARY := "./bin/op-up"
+
+default: op-up
+
+op-up: (go_build BINARY "."
"-ldflags" _LDFLAGSSTRING) + +artifacts: + just ../op-deployer/copy-contract-artifacts + +clean: + rm -f {{ BINARY }} diff --git a/op-up/main.go b/op-up/main.go new file mode 100644 index 0000000000000..1dd31fd5a06de --- /dev/null +++ b/op-up/main.go @@ -0,0 +1,478 @@ +package main + +import ( + "context" + _ "embed" + "encoding/json" + "fmt" + "io" + "log/slog" + "net/http" + "os" + "os/signal" + "path/filepath" + "runtime/debug" + "slices" + "sync" + "syscall" + "time" + + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + opservice "github.com/ethereum-optimism/optimism/op-service" + "github.com/ethereum-optimism/optimism/op-service/cliapp" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum-optimism/optimism/op-service/log/logfilter" + "github.com/ethereum-optimism/optimism/op-service/testreq" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/urfave/cli/v2" + "go.opentelemetry.io/otel/trace" +) + +const asciiArt = ` ____ ____ _ ____ +/ _ \/ __\ / \ /\/ __\ +| / \|| \/|_____ | | ||| \/| +| \_/|| __/\____\| \_/|| __/ +\____/\_/ \____/\_/` + +var ( + Version = "v0.0.0" + VersionMeta = "dev" + GitCommit string + GitDate string + + envPrefix = "OP_UP" + dirFlag = &cli.PathFlag{ + Name: "dir", + Usage: "the path to the op-up directory, which is used for caching among other things.", + EnvVars: opservice.PrefixEnvVar(envPrefix, "DIR"), + Value: func() string { + parentDir, err := os.UserHomeDir() + if err != nil { + 
parentDir, err = os.Getwd() + if err != nil { + return "error: could not find home or working directories" + } + } + return filepath.Join(parentDir, ".op-up") + }(), + } +) + +func main() { + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGTERM, os.Interrupt) + defer cancel() + if err := run(ctx, os.Args, os.Stdout, os.Stderr); err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } +} + +func run(ctx context.Context, args []string, stdout, stderr io.Writer) error { + app := cli.NewApp() + app.Writer = stdout + app.ErrWriter = stderr + app.Version = opservice.FormatVersion(Version, GitCommit, GitDate, VersionMeta) + app.Name = "op-up" + app.Usage = "deploys an in-memory OP Stack devnet." + app.Flags = cliapp.ProtectFlags([]cli.Flag{dirFlag}) + // The default OnUsageError behavior will print the error twice: once in the cli package and + // once in our main function. + // The function below prints help and returns the error for further handling/error messages. 
+ app.OnUsageError = func(cliCtx *cli.Context, err error, isSubcommand bool) error { + if !cliCtx.App.HideHelp { + _ = cli.ShowAppHelp(cliCtx) + } + return err + } + app.Action = func(cliCtx *cli.Context) error { + return runOpUp(cliCtx.Context, cliCtx.App.ErrWriter, cliCtx.String(dirFlag.Name)) + } + return app.RunContext(ctx, args) +} + +func runOpUp(ctx context.Context, stderr io.Writer, opUpDir string) error { + fmt.Fprintf(stderr, "%s\n", asciiArt) + + if err := os.MkdirAll(opUpDir, 0o755); err != nil { + return fmt.Errorf("create the op-up dir: %w", err) + } + deployerCacheDir := filepath.Join(opUpDir, "deployer", "cache") + if err := os.MkdirAll(deployerCacheDir, 0o755); err != nil { + return fmt.Errorf("create the deployer cache dir: %w", err) + } + + devtest.RootContext = ctx + + p := newP(ctx, stderr) + defer p.Close() + + ids := sysgo.NewDefaultMinimalSystemIDs(sysgo.DefaultL1ID, sysgo.DefaultL2AID) + opts := stack.Combine( + sysgo.WithMnemonicKeys(devkeys.TestMnemonic), + + sysgo.WithDeployer(), + sysgo.WithDeployerOptions( + sysgo.WithEmbeddedContractSources(), + sysgo.WithCommons(ids.L1.ChainID()), + sysgo.WithPrefundedL2(ids.L1.ChainID(), ids.L2.ChainID()), + ), + sysgo.WithDeployerPipelineOption(sysgo.WithDeployerCacheDir(deployerCacheDir)), + + sysgo.WithL1Nodes(ids.L1EL, ids.L1CL), + + sysgo.WithL2ELNode(ids.L2EL), + sysgo.WithL2CLNode(ids.L2CL, ids.L1CL, ids.L1EL, ids.L2EL, sysgo.L2CLSequencer()), + + sysgo.WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL), + sysgo.WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil), + + sysgo.WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2EL}), + ) + + orch := sysgo.NewOrchestrator(p, opts) + stack.ApplyOptionLifecycle[*sysgo.Orchestrator](opts, orch) + if err := runSysgo(ctx, stderr, orch); err != nil { + return err + } + fmt.Fprintf(stderr, "\nPlease consider filling out this survey to influence future development: https://www.surveymonkey.com/r/JTGHFK3\n") + return nil +} + +func 
newP(ctx context.Context, stderr io.Writer) devtest.P { + logHandler := oplog.NewLogHandler(stderr, oplog.DefaultCLIConfig()) + logHandler = logfilter.WrapFilterHandler(logHandler) + logHandler.(logfilter.FilterHandler).Set(logfilter.DefaultMute()) + logHandler = logfilter.WrapContextHandler(logHandler) + logger := log.NewLogger(logHandler) + oplog.SetGlobalLogHandler(logHandler) + logger.SetContext(ctx) + onFail := func(now bool) { + logger.Error("Main failed") + debug.PrintStack() + if now { + panic("critical Main fail") + } + } + p := devtest.NewP(ctx, logger, onFail, func() { + onFail(true) + }) + return p +} + +func runSysgo(ctx context.Context, stderr io.Writer, orch *sysgo.Orchestrator) error { + // Print available account. + hd, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) + if err != nil { + return fmt.Errorf("new mnemonic dev keys: %w", err) + } + const funderIndex = 10_000 // see sysgo/deployer.go. + funderUserKey := devkeys.UserKey(funderIndex) + funderAddress, err := hd.Address(funderUserKey) + if err != nil { + return fmt.Errorf("address: %w", err) + } + funderPrivKey, err := hd.Secret(funderUserKey) + if err != nil { + return fmt.Errorf("secret: %w", err) + } + + fmt.Fprintf(stderr, "Test Account Address: %s\n", funderAddress) + fmt.Fprintf(stderr, "Test Account Private Key: %s\n", "0x"+common.Bytes2Hex(crypto.FromECDSA(funderPrivKey))) + fmt.Fprintf(stderr, "EL Node URL: %s\n", "http://localhost:8545") + + t := &testingT{ + ctx: ctx, + cleanups: make([]func(), 0), + } + defer t.doCleanup() + sys := shim.NewSystem(t) + orch.Hydrate(sys) + l2Networks := sys.L2Networks() + if len(l2Networks) != 1 { + return fmt.Errorf("need one l2 network, got: %d", len(l2Networks)) + } + l2Net := l2Networks[0] + elNode := l2Net.L2ELNode(match.FirstL2EL) + + // Log on new blocks. 
+ go func() { + const blockPollInterval = 500 * time.Millisecond + var lastBlock uint64 + for { + select { + case <-ctx.Done(): + return + case <-time.After(blockPollInterval): + unsafe, err := elNode.EthClient().BlockRefByLabel(ctx, eth.Unsafe) + if err != nil { + continue + } + if unsafe.Number != lastBlock { + fmt.Fprintf(stderr, "New L2 block: number %d, hash %s\n", unsafe.Number, unsafe.Hash) + lastBlock = unsafe.Number + } + } + } + }() + + // Proxy L2 EL requests. + go func() { + if err := proxyEL(stderr, elNode.L2EthClient().RPC()); err != nil { + fmt.Fprintf(stderr, "error: %v", err) + } + }() + + <-ctx.Done() + + return nil +} + +// proxyEL is a hacky way to intercept EL json rpc requests for logging to get around log filtering +// bugs. +func proxyEL(stderr io.Writer, client client.RPC) error { + // Set up the HTTP handler for all incoming requests. + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + // Ensure the request method is POST, as JSON RPC typically uses POST. + if r.Method != http.MethodPost { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + // Read the entire request body. + requestBody, err := io.ReadAll(r.Body) + if err != nil { + http.Error(w, "Failed to read request body", http.StatusInternalServerError) + return + } + defer r.Body.Close() // Close the request body after reading + + // Parse the incoming JSON RPC request. We use a map to dynamically + // extract the method, parameters, and ID. + var req map[string]any + if err := json.Unmarshal(requestBody, &req); err != nil { + http.Error(w, "Invalid JSON RPC request format", http.StatusBadRequest) + return + } + + // Extract the RPC method name. + method, ok := req["method"].(string) + if !ok { + http.Error(w, "Missing or invalid 'method' field in JSON RPC request", http.StatusBadRequest) + return + } + + // Extract RPC parameters. JSON RPC parameters can be an array, an object, or null/missing. 
+ var callParams []any + if p, ok := req["params"]; ok && p != nil { + if arr, isArray := p.([]any); isArray { + // If parameters are an array, spread them directly. + callParams = arr + } else if obj, isObject := p.(map[string]any); isObject { + // If parameters are a JSON object, pass the entire object as a single argument. + callParams = []any{obj} + } else { + http.Error(w, "Invalid 'params' field in JSON RPC request (must be array, object, or null)", http.StatusBadRequest) + return + } + } + // If 'params' is missing or null, `callParams` remains empty, which is correct for methods without parameters. + + // Extract the request ID. This is crucial for matching responses to requests. + id := req["id"] // ID can be string, number, or null. We don't need to check `ok` for this. + + // Prepare a variable to hold the RPC response result. + // `json.RawMessage` is used to capture the raw JSON value from the backend + // without needing to know its specific Go type beforehand. + var rpcResult json.RawMessage + + // Create a context with a timeout for the RPC call to the backend. + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) // 30-second timeout + defer cancel() // Ensure the context is cancelled to release resources + + fmt.Fprintf(stderr, "%s\n", method) + + // Use the rpc.Client to make the actual call to the backend Ethereum node. + // The `callParams...` syntax unpacks the slice into variadic arguments. + err = client.CallContext(ctx, &rpcResult, method, callParams...) + if err != nil { + message := fmt.Sprintf("RPC call to backend failed for method '%s': %v", method, err) + // If the RPC call to the backend fails, construct a JSON RPC error response. 
+ rpcErr := map[string]any{ + "jsonrpc": "2.0", + "id": id, + "error": map[string]any{ + "code": -32000, // Standard JSON RPC server error code for internal errors + "message": message, + }, + } + fmt.Fprintf(stderr, "RPC error: %s\n", message) + jsonResponse, _ := json.Marshal(rpcErr) // Marshaling error is unlikely here, so we ignore it. + w.Header().Set("Content-Type", "application/json") + // For JSON-RPC, errors are typically returned with an HTTP 200 OK status, + // with the error details within the JSON payload. + w.WriteHeader(http.StatusOK) + if _, err := w.Write(jsonResponse); err != nil { + return + } + return + } + + // If the RPC call was successful, construct the JSON RPC success response. + responseMap := map[string]any{ + "jsonrpc": "2.0", + "id": id, + "result": rpcResult, // The raw JSON result from the backend node + } + + jsonResponse, err := json.Marshal(responseMap) + if err != nil { + http.Error(w, "Failed to marshal RPC success response", http.StatusInternalServerError) + return + } + + // Set the Content-Type header and write the successful JSON RPC response. + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + if _, err := w.Write(jsonResponse); err != nil { + return + } + }) + + // Start the HTTP server. + if err := http.ListenAndServe("localhost:8545", nil); err != nil { + return fmt.Errorf("listen and server: %w", err) + } + return nil +} + +type testingT struct { + mu sync.Mutex + ctx context.Context + cleanups []func() +} + +var _ devtest.T = (*testingT)(nil) +var _ testreq.TestingT = (*testingT)(nil) + +func (t *testingT) doCleanup() { + t.mu.Lock() + defer t.mu.Unlock() + for _, cleanup := range slices.Backward(t.cleanups) { + cleanup() + } +} + +// Cleanup implements devtest.T. +func (t *testingT) Cleanup(fn func()) { + t.mu.Lock() + defer t.mu.Unlock() + t.cleanups = append(t.cleanups, fn) +} + +// Ctx implements devtest.T. 
+func (t *testingT) Ctx() context.Context { + return t.ctx +} + +// Deadline implements devtest.T. +func (t *testingT) Deadline() (deadline time.Time, ok bool) { + return time.Time{}, false +} + +// Error implements devtest.T. +func (t *testingT) Error(args ...any) { +} + +// Errorf implements devtest.T. +func (t *testingT) Errorf(format string, args ...any) { +} + +// Fail implements devtest.T. +func (t *testingT) Fail() { +} + +// FailNow implements devtest.T. +func (t *testingT) FailNow() { +} + +// Gate implements devtest.T. +func (t *testingT) Gate() *testreq.Assertions { + return testreq.New(t) +} + +// Helper implements devtest.T. +func (t *testingT) Helper() { +} + +// Log implements devtest.T. +func (t *testingT) Log(args ...any) { +} + +// Logf implements devtest.T. +func (t *testingT) Logf(format string, args ...any) { +} + +func (t *testingT) Logger() log.Logger { + return log.NewLogger(slog.NewTextHandler(io.Discard, nil)) +} + +func (t *testingT) Name() string { + return "dev" +} + +func (t *testingT) Parallel() { +} + +func (t *testingT) Require() *testreq.Assertions { + return testreq.New(t) +} + +func (t *testingT) Run(name string, fn func(devtest.T)) { + panic("unimplemented") +} + +func (t *testingT) Skip(args ...any) { + panic("unimplemented") +} + +func (t *testingT) SkipNow() { + panic("unimplemented") +} + +// Skipf implements devtest.T. +func (t *testingT) Skipf(format string, args ...any) { + panic("unimplemented") +} + +// Skipped implements devtest.T. +func (t *testingT) Skipped() bool { + return false +} + +// TempDir implements devtest.T. +func (t *testingT) TempDir() string { + panic("unimplemented") +} + +// Tracer implements devtest.T. +func (t *testingT) Tracer() trace.Tracer { + panic("unimplemented") +} + +// WithCtx implements devtest.T. +func (t *testingT) WithCtx(ctx context.Context) devtest.T { + return t +} + +// _TestOnly implements devtest.T. 
+func (t *testingT) TestOnly() { +} diff --git a/op-up/main_test.go b/op-up/main_test.go new file mode 100644 index 0000000000000..038550d9d3195 --- /dev/null +++ b/op-up/main_test.go @@ -0,0 +1,48 @@ +package main + +import ( + "context" + "io" + "sync" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/stretchr/testify/require" +) + +func TestRun(t *testing.T) { + var wg sync.WaitGroup + defer wg.Wait() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + errCh := make(chan error) + wg.Add(1) + go func() { + defer wg.Done() + defer close(errCh) + if err := run(ctx, []string{"op-up", "--dir", t.TempDir()}, io.Discard, io.Discard); err != nil { + errCh <- err + } + }() + + client, err := ethclient.DialContext(ctx, "http://localhost:8545") + require.NoError(t, err) + ticker := time.NewTicker(time.Millisecond * 250) + for { + select { + case e := <-errCh: + require.NoError(t, e) + case <-ticker.C: + chainID, err := client.ChainID(ctx) + if err != nil { + t.Logf("error while querying chain ID, will retry: %s", err) + continue + } + require.Equal(t, sysgo.DefaultL2AID.ToBig(), chainID) + return + } + } +} diff --git a/op-wheel/cheat/cheat.go b/op-wheel/cheat/cheat.go index 0f1ce16c88e42..e81487a182d3f 100644 --- a/op-wheel/cheat/cheat.go +++ b/op-wheel/cheat/cheat.go @@ -21,7 +21,6 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb/leveldb" "github.com/ethereum/go-ethereum/params" @@ -48,7 +47,11 @@ func OpenGethRawDB(dataDirPath string, readOnly bool) (ethdb.Database, error) { if err != nil { return nil, fmt.Errorf("failed to open leveldb: %w", err) } - db, err := rawdb.NewDatabaseWithFreezer(kvs, filepath.Join(dataDirPath, "ancient"), 
"", readOnly) + db, err := rawdb.Open(kvs, rawdb.OpenOptions{ + Ancient: filepath.Join(dataDirPath, "ancient"), + MetricsNamespace: "", + ReadOnly: readOnly, + }) if err != nil { return nil, fmt.Errorf("failed to open db with freezer: %w", err) } @@ -61,8 +64,7 @@ func OpenGethDB(dataDirPath string, readOnly bool) (*Cheater, error) { if err != nil { return nil, err } - ch, err := core.NewBlockChain(db, nil, nil, nil, - beacon.New(ethash.NewFullFaker()), vm.Config{}, nil) + ch, err := core.NewBlockChain(db, nil, beacon.New(ethash.NewFullFaker()), nil) if err != nil { _ = db.Close() return nil, fmt.Errorf("failed to open blockchain around chain db: %w", err) diff --git a/ops/ai-eng/.gitignore b/ops/ai-eng/.gitignore new file mode 100644 index 0000000000000..f72adc946f5c9 --- /dev/null +++ b/ops/ai-eng/.gitignore @@ -0,0 +1,32 @@ +# AI Engineering project ignores + +# Output directories +*/output/ +output/ + +# Log file +contracts-test-maintenance/log.jsonl + +# Python cache +__pycache__/ +*.pyc +*.pyo +*.pyd +.Python + +# Virtual environments +venv/ +env/ +ENV/ + +# IDE files +.vscode/ +.idea/ + +# Temporary files +*.tmp +*.temp + +# OS files +.DS_Store +Thumbs.db diff --git a/ops/ai-eng/contracts-test-maintenance/VERSION b/ops/ai-eng/contracts-test-maintenance/VERSION new file mode 100644 index 0000000000000..d15723fbe8de3 --- /dev/null +++ b/ops/ai-eng/contracts-test-maintenance/VERSION @@ -0,0 +1 @@ +0.3.2 diff --git a/ops/ai-eng/contracts-test-maintenance/components/devin-api/devin_client.py b/ops/ai-eng/contracts-test-maintenance/components/devin-api/devin_client.py new file mode 100644 index 0000000000000..d30a716b9b9dd --- /dev/null +++ b/ops/ai-eng/contracts-test-maintenance/components/devin-api/devin_client.py @@ -0,0 +1,214 @@ +""" +Script to create and monitor Devin AI sessions for contract test maintenance. +Loads prompt from the prompt renderer output and sends it to the Devin API, +then monitors the session until completion while logging the results. 
+""" + +from datetime import datetime +import glob +import json +import os +from pathlib import Path +import time +import urllib.request + +# Load .env file +if os.path.exists(".env"): + with open(".env") as f: + for line in f: + if "=" in line and not line.strip().startswith("#"): + key, value = line.strip().split("=", 1) + os.environ[key] = value.strip("\"'").strip() + + +def find_prompt_file(): + """Find the latest generated prompt file from the prompt renderer output.""" + output_dir = "../prompt-renderer/output" + prompt_files = glob.glob(f"{output_dir}/*_prompt.md") + + if not prompt_files: + raise FileNotFoundError(f"No prompt files found in {output_dir}") + + if len(prompt_files) > 1: + raise ValueError(f"Multiple prompt files found in {output_dir}: {prompt_files}") + + return prompt_files[0] + + +def load_prompt_from_file(file_path): + """Load and return the contents of a prompt file.""" + with open(file_path, "r", encoding="utf-8") as f: + return f.read().strip() + + +def log_session(session_id, status, session_data): + """Log PR link and final status to JSONL file.""" + # Extract run_id and selected files from existing data + try: + prompt_file = find_prompt_file() + run_id = os.path.basename(prompt_file).replace("_prompt.md", "") + run_time = datetime.strptime(run_id, "%Y%m%d_%H%M%S").strftime( + "%Y-%m-%d %H:%M:%S" + ) + + ranking_file = f"../tests_ranker/output/{run_id}_ranking.json" + with open(ranking_file, "r") as f: + data = json.load(f) + selected_files = { + "test_path": data["entries"][0]["test_path"], + "contract_path": data["entries"][0]["contract_path"], + } + except Exception as e: + print(f"Error retrieving run data: {e}") + run_id = None + run_time = None + selected_files = {} + + # Read system version + version_file = Path(__file__).parent.parent.parent / "VERSION" + try: + with open(version_file, "r") as f: + system_version = f.read().strip() + except (FileNotFoundError, IOError): + system_version = "unknown" + + log_entry = { + 
"system_version": system_version, + "run_id": run_id, + "run_time": run_time, + "devin_session_id": session_id, + "selected_files": selected_files, + "status": status, + } + + # Only add PR link if status is finished + if status == "finished" and session_data: + pr_url = session_data.get("pull_request", {}).get("url") + if pr_url: + log_entry["pull_request_url"] = pr_url + + with open("../../log.jsonl", "a") as f: + f.write(json.dumps(log_entry) + "\n") + + +def _make_request(url, headers, data=None, method="GET"): + """Make HTTP request to Devin API and return JSON response.""" + try: + req = urllib.request.Request(url, data=data, headers=headers, method=method) + with urllib.request.urlopen(req, timeout=30) as response: + return json.loads(response.read().decode("utf-8")) + except urllib.error.HTTPError as e: + if e.code == 504: + print(f"Server timeout (504) - will retry") + return None + else: + print(f"Request failed: {method} {url}") + print(f"Error: {e}") + raise + except TimeoutError as e: + print(f"Request timeout - will retry") + return None + except Exception as e: + print(f"Request failed: {method} {url}") + print(f"Error: {e}") + raise + + +def _validate_environment(): + """Validate required environment variables.""" + api_key = os.getenv("DEVIN_API_KEY") + base_url = os.getenv("DEVIN_API_BASE_URL") + + if not api_key: + raise ValueError("DEVIN_API_KEY environment variable not set") + if not base_url: + raise ValueError("DEVIN_API_BASE_URL environment variable not set") + + return api_key, base_url + + +def _create_headers(api_key, content_type=None): + """Create HTTP headers with authorization and optional content type.""" + headers = {"Authorization": f"Bearer {api_key}"} + if content_type: + headers["Content-Type"] = content_type + return headers + + +def create_session(prompt): + """Create a new Devin session with the given prompt.""" + api_key, base_url = _validate_environment() + + print(f"Creating session at: {base_url}/sessions") + headers = 
_create_headers(api_key, "application/json") + data = json.dumps({"prompt": prompt}).encode("utf-8") + + response_data = _make_request(f"{base_url}/sessions", headers, data, "POST") + session_id = response_data["session_id"] + + print(f"Created session: {session_id}") + return session_id + + +def monitor_session(session_id): + """Monitor session status until completion.""" + api_key, base_url = _validate_environment() + headers = _create_headers(api_key) + last_status = None + retry_delay = 60 # Start with 1 minute + + while True: + try: + status = _make_request(f"{base_url}/sessions/{session_id}", headers) + + # Handle server timeout (no response) - retry with backoff + if status is None: + print(f"Retrying in {retry_delay} seconds...") + time.sleep(retry_delay) + retry_delay = min(retry_delay * 2, 480) # Cap at 8 minutes + continue + + # Reset retry delay on successful request + retry_delay = 60 + current_status = status.get("status_enum") + + # Handle Devin setup phase (status_enum is None but we got a response) + if current_status is None: + print("Devin is setting up...") + time.sleep(5) + continue + + # Only print when status changes and is meaningful + if current_status and current_status != last_status: + print(f"Status: {current_status}") + last_status = current_status + + # Stop monitoring for non-working statuses + if current_status in ["blocked", "expired", "finished"]: + print(f"Session finished with status: {current_status}") + log_session(session_id, current_status, status) + return + + time.sleep(5) + except KeyboardInterrupt: + print( + f"\nSession {session_id} is still running. Check Devin web interface for progress." 
+ ) + return + + +def send_prompt(prompt): + """Create a session and monitor it until completion.""" + session_id = create_session(prompt) + monitor_session(session_id) + + +if __name__ == "__main__": + try: + prompt_file = find_prompt_file() + prompt = load_prompt_from_file(prompt_file) + print(f"Using prompt from: {prompt_file}") + send_prompt(prompt) + except (FileNotFoundError, ValueError) as e: + print(f"Error: {e}") + exit(1) diff --git a/ops/ai-eng/contracts-test-maintenance/components/prompt-renderer/render.py b/ops/ai-eng/contracts-test-maintenance/components/prompt-renderer/render.py new file mode 100644 index 0000000000000..62639cbca214c --- /dev/null +++ b/ops/ai-eng/contracts-test-maintenance/components/prompt-renderer/render.py @@ -0,0 +1,93 @@ +""" +Script to render a prompt instance by replacing placeholders with actual test and contract paths +from the first entry in the ranking JSON file. +""" + +import json +from pathlib import Path + + +def load_ranking_data(): + """Load the ranking JSON file and return the first entry and run_id.""" + ranking_dir = Path(__file__).parent / "../tests_ranker" / "output" + + # Get the ranking file + ranking_file = next(ranking_dir.glob("*_ranking.json")) + + # Extract run_id from filename + run_id = ranking_file.stem.replace("_ranking", "") + + with open(ranking_file, "r") as f: + data = json.load(f) + + if not data.get("entries"): + raise ValueError(f"No entries found in {ranking_file.name}") + + return data["entries"][0], run_id + + +def load_prompt_template(): + """Load the prompt template markdown file.""" + prompt_file = Path(__file__).parent.parent.parent / "prompt" / "prompt.md" + + with open(prompt_file, "r") as f: + return f.read() + + +def render_prompt(template, test_path, contract_path): + """Replace the placeholders in the template with actual paths.""" + return template.replace("{TEST_PATH}", test_path).replace( + "{CONTRACT_PATH}", contract_path + ) + + +def save_prompt_instance(rendered_prompt, 
run_id): + """Save the rendered prompt to the output folder with run ID.""" + output_dir = Path(__file__).parent / "output" + output_dir.mkdir(exist_ok=True) + + # Remove old prompt files + for old_file in output_dir.glob("*_prompt.md"): + old_file.unlink() + + filename = f"{run_id}_prompt.md" + output_file = output_dir / filename + + with open(output_file, "w") as f: + f.write(rendered_prompt) + + return output_file + + +def main(): + """Main function to render and save the prompt instance.""" + try: + # Load ranking data and get run_id + first_entry, run_id = load_ranking_data() + test_path = first_entry["test_path"] + contract_path = first_entry["contract_path"] + + print(f"Using ranking from run {run_id}:") + print(f" Test path: {test_path}") + print(f" Contract path: {contract_path}") + + # Load prompt template + template = load_prompt_template() + + # Render the prompt with actual paths + rendered_prompt = render_prompt(template, test_path, contract_path) + + # Save the rendered prompt + output_file = save_prompt_instance(rendered_prompt, run_id) + + print(f"Prompt instance saved to: {output_file}") + + except Exception as e: + print(f"Error: {e}") + return 1 + + return 0 + + +if __name__ == "__main__": + exit(main()) diff --git a/ops/ai-eng/contracts-test-maintenance/components/tests_ranker/test_ranker.py b/ops/ai-eng/contracts-test-maintenance/components/tests_ranker/test_ranker.py new file mode 100644 index 0000000000000..0698f816ea774 --- /dev/null +++ b/ops/ai-eng/contracts-test-maintenance/components/tests_ranker/test_ranker.py @@ -0,0 +1,421 @@ +"""Consolidated test ranking system for contracts-bedrock test files. + +This module combines all functionality from the tests_ranker package into a single file. +It provides utilities for discovering test files, mapping them to source contracts, +calculating staleness metrics, and generating ranked output. 
+""" + +from datetime import datetime, timezone +import json +from pathlib import Path +import subprocess +import time +import tomllib +from typing import Optional + + +# === Git Utilities === + + +def get_file_commit_timestamp(file_path: Path, repo_root: Path) -> Optional[int]: + """Get the timestamp of the last commit that modified a file. + + Args: + file_path: Path to the file. + repo_root: Path to the git repository root. + + Returns: + Unix timestamp of the last commit, or None if unable to determine. + """ + try: + # Get relative path from repo root + relative_path = file_path.relative_to(repo_root) + + # Run git log to get the last commit timestamp for this file + result = subprocess.run( + ["git", "log", "-1", "--format=%ct", "--", str(relative_path)], + cwd=repo_root, + capture_output=True, + text=True, + check=True, + ) + + if result.stdout.strip(): + return int(result.stdout.strip()) + + except (subprocess.CalledProcessError, ValueError, OSError): + pass + + return None + + +# === Scoring Utilities === + + +def calculate_staleness_days( + test_commit_ts: Optional[int], contract_commit_ts: Optional[int] +) -> Optional[float]: + """Calculate staleness in days between test and contract commits. + + Args: + test_commit_ts: Unix timestamp of test file's last commit. + contract_commit_ts: Unix timestamp of contract file's last commit. + + Returns: + Staleness in days (positive if contract is newer), or None if timestamps unavailable. + """ + if test_commit_ts is not None and contract_commit_ts is not None: + return (contract_commit_ts - test_commit_ts) / 86400 + return None + + +def calculate_test_score( + staleness_days: Optional[float], test_commit_ts: Optional[int] +) -> Optional[float]: + """Calculate test priority score using two-branch scoring algorithm. + + Args: + staleness_days: Staleness in days (positive if contract is newer). + test_commit_ts: Unix timestamp of test file's last commit. 
+ + Returns: + Priority score (higher means more urgent), or None if cannot calculate. + """ + now_ts = int(time.time()) + + if staleness_days is not None: + if staleness_days > 0: + # Case 1: Contract newer than test - use staleness_days + return staleness_days + elif test_commit_ts is not None: + # Case 2: Test up to date or newer - use test age + return (now_ts - test_commit_ts) / 86400 + elif test_commit_ts is not None: + # Fallback: only test timestamp available - use test age + return (now_ts - test_commit_ts) / 86400 + + return None + + +# === Contract Mapping Utilities === + + +def get_base_paths() -> tuple[Path, Path, Path]: + """Get base paths for repository, contracts, and output directory. + + Returns: + Tuple of (repo_root, contracts_bedrock, output_dir) paths. + """ + repo_root = Path(__file__).parent.parents[4] + contracts_bedrock = repo_root / "packages" / "contracts-bedrock" + output_dir = Path(__file__).parent / "output" + return repo_root, contracts_bedrock, output_dir + + +def find_source_contract( + test_file_path: Path, contracts_bedrock: Path +) -> Optional[Path]: + """Map a test file to its corresponding source contract. + + Args: + test_file_path: Path to the test file (.t.sol). + contracts_bedrock: Path to the contracts-bedrock directory. + + Returns: + Path to the corresponding source contract, or None if not found. 
+    """
+    # Get the test file name without .t.sol extension
+    test_name = test_file_path.stem.replace(".t", "")
+
+    # Get the relative directory structure from test/
+    test_relative = test_file_path.relative_to(contracts_bedrock / "test")
+    test_dir = test_relative.parent
+
+    # Try to find source contract in src/ with same directory structure
+    potential_source = contracts_bedrock / "src" / test_dir / f"{test_name}.sol"
+
+    if potential_source.exists():
+        return potential_source
+
+    # Try without directory structure in src/
+    for src_subdir in (contracts_bedrock / "src").rglob("*.sol"):
+        if src_subdir.name == f"{test_name}.sol":
+            return src_subdir
+
+    return None
+
+
+# === Exclusion Utilities ===
+
+
+def load_exclusions(contracts_bedrock: Path) -> tuple[list[Path], set[Path]]:
+    """Load and normalize exclusion paths from TOML configuration.
+
+    Args:
+        contracts_bedrock: Path to the contracts-bedrock directory.
+
+    Returns:
+        Tuple of (excluded_dirs, excluded_files) as normalized Path objects.
+
+    Raises:
+        FileNotFoundError: If exclusion.toml file is not found.
+        tomllib.TOMLDecodeError: If TOML file is malformed.
+ """ + exclusions_file = Path(__file__).parent.parent.parent / "exclusion.toml" + + with exclusions_file.open("rb") as f: + exclusions = tomllib.load(f) + + excluded_dirs: list[Path] = [] + excluded_files: set[Path] = set() + + # Get exclusion directories and files + exclusion_config = exclusions.get("exclusions", {}) + exclusion_directories = exclusion_config.get("directories", []) + exclusion_files = exclusion_config.get("files", []) + + # Process directory exclusions + for directory in exclusion_directories: + # Directory exclusion - store as Path object without trailing slash + excluded_dirs.append(Path(directory.rstrip("/"))) + + # Process file exclusions + for file_path in exclusion_files: + # File exclusion - store as Path object in set for O(1) lookup + excluded_files.add(Path(file_path)) + + # Add recently processed files from log.jsonl (avoid immediate duplicates) + log_file = Path(__file__).parent.parent.parent / "log.jsonl" + if log_file.exists(): + cutoff = time.time() - (7 * 24 * 3600) # 7 days + try: + with open(log_file) as f: + for line in f: + entry = json.loads(line.strip()) + if ( + entry.get("status") in ["finished", "blocked", "failed"] + and entry.get("run_time") + and datetime.strptime( + entry["run_time"], "%Y-%m-%d %H:%M:%S" + ).timestamp() + > cutoff + ): + test_path = entry.get("selected_files", {}).get("test_path") + if test_path: + excluded_files.add(Path(test_path)) + except (json.JSONDecodeError, ValueError, KeyError): + pass + + return excluded_dirs, excluded_files + + +def is_path_excluded( + relative_path: Path, excluded_dirs: list[Path], excluded_files: set[Path] +) -> bool: + """Check if a path should be excluded based on exclusion rules. + + Args: + relative_path: Path relative to contracts-bedrock directory. + excluded_dirs: List of excluded directory paths. + excluded_files: Set of excluded file paths. + + Returns: + True if the path should be excluded, False otherwise. 
+ """ + return relative_path in excluded_files or any( + relative_path.is_relative_to(excluded_dir) for excluded_dir in excluded_dirs + ) + + +# === File Discovery Utilities === + + +def find_test_files(contracts_bedrock: Path) -> list[Path]: + """Find all test files in the contracts-bedrock test directory. + + Args: + contracts_bedrock: Path to the contracts-bedrock directory. + + Returns: + Sorted list of test file paths. + """ + return sorted((contracts_bedrock / "test").rglob("*.t.sol")) + + +def filter_excluded_files( + test_files: list[Path], + contracts_bedrock: Path, + excluded_dirs: list[Path], + excluded_files: set[Path], +) -> list[Path]: + """Filter out excluded test files based on exclusion rules. + + Args: + test_files: List of test file paths. + contracts_bedrock: Path to the contracts-bedrock directory. + excluded_dirs: List of excluded directory paths. + excluded_files: Set of excluded file paths. + + Returns: + List of test files that are not excluded. + """ + filtered_files = [] + for file_path in test_files: + relative_path = file_path.relative_to(contracts_bedrock) + if not is_path_excluded(relative_path, excluded_dirs, excluded_files): + filtered_files.append(file_path) + return filtered_files + + +# === Output Generation Utilities === + + +def generate_ranking_json( + entries: list[dict[str, str | int | float | None]], output_dir: Path, run_id: str +) -> Path: + """Generate the ranking JSON file. + + Args: + entries: List of test-to-contract mappings with scores. + output_dir: Directory to write the output file. + run_id: Timestamp-based run identifier. + + Returns: + Path to the generated JSON file. 
+ """ + # Ensure output directory exists + output_dir.mkdir(parents=True, exist_ok=True) + + # Remove old ranking files + for old_file in output_dir.glob("*_ranking.json"): + old_file.unlink() + + # Sort entries by score (descending), with None scores at the end + sorted_entries = sorted( + entries, key=lambda x: (x["score"] is None, -(x["score"] or 0)) + ) + + # Create ranking JSON + ranking = { + "run_id": run_id, + "generated_at": datetime.now(timezone.utc).isoformat(), + "entries": sorted_entries, + } + + # Write to output file with run_id + output_file = output_dir / f"{run_id}_ranking.json" + with output_file.open("w") as f: + json.dump(ranking, f, indent=2) + + return output_file + + +# === Main Application Logic === + + +def create_test_entry( + test_file: Path, + source_contract: Path, + contracts_bedrock: Path, + repo_root: Path, +) -> dict[str, str | int | float | None]: + """Create a single test entry with all calculated metrics. + + Args: + test_file: Path to the test file. + source_contract: Path to the corresponding source contract. + contracts_bedrock: Path to the contracts-bedrock directory. + repo_root: Path to the git repository root. + + Returns: + Dictionary with test metrics and scores. 
+ """ + test_rel = str(test_file.relative_to(contracts_bedrock)) + source_rel = str(source_contract.relative_to(contracts_bedrock)) + + # Get commit timestamps + test_commit_ts = get_file_commit_timestamp(test_file, repo_root) + contract_commit_ts = get_file_commit_timestamp(source_contract, repo_root) + + # Calculate metrics + staleness_days = calculate_staleness_days(test_commit_ts, contract_commit_ts) + score = calculate_test_score(staleness_days, test_commit_ts) + + return { + "test_path": test_rel, + "contract_path": source_rel, + "test_commit_ts": test_commit_ts, + "contract_commit_ts": contract_commit_ts, + "staleness_days": staleness_days, + "score": score, + } + + +def collect_test_entries( + contracts_bedrock: Path, + excluded_dirs: list[Path], + excluded_files: set[Path], + repo_root: Path, +) -> list[dict[str, str | int | float | None]]: + """Collect test file entries and map them to source contracts. + + Args: + contracts_bedrock: Path to the contracts-bedrock directory. + excluded_dirs: List of excluded directory paths. + excluded_files: Set of excluded file paths. + repo_root: Path to the git repository root. + + Returns: + List of dictionaries with test_path, contract_path, commit timestamps, staleness_days, and score. 
+ """ + # Find and filter test files + test_files = find_test_files(contracts_bedrock) + filtered_files = filter_excluded_files( + test_files, contracts_bedrock, excluded_dirs, excluded_files + ) + + entries = [] + for test_file in filtered_files: + # Find corresponding source contract + source_contract = find_source_contract(test_file, contracts_bedrock) + + if source_contract: + entry = create_test_entry( + test_file, source_contract, contracts_bedrock, repo_root + ) + entries.append(entry) + + return entries + + +def main() -> None: + """Main function to generate test ranking JSON.""" + try: + # Generate unique run ID + run_id = datetime.now().strftime("%Y%m%d_%H%M%S") + print(f"Starting ranking run: {run_id}") + + # Get base paths + repo_root, contracts_bedrock, output_dir = get_base_paths() + + # Load exclusions + excluded_dirs, excluded_files = load_exclusions(contracts_bedrock) + + # Collect test entries + entries = collect_test_entries( + contracts_bedrock, excluded_dirs, excluded_files, repo_root + ) + + # Generate ranking JSON with run_id + output_file = generate_ranking_json(entries, output_dir, run_id) + + print(f"Generated {output_file} with {len(entries)} entries") + print(f"Run ID: {run_id}") + + except Exception as e: + print(f"Error generating test ranking: {e}") + raise + + +if __name__ == "__main__": + main() diff --git a/ops/ai-eng/contracts-test-maintenance/docs/runbook.md b/ops/ai-eng/contracts-test-maintenance/docs/runbook.md new file mode 100644 index 0000000000000..3a8a37e3c30bd --- /dev/null +++ b/ops/ai-eng/contracts-test-maintenance/docs/runbook.md @@ -0,0 +1,114 @@ +# AI Contract Test Maintenance System + +## Overview + +The AI Contract Test Maintenance System analyzes Solidity test files in the `contracts-bedrock` package and ranks them based on staleness metrics. It compares git commit timestamps between test files and their corresponding source contracts to identify which tests need attention most urgently. 
+ +The system uses a two-branch scoring algorithm: tests whose contracts have moved ahead receive priority based on staleness days, while up-to-date tests are ranked by age to ensure continuous coverage. + +## Usage + +```bash +# From the ai-eng directory +just ai-contracts-test +``` + +Individual steps (for debugging): +```bash +just rank # Rank tests by staleness +just render # Generate prompt for highest-priority test +just devin # Execute with Devin API +``` + +## Output + +### Test Ranking Output + +The `just rank` command generates `components/tests_ranker/output/{run_id}_ranking.json`: + +```json +{ + "run_id": "20250922_143052", + "generated_at": "2025-09-22T14:30:52.517107+00:00", + "entries": [ + { + "test_path": "test/L1/ProtocolVersions.t.sol", + "contract_path": "src/L1/ProtocolVersions.sol", + "test_commit_ts": 1746564380, + "contract_commit_ts": 1738079001, + "staleness_days": -98.21, + "score": 135.84 + } + ] +} +``` + +**Entry fields:** + +- `run_id` - Unique identifier for this ranking run (YYYYMMDD_HHMMSS format) +- `generated_at` - ISO timestamp when the ranking was generated +- `test_path` - Relative path to test file from contracts-bedrock +- `contract_path` - Relative path to source contract from contracts-bedrock +- `test_commit_ts` - Unix timestamp of test file's last commit +- `contract_commit_ts` - Unix timestamp of contract file's last commit +- `staleness_days` - Calculated staleness (positive = contract newer) +- `score` - Priority score (higher = more urgent) + +### Prompt Renderer Output + +The `just render` command generates a markdown file in `components/prompt-renderer/output/` with the name format `{run_id}_prompt.md`. This file contains the AI prompt template with the highest-priority test and contract paths filled in, ready to be used for test maintenance analysis. + +For example, a run with ID `20250922_143052` will generate `20250922_143052_prompt.md`. 
The system automatically links prompts to their corresponding ranking runs through the shared run ID. + +### Devin API Client + +The Devin API client (`components/devin-api/devin_client.py`) automatically: + +1. **Finds the latest prompt** from the prompt renderer output +2. **Creates a Devin session** with the generated prompt +3. **Monitors the session** until completion ("blocked", "expired", or "finished") +4. **Logs results** to `log.jsonl` in the project root + +#### Prerequisites + +Devin API credentials in `components/devin-api/.env` + +#### Session Monitoring + +The client monitors Devin sessions with resilient error handling: +- **30-second request timeout** to prevent hanging +- **Exponential backoff retry** for server errors (1min → 2min → 4min → 8min) +- **Patient monitoring** for long-running sessions (30+ minutes for CI completion) + +#### Session Logging + +All Devin sessions are automatically logged to `log.jsonl` with: + +```json +{ + "run_id": "20250924_160648", + "run_time": "2025-09-24 16:06:48", + "devin_session_id": "sess_abc123", + "selected_files": { + "test_path": "test/libraries/Storage.t.sol", + "contract_path": "src/libraries/Storage.sol" + }, + "status": "finished", + "pull_request_url": "https://github.com/ethereum-optimism/optimism/pull/12345" +} +``` + +**Log fields:** +- `run_id` - Links to the ranking run that generated this session +- `run_time` - Human-readable timestamp of the run +- `devin_session_id` - Unique Devin session identifier +- `selected_files` - The test-contract pair that was worked on +- `status` - Final session status ("finished", "blocked", "expired", "failed") +- `pull_request_url` - GitHub PR URL (only present if status is "finished") + +#### Duplicate Prevention + +The ranking system automatically excludes files processed in the **last 7 days** to prevent duplicate work: +- Files with status `finished`, `blocked`, or `failed` are temporarily excluded +- After 7 days, files become available for ranking again 
(aligns with PR auto-close policy) +- This prevents immediate re-ranking of files still under review diff --git a/ops/ai-eng/contracts-test-maintenance/exclusion.toml b/ops/ai-eng/contracts-test-maintenance/exclusion.toml new file mode 100644 index 0000000000000..d049863db28c9 --- /dev/null +++ b/ops/ai-eng/contracts-test-maintenance/exclusion.toml @@ -0,0 +1,26 @@ +# Exclusion configuration for contract test maintenance +# Separate configuration for directories and individual files + +[exclusions] +# Directory exclusions (exclude all files within these directories) +directories = [ + "test/invariants/", + "test/opcm/", + "test/scripts/", + "test/setup/" +] + +# Individual file exclusions +files = [ + "test/L1/OPContractsManagerContractsContainer.t.sol", + "test/dispute/lib/LibClock.t.sol", + "test/dispute/lib/LibGameId.t.sol", + "test/integration/ExecutingMessageEmitted.t.sol", + "test/invariants/Burn.Eth.t.sol", + "test/invariants/Burn.Gas.t.sol", + "test/libraries/DeployUtils.t.sol", + "test/universal/BenchmarkTest.t.sol", + "test/universal/ExtendedPause.t.sol", + "test/vendor/Initializable.t.sol", + "test/vendor/InitializableOZv5.t.sol" +] diff --git a/ops/ai-eng/contracts-test-maintenance/prompt/prompt.md b/ops/ai-eng/contracts-test-maintenance/prompt/prompt.md new file mode 100644 index 0000000000000..d039b93a9899d --- /dev/null +++ b/ops/ai-eng/contracts-test-maintenance/prompt/prompt.md @@ -0,0 +1,530 @@ +You are enhancing a Solidity test file to improve coverage and quality. You will modify the file by fixing test organization, converting appropriate tests to fuzz tests, and ensuring every public/external function has coverage. + + +You enhance test files by implementing comprehensive tests that improve coverage and quality. You prioritize improving existing tests over adding new ones. + + + +**Key Decision Points:** +- Fuzz or focused? → If testing same logic with different values, fuzz it +- New test or enhance existing? 
→ Always enhance existing first +- Function-specific or Uncategorized? → Ask "What's the PRIMARY behavior I'm testing?" +- Test a getter? → Only if it has logic beyond returning storage (see getter_strategy) + +**Naming Patterns:** +- Test contracts: `TargetContract_FunctionName_Test` +- Helper contracts: `TargetContract_Feature_Harness` +- Test functions: `[method]_[functionName]_[scenario]_[outcome]` + +**Zero Tolerance:** +- vm.expectRevert() must ALWAYS have arguments (selector or bytes message) - CI failure if missing +- All tests must pass +- No removing existing tests + + + +MUST modify the test file with implementations. Analysis-only = failure. +Only make changes you're confident about - analyze code behavior before testing. +Don't guess or assume - if unsure, examine the source contract carefully. + + + +1. NO creating NEW tests for inherited functions - only test functions declared in target contract +2. NO failing tests kept - all must pass or task fails +3. NO removing ANY existing tests - even if they test inherited functions (enhance/modify instead) + + + +- Enhancement First: Always improve existing tests before adding new ones +- Function-First Organization: Every function gets its own test contract; Uncategorized_Test is reserved for true multi-function integration scenarios +- Preserve Behavior: Modify tests only to improve coverage/naming while keeping original functionality +- Contract Reality: Test what the contract DOES, not what you think it SHOULD do +- Target Contract Only: Never test inherited functions - only test functions declared in the contract under test +- Test Valid Scenarios: Focus on legitimate use cases and edge cases, not artificial failure modes from broken setup +- Test Intent vs Side Effects: Verify tests fail for their intended reason, not technical side effects +- Test Uniqueness: Each test must verify distinct logic - different values alone don't justify separate tests + + + +{TEST_PATH} +{CONTRACT_PATH} + + + +Enhance 
the provided Solidity test file following these objectives: +1. Convert regular tests to fuzz tests where appropriate +2. Add tests for uncovered code paths (if statements, branches, reverts) +3. Ensure every public/external function has at least one test +4. Organize all tests to match source function declaration order + +Focus on mechanical improvements that increase coverage and quality. + + + +**Structured Enhancement Methodology** + +This systematic approach ensures comprehensive test improvements without missing critical coverage: + +**Phase 1 - Enhancement Analysis** +*Goal: Maximize value from existing tests* +- Check existing imports and dependencies for available libraries before implementing helpers +- Identify tests that can be converted to fuzz tests for broader coverage +- Find tests that need stronger assertions or edge case coverage +- Flag redundant tests that duplicate existing verification +- Document all improvement opportunities before implementation + +**Phase 2 - Coverage Gap Analysis** +*Goal: Identify missing test coverage* +- Survey codebase patterns and existing utility libraries before custom implementations +- List functions without any test coverage +- Find untested code branches (if statements, error conditions) +- Identify missing edge cases and boundary conditions +- Document all gaps that need new tests + +**Phase 3 - Implementation & Validation** +*Goal: Apply improvements while maintaining all tests passing* +- Implement enhancements identified in Phase 1 +- Add new tests for gaps identified in Phase 2 +- Validate each change maintains expected behavior +- Ensure all tests pass before proceeding to organization + +**Phase 4 - Organization & Finalization** +*Goal: Clean structure that matches source code* +- Verify zero semgrep violations and compiler warnings +- Final validation to ensure all tests pass + +**Methodology Benefits:** +- Systematic coverage ensures no functions or edge cases are missed +- Enhancement-first approach 
maximizes existing test value +- Structured validation prevents breaking changes +- Consistent organization improves maintainability + +*These phases provide analytical structure - you can iterate between them as needed, but ensure each phase's goals are met for comprehensive coverage.* + + + + +**Test Contract Names:** +- `TargetContract_FunctionName_Test` - ONE contract per function (no exceptions) +- `TargetContract_Uncategorized_Test` - For multi-function integration tests only (NEVER use "Unclassified") +- `TargetContract_TestInit` - Shared setup contract +- Constants/ALL CAPS: Convert to PascalCase (e.g., `MAX_LIMIT` → `TargetContract_MaxLimit_Test`) + +**Helper Contract Names:** + - `TargetContract_Purpose_Harness` - Required format for ALL helper contracts + - Examples: `L1Bridge_MaliciousToken_Harness`, `Portal_InvalidProof_Harness` + - Purpose should describe what the helper enables/simulates + +**Test Function Names:** +- Format: `[method]_[functionName]_[scenario]_[outcome]` + - Methods: `test`, `testFuzz`, `testDiff` + - Outcomes: `succeeds`, `reverts`, `fails` (never `works`) +- ALL parameters use underscore prefix: `_param` +- Read-only tests MUST have `view` modifier + +**Uncategorized Test Functions:** +- Use descriptive names: `test_depositAndWithdraw_stateConsistency_succeeds` +- NEVER use "test_uncategorized_" prefix +- Good scenarios: `multipleOperations`, `crossFunction`, `integration`, `stateTransition` + +**FORBIDDEN:** +- Generic test contracts (Security_Test, Edge_Test, etc.) 
+- Generic helper names (Helper, Mock, CheckSender) +- All edge cases must go in function-specific or Uncategorized contracts +- "Unclassified_Test" contracts (must use "Uncategorized_Test") + + + +**Categorization Rules:** +When deciding between function-specific vs Uncategorized_Test: + +Function-Specific Test Contract: +- Primary goal: Test ONE function's behavior +- Even if the test calls other functions for setup or verification +- Example: Testing function X that uses Y for verification → goes in X_Test +- Example: Testing getBytes32 using setBytes32 for setup → goes in GetBytes32_Test, NOT Uncategorized +- Key question: What are your assertions actually testing? That determines the contract. + +Uncategorized_Test Contract: +- Primary goal: Test integration/interaction between multiple functions +- Testing scenarios that span multiple functions equally +- Example: Testing function A followed by B for integration → goes in Uncategorized_Test + +Ask yourself: "What is the PRIMARY behavior I'm testing?" The answer determines the categorization. 
+ +**Expected Structure:** +Helper contracts → TestInit → function tests (in source order) → Uncategorized_Test last + +CRITICAL: Organization happens LAST, after all improvements are complete + +**COMMON CATEGORIZATION MISTAKES:** +- Putting tests in Uncategorized_Test just because they call multiple functions +- If you're only asserting on ONE function's output → it belongs in that function's test contract +- Setup/helper calls don't make it an integration test +- Real Uncategorized example: Testing deposit() followed by withdraw() to verify round-trip behavior +- Wrong Uncategorized usage: Testing getter after setter when only asserting the getter works + +**EMPTY CONTRACT CLEANUP:** +- If moving tests leaves a contract empty → DELETE the empty contract +- Empty test contracts are not placeholders - they're dead code +- This includes: Empty function-specific contracts, empty Uncategorized_Test + + + +Custom errors: `error ContractName_ErrorDescription()` +Test reverts: `vm.expectRevert(ContractName_Error.selector)` +Empty reverts: `vm.expectRevert(bytes(""))` - for reverts with no data +Events: `vm.expectEmit(true, true, true, true)` +Low-level calls: check both success=false and error selector +⚠️ CRITICAL CI FAILURE: vm.expectRevert() without arguments = automatic semgrep violation + + + +**Test Structure & Setup:** +- Test structure: Setup → Expectations → Action → Assertions +- Helper contracts must be declared at file level, never nested +- Add all required imports when using new types/contracts +- vm.expectRevert() must come BEFORE the reverting call, not after vm.prank + +**Test Value & Quality:** +- MEANINGFUL TESTS: Every test must have a clear pass/fail condition that validates specific behavior + If a test cannot fail or doesn't validate anything specific, it provides no value +- UNIQUENESS CHECK: Before creating a test, verify it tests different logic than existing tests + Different values testing the same condition = duplicate test = use fuzz 
instead +- FAILURE ANALYSIS: When a test expects failure, verify it fails for the intended reason + If testing X should fail, ensure it fails because of X, not unrelated technical issues + +**Code Efficiency:** +- LIBRARY CHECK: Before implementing helper functions, ask "Does existing functionality cover this?" + Check imports, dependencies, and similar files for patterns/libraries already in use +- EFFICIENCY CHECK: Use the simplest approach that achieves the goal + If testing all values is simpler than fuzzing (e.g., arrays with <10 items), test them all +- DRY PRINCIPLE: Extract common setup code rather than duplicating across tests + Repeated code belongs in setUp() or helper functions +- GETTER CHECK: Simple getters that only return storage values do NOT need separate tests + If already verified in initialization or other tests, skip the standalone test + Only test getters with complex logic or side effects + +**Implementation Details:** +- Before implementing helper functions, check for existing libraries (OpenZeppelin, Solady, etc.) 
+- Version testing: Use `assertGt(bytes(contractName.version()).length, 0);` not specific version strings +- Never use dummy values: hex"test" → use valid hex like hex"1234" or hex"" +- Check actual contract behavior before making assumptions + + + +**Special Scenarios:** +- Interface changes during development: Complete current tests first, then adapt to new interface +- Multiple contract interactions: Use Uncategorized_Test for true cross-contract integration +- Performance/gas tests: Include in function-specific test contracts +- Mock requirements: Create `TargetContract_MockDependency_Harness` helpers + + + + + +**Should I use a fuzz test?** + +YES - Use fuzz test when: +- Testing value ranges (amounts, timestamps, array lengths) +- Testing access control across multiple addresses +- Multiple input values should produce same behavior +- You would otherwise write multiple tests with different values + +NO - Use focused test when: +- You need a specific value that has special contract meaning +- Testing exact error messages that only occur at specific values +- Complex setup makes fuzzing impractical +- Small finite set (<10 items) where testing all is simpler + +**Decision shortcut:** If you're tempted to copy-paste a test with different values → use fuzz instead + +**CRITICAL Rules:** +- If your test needs a SPECIFIC value, DO NOT make it a fuzz test! 
+ - Wrong: `testFuzz_foo_reverts(address _token) { vm.assume(_token == address(0)); }` + - Right: `test_foo_zeroAddress_reverts() { foo(address(0)); }` +- Fuzz tests must test actual behavior, not just "doesn't crash" +- Ensure proper setup for all fuzzed parameters +- Validate specific outcomes, not just absence of reverts + + + +Always use bound() for ranges: `_limit = bound(_limit, 0, MAX - 1)` +Only use vm.assume() when bound() isn't possible (e.g., address exclusions) +Check actual function requirements before adding constraints - don't assume +NEVER fuzz a parameter if you need a specific value - just use that value directly + + + +- Testing simple getters that are already verified in other tests (e.g., initialization) +- Redundant tests that duplicate existing coverage +- Tests focused on implementation details rather than breakable behavior +- Testing failures from invalid setup or configuration +- Testing specific values unless they have special contract meaning +- Creating tests for technical artifacts vs business logic validation +- Tests that pass/fail due to unrelated technical reasons rather than the intended business logic +- Multiple tests for the same condition with different values (unless values have special meaning) +- Tests that are logically equivalent despite using different numbers +- Tests that cannot fail or always pass regardless of input +- Testing undefined behavior without proper setup or context + + + +**Test getters that have:** +- Calculations or transformations (`balance * rate / 100`) +- External contract calls (`token.balanceOf(user)`) +- State changes or side effects +- Error conditions or validation logic + +**Skip getters that:** +- Only return storage values (`return _owner`) +- Are already verified in other tests (initialization, state changes) + +**Example:** If `initialize()` sets owner and verifies `getOwner()` returns it, no separate `getOwner()` test needed. + + + +Before creating any test, verify: +1. 
Can this test ever fail? If no → don't create it +2. What specific behavior am I validating? If unclear → reconsider +3. Does this test increase confidence in correctness? If no → skip it + +A test provides value only if: +- It has clear success and failure conditions +- It validates specific, expected behavior +- It could catch real bugs or regressions + + + +Maintain clean, efficient test code: +1. Choose the simplest approach - don't over-engineer +2. Extract repeated setup into helper functions or setUp() +3. Remove any "thinking out loud" comments before completion +4. If a set has <10 items, consider testing all rather than fuzzing + +Quality indicators: +- No duplicated code blocks across tests +- Clear, purposeful comments only +- Appropriate technique for the data size + + + + + +Inherited function test + +// StandardBridge.sol has bridgeETH() +// L1StandardBridge.sol inherits from StandardBridge +// In L1StandardBridge.t.sol: +contract L1StandardBridge_BridgeETH_Test { + function test_bridgeETH_succeeds() { // ❌ Testing inherited function + + +// Don't create this test - bridgeETH is inherited, not declared in L1StandardBridge + + + + +Duplicate boundary tests + +function test_challenge_boundary_reverts() { + vm.warp(challengedAt + window + 1); // ❌ Same logic +} +function test_challenge_afterWindow_reverts() { + vm.warp(challengedAt + window + 1); // ❌ Different name, same test +} + + +function testFuzz_challenge_afterWindow_reverts(uint256 _blocksAfter) { + _blocksAfter = bound(_blocksAfter, 1, 1000); + vm.warp(challengedAt + window + _blocksAfter); // ✓ Fuzz instead +} + + + + +Meaningless test that always passes + +function testFuzz_isEnabled_randomAddress_succeeds(address _random) public { + try module.isEnabled(_random) returns (bool result) { + assertTrue(true); // ❌ Always passes + } catch { + assertTrue(true); // ❌ Always passes + } +} + + +// Don't create this test - it cannot fail and validates nothing +// Either test specific addresses with 
expected outcomes or skip entirely + + + + +Redundant getter test + +contract ProtocolVersions_Required_Test { + function test_required_succeeds() external view { + // ❌ Getter already tested in initialize test + assertEq(protocolVersions.required(), required); + } +} + + +// Skip this test - the getter is already verified in test_initialize_succeeds() +// Only test getters with complex logic or side effects + + + + +Semgrep violation + +function test_validate_fails() external { + vm.expectRevert(); // ❌ Missing revert reason + validator.validate(params); +} + + +function test_validate_zeroAddress_reverts() external { + vm.expectRevert(Validator.InvalidParams.selector); // ✓ Specific selector + + +Enhancement vs new test + +// Existing test only checks basic case +function test_transfer_succeeds() { transfer(100); } + +// Creating separate test for edge case +function test_transfer_boundary_succeeds() { transfer(0); } // ❌ New test instead of enhancing + + +// Enhance existing test to cover both cases +function testFuzz_transfer_validAmount_succeeds(uint256 _amount) { + _amount = bound(_amount, 0, MAX_BALANCE); // ✓ Enhanced to cover all cases including boundary + transfer(_amount); +} + + + +Getter test misplaced in Uncategorized + +contract Storage_Uncategorized_Test { + function testFuzz_setGetBytes32Multi_succeeds(Slot[] calldata _slots) { + setter.setBytes32(slots); // Setup + for (uint256 i; i < slots.length; i++) { + assertEq(setter.getBytes32(slots[i].key), slots[i].value); // ❌ Only testing getter + } + } +} + + +contract Storage_GetBytes32_Test { + function testFuzz_getBytes32_multipleSlots_succeeds(Slot[] calldata _slots) { + setter.setBytes32(slots); // Setup is fine + for (uint256 i; i < slots.length; i++) { + assertEq(setter.getBytes32(slots[i].key), slots[i].value); // ✓ Testing getter + } + } +} + + + +Empty contract after reorganization + +// After moving test to GetBytes32_Test +contract Storage_Uncategorized_Test is Storage_TestInit { + // ❌ 
Empty contract left behind +} + + +// Contract completely removed from file ✓ +// No empty Storage_Uncategorized_Test remains + + + + + +- Use `/// @notice` to explain what the test verifies +- MANDATORY: Keep ALL comments under 100 characters +- Focus on what behavior is being tested +- For parameters: provide context, not redundancy ("address to test" → "random address for access control") +- In Uncategorized_Test: explain why multi-function testing is needed +- Remove any working notes or self-directed comments before finalizing +- Comments should explain the "why" for future readers, not document the thought process + + + +**MANDATORY VALIDATION STEPS:** +1. Run all tests: `just test-dev --match-path test/[folder]/[ContractName].t.sol -v` +2. Clean artifacts: `just clean` +3. Run pre-PR validation: `just pre-pr` + - This runs lint and fast checks + - MUST pass before creating any PR +4. Search for any vm.expectRevert() without arguments and fix them + +**ZERO TOLERANCE - CI FAILURES:** +- vm.expectRevert() must ALWAYS have arguments: either selector or bytes message +- ALL tests must pass - no exceptions +- NO compiler warnings allowed + +**TROUBLESHOOTING COMMON ISSUES:** +*Tests fail after changes:* +- Check the specific failure reason in logs +- Verify test setup matches contract requirements +- Don't revert improvements - fix the underlying issue + +*Semgrep violations:* +- Search for `vm.expectRevert()` without arguments +- Replace with `vm.expectRevert(ErrorName.selector)` or `vm.expectRevert(bytes("message"))` + +*Organization confusion:* +- Expected order: Helper contracts at top, Uncategorized last +- Function tests should follow source contract declaration order + +*Fuzz test failures:* +- Check if constraints properly bound the values +- Verify test setup works for all possible fuzzed inputs +- Consider if the fuzzed parameter needs a specific value instead + + + +**PULL REQUEST CREATION:** + +CRITICAL: Only proceed after ALL validation steps pass, 
especially `just pre-pr`. +- If `just pre-pr` fails → NO PR (fix issues first) +- This is a mandatory gate - no exceptions + +After successful validation, open a pull request using the default PR template. + +**Branch Naming:** +- Format: `ai/improve-[contract-name]-coverage` +- Example: `ai/improve-l1-standard-bridge-coverage` + + + +**Phase 1 - Enhancement Analysis:** +- Fuzz conversion opportunities: [count and list] +- Tests needing improvements: [count and list] + +**Phase 2 - Coverage Analysis:** +- Functions without tests: [count and list] +- Uncovered code paths: [count and list] + +**Phase 3 - Implementation Summary:** +- Tests converted to fuzz: [count with old→new names] +- New tests added: [count with names] +- All tests passing: [YES/NO] + +**Phase 4 - Organization:** +- Final order matches source: [YES/NO] +- Tests reorganized: [count if any needed to move] + +**Phase 5 - PR Submission:** +- Validation complete: [YES/NO] +- PR opened with default template: [YES/NO] + +**Commit Message:** +refactor(test): improve [ContractName] test coverage and quality +- add X tests for uncovered functions/paths +- convert Y tests to fuzz tests +- [other specific changes] + diff --git a/ops/ai-eng/graphite/rules.md b/ops/ai-eng/graphite/rules.md new file mode 100644 index 0000000000000..478567f4b04d1 --- /dev/null +++ b/ops/ai-eng/graphite/rules.md @@ -0,0 +1,26 @@ +# Diamond Code Review Rules + +This file explains the rules that you should use when reviewing a PR. + +## Applicability + +You are ONLY to review changes to Solidity files (*.sol). Do NOT leave comments on any other file types. + +## Rules for Reviewing Solidity Files + +This section applies to Solidity files ONLY. + +### Style Guide + +- Follow the style guide found at `.cursor/rules/solidity-styles.mdc` in the root of this repository. + +### Versioning + +- Verify that the version bumps being made in source files are appropriate for the type of change that was made. 
For example, a change that only modifies a contract should be a patch change, not a minor change. + +### Interfaces + +- Source files are expected to have a corresponding interface file in the `interfaces/` folder +- Do NOT review for missing interface files, CI checks will handle that +- Do NOT review for discrepancies between interface files and the source files, CI will handle that +- We do NOT require natspec comments in interface files, only in the source files diff --git a/ops/ai-eng/justfile b/ops/ai-eng/justfile new file mode 100644 index 0000000000000..a4f4cdb89d2c9 --- /dev/null +++ b/ops/ai-eng/justfile @@ -0,0 +1,31 @@ +# AI Engineering Tools + +# Contract Test Maintenance + +# Run the contract test ranking script +rank: + cd contracts-test-maintenance/components/tests_ranker && python3 test_ranker.py + +# Render prompt with the first ranked test and contract +render: + cd contracts-test-maintenance/components/prompt-renderer && python3 render.py + +# Run ranking and render prompt in one command +prompt: + just rank + just render + +# Run the Devin client +devin: + cd contracts-test-maintenance/components/devin-api && python3 devin_client.py + +# Run the complete cycle: rank, render, and send to Devin +ai-contracts-test: + # Step 1: Rank tests + just rank + + # Step 2: Render prompt + just render + + # Step 3: Send to Devin + just devin diff --git a/ops/docker/op-stack-go/Dockerfile b/ops/docker/op-stack-go/Dockerfile index 5f40aaff94967..59e55c59fa521 100644 --- a/ops/docker/op-stack-go/Dockerfile +++ b/ops/docker/op-stack-go/Dockerfile @@ -36,7 +36,25 @@ RUN wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_$TARG # Install versioned toolchain COPY ./mise.toml . 
-RUN mise trust && mise install -v -y just && cp $(mise which just) /usr/local/bin/just && just --version +RUN mise trust +RUN mise install -v -y just && cp $(mise which just) /usr/local/bin/just && just --version +# mise does not install the alpine binary for forge, so we download it manually +COPY ./op-deployer/pkg/deployer/forge/version.json /tmp/op-deployer-versions.json +RUN set -e && \ + FORGE_VERSION=$(jq -r '.forge' /tmp/op-deployer-versions.json) && \ + mkdir -p /usr/local/bin && \ + FORGE_URL="https://github.com/foundry-rs/foundry/releases/download/${FORGE_VERSION}/foundry_${FORGE_VERSION}_alpine_${TARGETARCH}.tar.gz" && \ + echo "Downloading forge from: ${FORGE_URL}" && \ + if ! curl -SL "${FORGE_URL}" -o /tmp/foundry.tar.gz; then \ + echo "Failed to download forge from ${FORGE_URL}" >&2; \ + exit 1; \ + fi && \ + ls -la /tmp/foundry.tar.gz && \ + tar -tzf /tmp/foundry.tar.gz && \ + tar -xzf /tmp/foundry.tar.gz -C /usr/local/bin forge && \ + rm /tmp/foundry.tar.gz && \ + chmod +x /usr/local/bin/forge +# Note: can't check the forge version because in CI we're building arm64 images using a non-emulated x86_64 host, therefore the version check would fail # We copy the go.mod/sum first, so the `go mod download` does not have to re-run if dependencies do not change. 
COPY ./go.mod /app/go.mod @@ -146,8 +164,9 @@ RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache FROM --platform=$BUILDPLATFORM builder AS op-deployer-builder ARG OP_DEPLOYER_VERSION=v0.0.0 -RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build cd op-chain-ops && make op-deployer \ - GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_DEPLOYER_VERSION" +RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build just \ + GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE VERSION="$OP_DEPLOYER_VERSION" \ + op-deployer/build-go FROM --platform=$BUILDPLATFORM builder AS dac-server-builder @@ -206,12 +225,14 @@ RUN apt-get update && apt-get install -y --no-install-recommends musl openssl ca COPY --from=op-challenger-builder /app/op-challenger/bin/op-challenger /usr/local/bin/ # Copy in op-program and cannon COPY --from=op-program-builder /app/op-program/bin/op-program /usr/local/bin/ +ENV OP_CHALLENGER_ASTERISC_SERVER=/usr/local/bin/op-program ENV OP_CHALLENGER_CANNON_SERVER=/usr/local/bin/op-program COPY --from=cannon-builder /app/cannon/bin/cannon /usr/local/bin/ ENV OP_CHALLENGER_CANNON_BIN=/usr/local/bin/cannon # Copy in kona and asterisc COPY --from=kona /usr/local/bin/kona-host /usr/local/bin/ ENV OP_CHALLENGER_ASTERISC_KONA_SERVER=/usr/local/bin/kona-host +ENV OP_CHALLENGER_CANNON_KONA_SERVER=/usr/local/bin/kona-host COPY --from=asterisc /usr/local/bin/asterisc /usr/local/bin/ ENV OP_CHALLENGER_ASTERISC_BIN=/usr/local/bin/asterisc CMD ["op-challenger"] @@ -245,7 +266,9 @@ COPY --from=op-test-sequencer-builder /app/op-test-sequencer/bin/op-test-sequenc CMD ["op-test-sequencer"] FROM $TARGET_BASE_IMAGE AS op-deployer-target -COPY --from=op-deployer-builder /app/op-chain-ops/bin/op-deployer /usr/local/bin/ +RUN apk add --no-cache ca-certificates +COPY --from=op-deployer-builder /usr/local/bin/forge 
/app/op-deployer/bin/op-deployer /usr/local/bin/ +ENV FORGE_ENV=alpine CMD ["op-deployer"] FROM $TARGET_BASE_IMAGE AS op-dripper-target diff --git a/ops/docker/op-stack-go/Dockerfile.dockerignore b/ops/docker/op-stack-go/Dockerfile.dockerignore index 5e98b81312ccf..b39673545c877 100644 --- a/ops/docker/op-stack-go/Dockerfile.dockerignore +++ b/ops/docker/op-stack-go/Dockerfile.dockerignore @@ -29,6 +29,7 @@ !/go.sum !/justfiles !/mise.toml +!/op-e2e/e2eutils **/bin **/testdata diff --git a/ops/scripts/latest-versions.sh b/ops/scripts/latest-versions.sh new file mode 100755 index 0000000000000..341281dc9364a --- /dev/null +++ b/ops/scripts/latest-versions.sh @@ -0,0 +1,140 @@ +#!/usr/bin/env bash +set -euo pipefail + +# latest-versions.sh - reads all remote tags from the origin repository, +# groups them by component, and then finds the latest version for each component. + +######################################################## +#### FUNCTIONS #### +######################################################## + +# find_latest_versions - finds both latest and stable versions in one pass +# +# Input: space-separated string of version numbers (e.g., "1.2.3 1.3.0-rc.1 1.2.4") +# Output: single line in format "latest_version|stable_version" +# where stable_version is empty if no stable (vX.Y.Z only) versions exist +# +# Latest: Uses custom precedence rules (non-suffix beats suffix with same base version) +# 1. Highest semantic version wins (e.g., 1.3.0 > 1.2.9) +# 2. For same base version, non-suffixed preferred over suffixed (e.g., 1.13.6 > 1.13.6-rc.3) +# 3. Higher base version beats lower, even if suffixed (e.g., 1.13.6-rc.1 > 1.13.5) +# 4. 
For same base version with multiple suffixes, higher lexicographical suffix wins (e.g., 1.5.3-rc.3 > 1.5.3-rc.1) +# Stable: Highest pure X.Y.Z format (no suffixes) +find_latest_versions() { + local versions="$1" + + # Convert space-separated string to array for iteration + read -ra version_array <<< "$versions" + + # Create sortable versions for both latest and stable + local sortable_versions=() + local stable_sortable_versions=() + + for ver in "${version_array[@]}"; do + # Extract base version (everything before first '-' suffix) + local base="${ver%%-*}" + + # Modifies the string (while preserving the original version via | separator) + # so lexicographical sort will work + if [[ "$ver" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + # stable (non-suffixed) versions: base.1.0 (priority 1, higher than any suffix) + local sortable_ver="$base.1.0|$ver" + sortable_versions+=("$sortable_ver") + stable_sortable_versions+=("$sortable_ver") + else + # suffixed versions: base.0.suffix (priority 0, lower than stable version) + local suffix="${ver#*-}" + sortable_versions+=("$base.0.$suffix|$ver") + fi + done + + # Find highest latest version using lexicographical sort + local latest_sortable + latest_sortable=$(printf '%s\n' "${sortable_versions[@]}" | sort -V | tail -n1) + local latest="${latest_sortable##*|}" + + # Find highest stable version using lexicographical sort + local stable="" + if [[ ${#stable_sortable_versions[@]} -gt 0 ]]; then + local stable_sortable + stable_sortable=$(printf '%s\n' "${stable_sortable_versions[@]}" | sort -V | tail -n1) + stable="${stable_sortable##*|}" + fi + + # Output in format "latest_version|stable_version" + echo "$latest|$stable" +} + +# Helper function to print component JSON +# Output example: +# "component": { +# "stable": "v1.0.0" (empty string if no stable version), +# "latest": "v1.0.0" +# } +print_component_json() { + local component="$1" + local stable_ver="$2" + local latest_ver="$3" + local is_first="$4" + + [[ "$is_first" != 
"true" ]] && echo "," + + local stable_field='""' + [[ -n "$stable_ver" ]] && stable_field="\"v$stable_ver\"" + + printf ' "%s": {\n "stable": %s,\n "latest": "v%s"\n }' \ + "$component" "$stable_field" "$latest_ver" +} + +######################################################## +#### MAIN #### +######################################################## + +declare -A component_versions # hash map: component -> "space-separated versions" +declare -A latest_versions # hash map: component -> latest version +declare -A stable_versions # hash map: component -> stable version + +# Collect all remote tags once and group by component in `component_versions` +while IFS= read -r tag; do + # Skip empty lines + [[ -z "$tag" ]] && continue + + # Skip ^{} annotated tags completely + [[ "$tag" == *"^{}" ]] && continue + + # git ls-remote output format: " refs/tags/" + # Only process tags that match our refs/tags//v pattern + if [[ "$tag" =~ refs/tags/([a-zA-Z0-9_-]+)/v(.+)$ ]]; then + component="${BASH_REMATCH[1]}" + version="${BASH_REMATCH[2]}" + + # Append version to component's list (space-separated) + if [[ -n "${component_versions[$component]:-}" ]]; then + component_versions["$component"]+=" $version" + else + component_versions["$component"]="$version" + fi + fi +done < <(git ls-remote --tags origin) + +# Process each component once and store results in `latest_versions`, `stable_versions` +for component in "${!component_versions[@]}"; do + result=$(find_latest_versions "${component_versions[$component]}") + latest_versions["$component"]="${result%|*}" # Everything before pipe delimiter + stable_versions["$component"]="${result#*|}" # Everything after pipe delimiter +done + +# Sort components alphabetically for consistent output +mapfile -t sorted_components < <(printf '%s\n' "${!latest_versions[@]}" | sort) + +# Print results in JSON format +echo "{" +for i in "${!sorted_components[@]}"; do + component="${sorted_components[i]}" + print_component_json "$component" \ + 
"${stable_versions[$component]}" \ + "${latest_versions[$component]}" \ + "$([ "$i" -eq 0 ] && echo true || echo false)" +done +echo "" +echo "}" diff --git a/packages/contracts-bedrock/book/src/contributing/opcm.md b/packages/contracts-bedrock/book/src/contributing/opcm.md index 99148513899d9..9bd43d623aadf 100644 --- a/packages/contracts-bedrock/book/src/contributing/opcm.md +++ b/packages/contracts-bedrock/book/src/contributing/opcm.md @@ -51,8 +51,7 @@ corresponding smart contract release, and upgrading existing chains from one ver smart contract release. Chains that are multiple versions behind must be upgraded in multiple stages across multiple OPCMs. -The OPCM supports upgrading Superchain-wide contracts like `ProtocolVersions` and the `SuperchainConfig`. The OPCM will -perform the upgrade when the user calling the `upgrade` method is also the `UpgradeController`. +The OPCM supports upgrading Superchain-wide contracts like `ProtocolVersions` and the `SuperchainConfig`. ## Usage diff --git a/packages/contracts-bedrock/book/src/contributing/style-guide.md b/packages/contracts-bedrock/book/src/contributing/style-guide.md index 0bfa18df11e79..a8686aa023ffd 100644 --- a/packages/contracts-bedrock/book/src/contributing/style-guide.md +++ b/packages/contracts-bedrock/book/src/contributing/style-guide.md @@ -29,9 +29,13 @@ -This document provides guidance on how we organize and write our smart contracts. For cases where -this document does not provide guidance, please refer to existing contracts for guidance, -with priority on the `L2OutputOracle` and `OptimismPortal`. +This document provides guidance on how we organize and write our smart contracts. + +Notes: +1. There are many cases where the code is not up to date with this guide, when in doubt, this guide + should take precedence. +2. For cases where this document does not provide guidance, please refer to existing contracts, + with priority on the `SystemConfig` and `OptimismPortal`. 
## Standards and Conventions @@ -57,24 +61,90 @@ We also have the following custom tags: #### Errors -- Use `require` statements when making simple assertions. -- Use `revert(string)` if throwing an error where an assertion is not being made (no custom errors). - See [here](https://github.com/ethereum-optimism/optimism/blob/861ae315a6db698a8c0adb1f8eab8311fd96be4c/packages/contracts-bedrock/contracts/L2/OVM_ETH.sol#L31) - for an example of this in practice. -- Error strings MUST have the format `"{ContractName}: {message}"` where `message` is a lower case string. +- Prefer custom Solidity errors for all new errors. +- Name custom errors using `ContractName_ErrorDescription`. +- Use `revert ContractName_ErrorDescription()` to revert. +- Avoid `revert(string)` and string-typed error messages in new code. + +Example: + +```solidity +// ✅ Correct - Custom errors with contract-prefixed names +contract SystemConfig { + error SystemConfig_InvalidFeatureState(); + error SystemConfig_UnauthorizedCaller(address caller); + + address internal owner; + + function setFeature(bool _enabled) external { + if (msg.sender != owner) revert SystemConfig_UnauthorizedCaller(msg.sender); + if (!_enabled) revert SystemConfig_InvalidFeatureState(); + // ... + } +} + +// ❌ Incorrect - string-based reverts and contract-prefixed strings +function bad(uint256 _amount) external { + require(_amount > 0, "MyContract: amount must be > 0"); // Prefer custom error + revert("MyContract: unsupported"); // Avoid string reverts +} +``` #### Function Parameters - Function parameters should be prefixed with an underscore. +Example: + +```solidity +// ✅ Correct - parameters are prefixed with underscore +function setOwner(address _newOwner) external { + // ... +} + +// ❌ Incorrect - parameters without underscore prefix +function setOwner(address newOwner) external { + // ... +} +``` + #### Function Return Arguments - Arguments returned by functions should be suffixed with an underscore. 
+Example: + +```solidity +// ✅ Correct - return variable is suffixed with underscore +function balanceOf(address _account) public view returns (uint256 balance_) { + balance_ = balances[_account]; +} + +// ❌ Incorrect - return variable without underscore suffix +function balanceOf(address _account) public view returns (uint256 balance) { + balance = balances[_account]; +} +``` + #### Event Parameters +- Event parameters should be named using camelCase. - Event parameters should NOT be prefixed with an underscore. +Example: + +```solidity +// ✅ Correct - event params are not prefixed with underscore +event OwnerChanged(address previousOwner, address newOwner); + +// ❌ Incorrect - event params prefixed with underscore +event OwnerChanged(address _previousOwner, address _newOwner); + +// ❌ Incorrect - event params are not camelCase or are unnamed +event OwnerChanged(address, address NEW_OWNER); + +``` + #### Immutable variables Immutable variables: @@ -87,6 +157,30 @@ This approach clearly indicates to the developer that the value is immutable, wi the non-standard casing to the interface. It also ensures that we don’t need to break the ABIs if we switch between values being in storage and immutable. +Example: + +```solidity +contract ExampleWithImmutable { + // ❌ Incorrect - immutable is not SCREAMING_SNAKE_CASE + address internal immutable ownerAddress; + + // ❌ Incorrect - immutable is public + address public immutable ownerAddress; + + // ✅ Correct - immutable is internal and SCREAMING_SNAKE_CASE + address internal immutable OWNER_ADDRESS; + + constructor(address _owner) { + OWNER_ADDRESS = _owner; + } + + // ✅ Handwritten getter + function ownerAddress() public view returns (address) { + return OWNER_ADDRESS; + } +} +``` + #### Spacers We use spacer variables to account for old storage slots that are no longer being used. @@ -95,6 +189,21 @@ The name of a spacer variable MUST be in the format `spacer_<slot>_<offset>_<length>` where `<length>` is the original size of the variable. 
Spacers MUST be `private`. +Example: + +```solidity +contract ExampleStorageV2 { + // ✅ Correct - spacer preserves old storage layout + bytes32 private spacer_5_0_32; + uint256 public value; +} + +// ❌ Incorrect - wrong visibility and/or naming +contract BadStorageLayout { + bytes32 internal spacer5; +} +``` + ### Proxy by Default All contracts should be assumed to live behind proxies (except in certain special circumstances). @@ -147,6 +256,41 @@ patch increment should be used. Where basic functionality is already supported by an existing contract in the OpenZeppelin library, we should default to using the Upgradeable version of that contract. +### Interface Inheritance + +In order to reduce build times, all external dependencies (ie. a contract that is being interacted with) +should be imported as interfaces. In order to facilitate this, implementation contracts must have an +associated interface in the `interfaces/` directory of the contracts package. Checks in CI +will ensure that the interface exists and is correct. These interfaces should include a +"pseudo-constructor" function (`function __constructor__()`) which ensures that the constructor's +encoding is exposed in the ABI. + +Contracts must not inherit from their own interfaces (e.g., `contract SomeContract is ISomeContract`). +Interfaces may or may not inherit from other interfaces to compose functionality. + +**Rationale:** + +- **Alignment Issues**: If a contract inherits from a base contract (like `Ownable`), it cannot inherit from the interface as well, as this prevents 1:1 alignment between the implementation and interface, since the interface cannot include the base contract functions (ie. `owner()`) without causing compiler errors. +- **Constructor Complications**: Interface inheritance can cause issues with pseudo-constructors. 
+ +**Example:** + +```solidity +// ✅ Correct - contract inherits from base contracts, interface composes other interfaces +contract SomeContract is SomeBaseContract, ... { + // Implementation +} + +interface ISomeContract is ISomeBaseContract { + // Interface definition +} + +// ❌ Incorrect - contract inheriting from its own interface +contract SomeContract is ISomeContract, ... { + // This creates alignment and compilation issues +} +``` + ### Source Code The following guidelines should be followed for all contracts in the `src/` directory: diff --git a/packages/contracts-bedrock/book/src/introduction.md b/packages/contracts-bedrock/book/src/introduction.md index a6c0c841926a2..1f24911d06a78 100644 --- a/packages/contracts-bedrock/book/src/introduction.md +++ b/packages/contracts-bedrock/book/src/introduction.md @@ -44,13 +44,4 @@ OP Stack smart contracts use contract interfaces in a relatively unique way. Ple OP Stack smart contracts are designed to utilize a single, consistent Solidity version. Please refer to the [Solidity upgrades][solidity-upgrades] guide to understand the process for updating to newer Solidity versions. -[solidity-upgrades]: ./policies/solidity-upgrades.md - -### Frozen Code - -From time to time we need to ensure that certain files remain frozen, as they may be under audit or a large PR is in the -works and we wish to avoid a large rebase. In order to enforce this, a hardcoded list of contracts is stored in -`./scripts/checks/check-frozen-files.sh`. Any change which affects the resulting init or source code of a contract which -is not allowed to be modified will prevent merging to the `develop` branch. - -In order to remove a file from the freeze it must be removed from the check file. 
\ No newline at end of file +[solidity-upgrades]: ./policies/solidity-upgrades.md \ No newline at end of file diff --git a/packages/contracts-bedrock/book/src/policies/code-freezes.md b/packages/contracts-bedrock/book/src/policies/code-freezes.md deleted file mode 100644 index bcabe8d672a4c..0000000000000 --- a/packages/contracts-bedrock/book/src/policies/code-freezes.md +++ /dev/null @@ -1,22 +0,0 @@ -# Smart Contract Code Freeze Process - -The Smart Contract Freeze Process is used to protect specific files from accidental changes during sensitive periods. - -## Code Freeze - -Code freezes are implemented by comparison of the bytecode and source code hashes of the local file against the upstream files. - -To enable a code freeze, follow these steps: - -1. Create a PR. -2. The `semver-lock.json` file should already be up to date, but run anyway `just semver-lock` to be sure. -3. Comment out the path and filename of the file/s you want to freeze in check-frozen-files.sh. - -To disable a code freeze, comment out the path and filename of the file/s you want to unfreeze in check-frozen-files.sh. -1. Create a PR. -2. Uncomment the path and filename of all files in check-frozen-files.sh. - -## Exceptions - -To bypass the freeze you can apply the "M-exempt-frozen-files" label on affected PRs. This should be done upon agreement with the code owner. Expected uses of this exception are to fix issues found on audits or to add comments to frozen files. 
- diff --git a/packages/contracts-bedrock/book/src/policies/versioning.md b/packages/contracts-bedrock/book/src/policies/versioning.md index 84a2b852f00c1..53b988d721248 100644 --- a/packages/contracts-bedrock/book/src/policies/versioning.md +++ b/packages/contracts-bedrock/book/src/policies/versioning.md @@ -80,8 +80,6 @@ The [OPCM](https://github.com/ethereum-optimism/optimism/blob/develop/packages/c The `OPCM` is the source of truth for the contracts that belong in a release, available as on-chain addresses by querying [the `getImplementations` function](https://github.com/ethereum-optimism/optimism/blob/4c8764f0453e141555846d8c9dd2af9edbc1d014/packages/contracts-bedrock/src/L1/OPContractsManager.sol#L1061). -When developing a new release of the contracts, [the `isRC` flag](https://github.com/ethereum-optimism/optimism/blob/4c8764f0453e141555846d8c9dd2af9edbc1d014/packages/contracts-bedrock/src/L1/OPContractsManager.sol#L181) must be set to `true` to indicate that the OPCM refers to a release candidate. The flag [is automatically set to `false`](https://github.com/ethereum-optimism/optimism/blob/4c8764f0453e141555846d8c9dd2af9edbc1d014/packages/contracts-bedrock/src/L1/OPContractsManager.sol#L453) the first time the OPCM `upgrade` method is invoked from governance's Upgrade Controller Safe. This Safe is a 2/2 held by the Security Council and Optimism Foundation. 
- ## Release Process When a release is proposed to governance, the proposal includes a commit hash, and often the diff --git a/packages/contracts-bedrock/deploy-config/hardhat.json b/packages/contracts-bedrock/deploy-config/hardhat.json index 339a98da80ded..736991c93ba26 100644 --- a/packages/contracts-bedrock/deploy-config/hardhat.json +++ b/packages/contracts-bedrock/deploy-config/hardhat.json @@ -19,7 +19,7 @@ "gasPriceOracleBaseFeeScalar": 1368, "gasPriceOracleBlobBaseFeeScalar": 810949, "l2OutputOracleProposer": "0x70997970C51812dc3A010C7d01b50e0d17dc79C8", - "l2OutputOracleChallenger": "0x6925B8704Ff96DEe942623d6FB5e946EF5884b63", + "l2OutputOracleChallenger": "0x9BA6e03D8B90dE867373Db8cF1A58d2F7F006b3A", "l2GenesisBlockBaseFeePerGas": "0x3B9ACA00", "l2GenesisBlockGasLimit": "0x17D7840", "baseFeeVaultRecipient": "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc", @@ -53,8 +53,8 @@ "faultGameWithdrawalDelay": 302400, "preimageOracleMinProposalSize": 126000, "preimageOracleChallengePeriod": 86400, - "proofMaturityDelaySeconds": 12, - "disputeGameFinalityDelaySeconds": 6, + "proofMaturityDelaySeconds": 604800, + "disputeGameFinalityDelaySeconds": 302400, "respectedGameType": 0, "useFaultProofs": false, "fundDevAccounts": true, diff --git a/packages/contracts-bedrock/foundry.toml b/packages/contracts-bedrock/foundry.toml index 48deeb6d8434d..12d9cb5f206ad 100644 --- a/packages/contracts-bedrock/foundry.toml +++ b/packages/contracts-bedrock/foundry.toml @@ -23,10 +23,13 @@ additional_compiler_profiles = [ ] compilation_restrictions = [ { paths = "src/dispute/FaultDisputeGame.sol", optimizer_runs = 5000 }, + { paths = "src/dispute/v2/FaultDisputeGameV2.sol", optimizer_runs = 5000 }, { paths = "src/dispute/PermissionedDisputeGame.sol", optimizer_runs = 5000 }, + { paths = "src/dispute/v2/PermissionedDisputeGameV2.sol", optimizer_runs = 5000 }, { paths = "src/L1/OPContractsManager.sol", optimizer_runs = 5000 }, { paths = "src/L1/OPContractsManagerStandardValidator.sol", 
optimizer_runs = 5000 }, - { paths = "src/L1/OptimismPortal2.sol", optimizer_runs = 5000 } + { paths = "src/L1/OptimismPortal2.sol", optimizer_runs = 5000 }, + { paths = "src/L1/ProtocolVersions.sol", optimizer_runs = 5000 } ] extra_output = ['devdoc', 'userdoc', 'metadata', 'storageLayout'] @@ -129,6 +132,7 @@ timeout = 300 [profile.lite] optimizer = false +optimizer_runs = 0 # IMPORTANT: # See the info in the "DEFAULT" profile to understand this section. @@ -137,10 +141,13 @@ additional_compiler_profiles = [ ] compilation_restrictions = [ { paths = "src/dispute/FaultDisputeGame.sol", optimizer_runs = 0 }, + { paths = "src/dispute/v2/FaultDisputeGameV2.sol", optimizer_runs = 0 }, { paths = "src/dispute/PermissionedDisputeGame.sol", optimizer_runs = 0 }, + { paths = "src/dispute/v2/PermissionedDisputeGameV2.sol", optimizer_runs = 0 }, { paths = "src/L1/OPContractsManager.sol", optimizer_runs = 0 }, { paths = "src/L1/OPContractsManagerStandardValidator.sol", optimizer_runs = 0 }, { paths = "src/L1/OptimismPortal2.sol", optimizer_runs = 0 }, + { paths = "src/L1/ProtocolVersions.sol", optimizer_runs = 0 }, ] ################################################################ diff --git a/packages/contracts-bedrock/interfaces/L1/IL1CrossDomainMessenger.sol b/packages/contracts-bedrock/interfaces/L1/IL1CrossDomainMessenger.sol index 81d7bcd22abb1..75d61233ae242 100644 --- a/packages/contracts-bedrock/interfaces/L1/IL1CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/interfaces/L1/IL1CrossDomainMessenger.sol @@ -22,7 +22,6 @@ interface IL1CrossDomainMessenger is ICrossDomainMessenger, IProxyAdminOwnedBase function systemConfig() external view returns (ISystemConfig); function version() external view returns (string memory); function superchainConfig() external view returns (ISuperchainConfig); - function upgrade(ISystemConfig _systemConfig) external; function __constructor__() external; } diff --git a/packages/contracts-bedrock/interfaces/L1/IL1ERC721Bridge.sol 
b/packages/contracts-bedrock/interfaces/L1/IL1ERC721Bridge.sol index ab50cdb244256..a73a743dacd3c 100644 --- a/packages/contracts-bedrock/interfaces/L1/IL1ERC721Bridge.sol +++ b/packages/contracts-bedrock/interfaces/L1/IL1ERC721Bridge.sol @@ -11,7 +11,6 @@ interface IL1ERC721Bridge is IERC721Bridge, IProxyAdminOwnedBase { error ReinitializableBase_ZeroInitVersion(); function initVersion() external view returns (uint8); - function upgrade(ISystemConfig _systemConfig) external; function bridgeERC721( address _localToken, address _remoteToken, diff --git a/packages/contracts-bedrock/interfaces/L1/IL1StandardBridge.sol b/packages/contracts-bedrock/interfaces/L1/IL1StandardBridge.sol index 4ea5e42f1edfe..0e22bb9b9c45c 100644 --- a/packages/contracts-bedrock/interfaces/L1/IL1StandardBridge.sol +++ b/packages/contracts-bedrock/interfaces/L1/IL1StandardBridge.sol @@ -31,7 +31,6 @@ interface IL1StandardBridge is IStandardBridge, IProxyAdminOwnedBase { event ETHWithdrawalFinalized(address indexed from, address indexed to, uint256 amount, bytes extraData); function initVersion() external view returns (uint8); - function upgrade(ISystemConfig _systemConfig) external; function depositERC20( address _l1Token, address _l2Token, diff --git a/packages/contracts-bedrock/interfaces/L1/IOPContractsManager.sol b/packages/contracts-bedrock/interfaces/L1/IOPContractsManager.sol index 3dead92e931dc..aaffb5972a3e9 100644 --- a/packages/contracts-bedrock/interfaces/L1/IOPContractsManager.sol +++ b/packages/contracts-bedrock/interfaces/L1/IOPContractsManager.sol @@ -26,14 +26,19 @@ import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; import { IOPContractsManagerStandardValidator } from "interfaces/L1/IOPContractsManagerStandardValidator.sol"; interface IOPContractsManagerContractsContainer { + error OPContractsManagerContractsContainer_DevFeatureInProd(); + function __constructor__( IOPContractsManager.Blueprints memory _blueprints, - IOPContractsManager.Implementations memory 
_implementations + IOPContractsManager.Implementations memory _implementations, + bytes32 _devFeatureBitmap ) external; function blueprints() external view returns (IOPContractsManager.Blueprints memory); function implementations() external view returns (IOPContractsManager.Implementations memory); + function devFeatureBitmap() external view returns (bytes32); + function isDevFeatureEnabled(bytes32 _feature) external view returns (bool); } interface IOPContractsManagerGameTypeAdder { @@ -54,7 +59,7 @@ interface IOPContractsManagerGameTypeAdder { returns (IOPContractsManager.AddGameOutput[] memory); function updatePrestate( - IOPContractsManager.OpChainConfig[] memory _prestateUpdateInputs, + IOPContractsManager.UpdatePrestateInput[] memory _prestateUpdateInputs, address _superchainConfig ) external; @@ -81,10 +86,16 @@ interface IOPContractsManagerDeployer { interface IOPContractsManagerUpgrader { event Upgraded(uint256 indexed l2ChainId, address indexed systemConfig, address indexed upgrader); + error OPContractsManagerUpgrader_SuperchainConfigNeedsUpgrade(uint256 index); + + error OPContractsManagerUpgrader_SuperchainConfigAlreadyUpToDate(); + function __constructor__(IOPContractsManagerContractsContainer _contractsContainer) external; function upgrade(IOPContractsManager.OpChainConfig[] memory _opChainConfigs) external; + function upgradeSuperchainConfig(ISuperchainConfig _superchainConfig, IProxyAdmin _superchainProxyAdmin) external; + function contractsContainer() external view returns (IOPContractsManagerContractsContainer); } @@ -116,11 +127,6 @@ interface IOPContractsManagerInteropMigrator { } interface IOPContractsManager { - // -------- Events -------- - - /// @notice Emitted when the OPCM setRC function is called. - event Released(bool _isRC); - // -------- Structs -------- /// @notice Represents the roles that can be set when deploying a standard OP Stack chain. 
@@ -200,6 +206,7 @@ interface IOPContractsManager { address protocolVersionsImpl; address l1ERC721BridgeImpl; address optimismPortalImpl; + address optimismPortalInteropImpl; address ethLockboxImpl; address systemConfigImpl; address optimismMintableERC20FactoryImpl; @@ -218,6 +225,13 @@ interface IOPContractsManager { Claim absolutePrestate; } + /// @notice The input required to identify a chain for updating prestates + struct UpdatePrestateInput { + ISystemConfig systemConfigProxy; + Claim cannonPrestate; + Claim cannonKonaPrestate; + } + struct AddGameInput { string saltMixer; ISystemConfig systemConfig; @@ -252,14 +266,8 @@ interface IOPContractsManager { /// @notice Address of the ProxyAdmin contract shared by all chains. function superchainProxyAdmin() external view returns (IProxyAdmin); - /// @notice L1 smart contracts release deployed by this version of OPCM. This is used in opcm to signal which - /// version of the L1 smart contracts is deployed. It takes the format of `op-contracts/vX.Y.Z`. - function l1ContractsRelease() external view returns (string memory); - // -------- Errors -------- - error OnlyUpgradeController(); - /// @notice Thrown when an address is the zero address. error AddressNotFound(address who); @@ -306,9 +314,7 @@ interface IOPContractsManager { IOPContractsManagerStandardValidator _opcmStandardValidator, ISuperchainConfig _superchainConfig, IProtocolVersions _protocolVersions, - IProxyAdmin _superchainProxyAdmin, - string memory _l1ContractsRelease, - address _upgradeController + IProxyAdmin _superchainProxyAdmin ) external; @@ -335,13 +341,18 @@ interface IOPContractsManager { /// @param _opChainConfigs The chains to upgrade function upgrade(OpChainConfig[] memory _opChainConfigs) external; + /// @notice Upgrades the SuperchainConfig contract. + /// @param _superchainConfig The SuperchainConfig contract to upgrade. + /// @param _superchainProxyAdmin The ProxyAdmin contract to use for the upgrade. 
+ function upgradeSuperchainConfig(ISuperchainConfig _superchainConfig, IProxyAdmin _superchainProxyAdmin) external; + /// @notice addGameType deploys a new dispute game and links it to the DisputeGameFactory. The inputted _gameConfigs /// must be added in ascending GameType order. function addGameType(AddGameInput[] memory _gameConfigs) external returns (AddGameOutput[] memory); /// @notice Updates the prestate hash for a new game type while keeping all other parameters the same - /// @param _prestateUpdateInputs The new prestate hash to use - function updatePrestate(OpChainConfig[] memory _prestateUpdateInputs) external; + /// @param _prestateUpdateInputs The new prestates to use + function updatePrestate(UpdatePrestateInput[] memory _prestateUpdateInputs) external; /// @notice Migrates one or more OP Stack chains to use the Super Root dispute games and shared /// dispute game contracts. @@ -367,36 +378,15 @@ interface IOPContractsManager { function opcmStandardValidator() external view returns (IOPContractsManagerStandardValidator); - /// @notice Returns the implementation contract addresses. - function implementations() external view returns (Implementations memory); - - function upgradeController() external view returns (address); + /// @notice Retrieves the development feature bitmap stored in this OPCM contract + /// @return The development feature bitmap. + function devFeatureBitmap() external view returns (bytes32); - function isRC() external view returns (bool); - - function setRC(bool _isRC) external; -} + /// @notice Returns the status of a development feature. + /// @param _feature The feature to check. + /// @return True if the feature is enabled, false otherwise. + function isDevFeatureEnabled(bytes32 _feature) external view returns (bool); -/// @notice Minimal interface only used for calling `implementations()` method but without retrieving the ETHLockbox -/// on it, since the OPCM contracts already deployed on mainnet don't have it. 
-/// @dev Only used for testing. -interface IOPCMImplementationsWithoutLockbox { - /// @notice The implementation contracts for the OP Stack, without the newly added ETHLockbox. - struct Implementations { - address superchainConfigImpl; - address protocolVersionsImpl; - address l1ERC721BridgeImpl; - address optimismPortalImpl; - address systemConfigImpl; - address optimismMintableERC20FactoryImpl; - address l1CrossDomainMessengerImpl; - address l1StandardBridgeImpl; - address disputeGameFactoryImpl; - address anchorStateRegistryImpl; - address delayedWETHImpl; - address mipsImpl; - } - - /// @notice Returns the implementation contracts without the ETHLockbox. + /// @notice Returns the implementation contract addresses. function implementations() external view returns (Implementations memory); } diff --git a/packages/contracts-bedrock/interfaces/L1/IOPContractsManager180.sol b/packages/contracts-bedrock/interfaces/L1/IOPContractsManager180.sol deleted file mode 100644 index 682d3431f49cf..0000000000000 --- a/packages/contracts-bedrock/interfaces/L1/IOPContractsManager180.sol +++ /dev/null @@ -1,20 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -/// @notice Interface for the OPCM v1.8.0 release contract. This is temporarily required for -/// upgrade 12 so that the deployment of the OPPrestateUpdater can read and reuse the existing -/// permissioned dispute game blueprints. 
-interface IOPContractsManager180 { - struct Blueprints { - address addressManager; - address proxy; - address proxyAdmin; - address l1ChugSplashProxy; - address resolvedDelegateProxy; - address anchorStateRegistry; - address permissionedDisputeGame1; - address permissionedDisputeGame2; - } - - function blueprints() external view returns (Blueprints memory); -} diff --git a/packages/contracts-bedrock/interfaces/L1/IOPContractsManager200.sol b/packages/contracts-bedrock/interfaces/L1/IOPContractsManager200.sol deleted file mode 100644 index 830ab69aeb3d8..0000000000000 --- a/packages/contracts-bedrock/interfaces/L1/IOPContractsManager200.sol +++ /dev/null @@ -1,19 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -/// @notice Interface for the OPCM v2.0.0 release. -interface IOPContractsManager200 { - struct Blueprints { - address addressManager; - address proxy; - address proxyAdmin; - address l1ChugSplashProxy; - address resolvedDelegateProxy; - address permissionedDisputeGame1; - address permissionedDisputeGame2; - address permissionlessDisputeGame1; - address permissionlessDisputeGame2; - } - - function blueprints() external view returns (Blueprints memory); -} diff --git a/packages/contracts-bedrock/interfaces/L1/IOPContractsManagerStandardValidator.sol b/packages/contracts-bedrock/interfaces/L1/IOPContractsManagerStandardValidator.sol index fedf131fd0c98..15cb768e44d41 100644 --- a/packages/contracts-bedrock/interfaces/L1/IOPContractsManagerStandardValidator.sol +++ b/packages/contracts-bedrock/interfaces/L1/IOPContractsManagerStandardValidator.sol @@ -10,6 +10,7 @@ interface IOPContractsManagerStandardValidator { struct Implementations { address l1ERC721BridgeImpl; address optimismPortalImpl; + address optimismPortalInteropImpl; address ethLockboxImpl; address systemConfigImpl; address optimismMintableERC20FactoryImpl; @@ -35,32 +36,23 @@ interface IOPContractsManagerStandardValidator { function version() external view returns (string 
memory); function anchorStateRegistryImpl() external view returns (address); - function anchorStateRegistryVersion() external pure returns (string memory); function challenger() external view returns (address); function delayedWETHImpl() external view returns (address); - function delayedWETHVersion() external pure returns (string memory); + function devFeatureBitmap() external view returns (bytes32); function disputeGameFactoryImpl() external view returns (address); - function disputeGameFactoryVersion() external pure returns (string memory); function l1CrossDomainMessengerImpl() external view returns (address); - function l1CrossDomainMessengerVersion() external pure returns (string memory); function l1ERC721BridgeImpl() external view returns (address); - function l1ERC721BridgeVersion() external pure returns (string memory); function l1PAOMultisig() external view returns (address); function l1StandardBridgeImpl() external view returns (address); - function l1StandardBridgeVersion() external pure returns (string memory); function mipsImpl() external view returns (address); - function mipsVersion() external pure returns (string memory); function optimismMintableERC20FactoryImpl() external view returns (address); - function optimismMintableERC20FactoryVersion() external pure returns (string memory); function optimismPortalImpl() external view returns (address); - function optimismPortalVersion() external pure returns (string memory); + function optimismPortalInteropImpl() external view returns (address); function ethLockboxImpl() external view returns (address); - function ethLockboxVersion() external pure returns (string memory); function permissionedDisputeGameVersion() external pure returns (string memory); function preimageOracleVersion() external pure returns (string memory); function superchainConfig() external view returns (ISuperchainConfig); function systemConfigImpl() external view returns (address); - function systemConfigVersion() external pure returns 
(string memory); function withdrawalDelaySeconds() external view returns (uint256); function validateWithOverrides( @@ -78,7 +70,8 @@ interface IOPContractsManagerStandardValidator { ISuperchainConfig _superchainConfig, address _l1PAOMultisig, address _challenger, - uint256 _withdrawalDelaySeconds + uint256 _withdrawalDelaySeconds, + bytes32 _devFeatureBitmap ) external; } diff --git a/packages/contracts-bedrock/interfaces/L1/IOptimismPortal2.sol b/packages/contracts-bedrock/interfaces/L1/IOptimismPortal2.sol index 2efb62bcdb1c2..5db9d7555b582 100644 --- a/packages/contracts-bedrock/interfaces/L1/IOptimismPortal2.sol +++ b/packages/contracts-bedrock/interfaces/L1/IOptimismPortal2.sol @@ -12,7 +12,6 @@ import { IProxyAdminOwnedBase } from "interfaces/L1/IProxyAdminOwnedBase.sol"; import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; interface IOptimismPortal2 is IProxyAdminOwnedBase { - error OptimismPortal_Unauthorized(); error ContentLengthMismatch(); error EmptyItem(); error InvalidDataRemainder(); @@ -33,13 +32,7 @@ interface IOptimismPortal2 is IProxyAdminOwnedBase { error OptimismPortal_NoReentrancy(); error OptimismPortal_ProofNotOldEnough(); error OptimismPortal_Unproven(); - error OptimismPortal_InvalidOutputRootIndex(); - error OptimismPortal_InvalidSuperRootProof(); - error OptimismPortal_InvalidOutputRootChainId(); - error OptimismPortal_WrongProofMethod(); - error OptimismPortal_MigratingToSameRegistry(); - error Encoding_EmptySuperRoot(); - error Encoding_InvalidSuperRootVersion(); + error OptimismPortal_InvalidLockboxState(); error OutOfGas(); error UnexpectedList(); error UnexpectedString(); @@ -49,8 +42,6 @@ interface IOptimismPortal2 is IProxyAdminOwnedBase { event WithdrawalFinalized(bytes32 indexed withdrawalHash, bool success); event WithdrawalProven(bytes32 indexed withdrawalHash, address indexed from, address indexed to); event WithdrawalProvenExtension1(bytes32 indexed withdrawalHash, address indexed proofSubmitter); - event 
ETHMigrated(address indexed lockbox, uint256 ethBalance); - event PortalMigrated(IETHLockbox oldLockbox, IETHLockbox newLockbox, IAnchorStateRegistry oldAnchorStateRegistry, IAnchorStateRegistry newAnchorStateRegistry); receive() external payable; @@ -71,7 +62,6 @@ interface IOptimismPortal2 is IProxyAdminOwnedBase { function disputeGameFinalityDelaySeconds() external view returns (uint256); function donateETH() external payable; function superchainConfig() external view returns (ISuperchainConfig); - function migrateToSuperRoots(IETHLockbox _newLockbox, IAnchorStateRegistry _newAnchorStateRegistry) external; function finalizeWithdrawalTransaction(Types.WithdrawalTransaction memory _tx) external; function finalizeWithdrawalTransactionExternalProof( Types.WithdrawalTransaction memory _tx, @@ -82,8 +72,7 @@ interface IOptimismPortal2 is IProxyAdminOwnedBase { function guardian() external view returns (address); function initialize( ISystemConfig _systemConfig, - IAnchorStateRegistry _anchorStateRegistry, - IETHLockbox _ethLockbox + IAnchorStateRegistry _anchorStateRegistry ) external; function initVersion() external view returns (uint8); @@ -101,15 +90,6 @@ interface IOptimismPortal2 is IProxyAdminOwnedBase { bytes[] memory _withdrawalProof ) external; - function proveWithdrawalTransaction( - Types.WithdrawalTransaction memory _tx, - IDisputeGame _disputeGameProxy, - uint256 _outputRootIndex, - Types.SuperRootProof memory _superRootProof, - Types.OutputRootProof memory _outputRootProof, - bytes[] memory _withdrawalProof - ) - external; function provenWithdrawals( bytes32, address @@ -119,11 +99,8 @@ interface IOptimismPortal2 is IProxyAdminOwnedBase { returns (IDisputeGame disputeGameProxy, uint64 timestamp); function respectedGameType() external view returns (GameType); function respectedGameTypeUpdatedAt() external view returns (uint64); - function superRootsActive() external view returns (bool); function systemConfig() external view returns (ISystemConfig); - 
function upgrade(IAnchorStateRegistry _anchorStateRegistry, IETHLockbox _ethLockbox) external; function version() external pure returns (string memory); - function migrateLiquidity() external; function setMinter(address _minter) external; function mintTransaction(address _to, uint256 _value) external; diff --git a/packages/contracts-bedrock/interfaces/L1/IOptimismPortalInterop.sol b/packages/contracts-bedrock/interfaces/L1/IOptimismPortalInterop.sol new file mode 100644 index 0000000000000..6e37f36647882 --- /dev/null +++ b/packages/contracts-bedrock/interfaces/L1/IOptimismPortalInterop.sol @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { Types } from "src/libraries/Types.sol"; +import { GameType } from "src/dispute/lib/LibUDT.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IProxyAdminOwnedBase } from "interfaces/L1/IProxyAdminOwnedBase.sol"; +import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; + +interface IOptimismPortalInterop is IProxyAdminOwnedBase { + error ContentLengthMismatch(); + error EmptyItem(); + error InvalidDataRemainder(); + error InvalidHeader(); + error ReinitializableBase_ZeroInitVersion(); + error OptimismPortal_AlreadyFinalized(); + error OptimismPortal_BadTarget(); + error OptimismPortal_CallPaused(); + error OptimismPortal_CalldataTooLarge(); + error OptimismPortal_GasEstimation(); + error OptimismPortal_GasLimitTooLow(); + error OptimismPortal_ImproperDisputeGame(); + error OptimismPortal_InvalidDisputeGame(); + error OptimismPortal_InvalidMerkleProof(); + error OptimismPortal_InvalidOutputRootProof(); + error OptimismPortal_InvalidProofTimestamp(); + 
error OptimismPortal_InvalidRootClaim(); + error OptimismPortal_NoReentrancy(); + error OptimismPortal_ProofNotOldEnough(); + error OptimismPortal_Unproven(); + error OptimismPortal_InvalidOutputRootIndex(); + error OptimismPortal_InvalidSuperRootProof(); + error OptimismPortal_InvalidOutputRootChainId(); + error OptimismPortal_WrongProofMethod(); + error OptimismPortal_MigratingToSameRegistry(); + error Encoding_EmptySuperRoot(); + error Encoding_InvalidSuperRootVersion(); + error OutOfGas(); + error UnexpectedList(); + error UnexpectedString(); + + event Initialized(uint8 version); + event TransactionDeposited(address indexed from, address indexed to, uint256 indexed version, bytes opaqueData); + event WithdrawalFinalized(bytes32 indexed withdrawalHash, bool success); + event WithdrawalProven(bytes32 indexed withdrawalHash, address indexed from, address indexed to); + event WithdrawalProvenExtension1(bytes32 indexed withdrawalHash, address indexed proofSubmitter); + event ETHMigrated(address indexed lockbox, uint256 ethBalance); + event PortalMigrated(IETHLockbox oldLockbox, IETHLockbox newLockbox, IAnchorStateRegistry oldAnchorStateRegistry, IAnchorStateRegistry newAnchorStateRegistry); + + receive() external payable; + + function anchorStateRegistry() external view returns (IAnchorStateRegistry); + function ethLockbox() external view returns (IETHLockbox); + function checkWithdrawal(bytes32 _withdrawalHash, address _proofSubmitter) external view; + function depositTransaction( + address _to, + uint256 _value, + uint64 _gasLimit, + bool _isCreation, + bytes memory _data + ) + external + payable; + function disputeGameBlacklist(IDisputeGame _disputeGame) external view returns (bool); + function disputeGameFactory() external view returns (IDisputeGameFactory); + function disputeGameFinalityDelaySeconds() external view returns (uint256); + function donateETH() external payable; + function superchainConfig() external view returns (ISuperchainConfig); + function 
migrateToSuperRoots(IETHLockbox _newLockbox, IAnchorStateRegistry _newAnchorStateRegistry) external; + function finalizeWithdrawalTransaction(Types.WithdrawalTransaction memory _tx) external; + function finalizeWithdrawalTransactionExternalProof( + Types.WithdrawalTransaction memory _tx, + address _proofSubmitter + ) + external; + function finalizedWithdrawals(bytes32) external view returns (bool); + function guardian() external view returns (address); + function initialize( + ISystemConfig _systemConfig, + IAnchorStateRegistry _anchorStateRegistry, + IETHLockbox _ethLockbox + ) + external; + function initVersion() external view returns (uint8); + function l2Sender() external view returns (address); + function minimumGasLimit(uint64 _byteCount) external pure returns (uint64); + function numProofSubmitters(bytes32 _withdrawalHash) external view returns (uint256); + function params() external view returns (uint128 prevBaseFee, uint64 prevBoughtGas, uint64 prevBlockNum); // nosemgrep + function paused() external view returns (bool); + function proofMaturityDelaySeconds() external view returns (uint256); + function proofSubmitters(bytes32, uint256) external view returns (address); + function proveWithdrawalTransaction( + Types.WithdrawalTransaction memory _tx, + uint256 _disputeGameIndex, + Types.OutputRootProof memory _outputRootProof, + bytes[] memory _withdrawalProof + ) + external; + function proveWithdrawalTransaction( + Types.WithdrawalTransaction memory _tx, + IDisputeGame _disputeGameProxy, + uint256 _outputRootIndex, + Types.SuperRootProof memory _superRootProof, + Types.OutputRootProof memory _outputRootProof, + bytes[] memory _withdrawalProof + ) + external; + function provenWithdrawals( + bytes32, + address + ) + external + view + returns (IDisputeGame disputeGameProxy, uint64 timestamp); + function respectedGameType() external view returns (GameType); + function respectedGameTypeUpdatedAt() external view returns (uint64); + function superRootsActive() 
external view returns (bool); + function systemConfig() external view returns (ISystemConfig); + function upgrade(IAnchorStateRegistry _anchorStateRegistry, IETHLockbox _ethLockbox) external; + function version() external pure returns (string memory); + function migrateLiquidity() external; + + function __constructor__(uint256 _proofMaturityDelaySeconds) external; +} diff --git a/packages/contracts-bedrock/interfaces/L1/ISuperchainConfig.sol b/packages/contracts-bedrock/interfaces/L1/ISuperchainConfig.sol index df63c6888873a..73973b52a0fda 100644 --- a/packages/contracts-bedrock/interfaces/L1/ISuperchainConfig.sol +++ b/packages/contracts-bedrock/interfaces/L1/ISuperchainConfig.sol @@ -20,7 +20,6 @@ interface ISuperchainConfig is IProxyAdminOwnedBase { function guardian() external view returns (address); function initialize(address _guardian) external; - function upgrade() external; function pause(address _identifier) external; function unpause(address _identifier) external; function pausable(address _identifier) external view returns (bool); diff --git a/packages/contracts-bedrock/interfaces/L1/ISystemConfig.sol b/packages/contracts-bedrock/interfaces/L1/ISystemConfig.sol index ca2c3ebe1444e..4abb69a1c1ab1 100644 --- a/packages/contracts-bedrock/interfaces/L1/ISystemConfig.sol +++ b/packages/contracts-bedrock/interfaces/L1/ISystemConfig.sol @@ -12,7 +12,8 @@ interface ISystemConfig is IProxyAdminOwnedBase { GAS_LIMIT, UNSAFE_BLOCK_SIGNER, EIP_1559_PARAMS, - OPERATOR_FEE_PARAMS + OPERATOR_FEE_PARAMS, + MIN_BASE_FEE } struct Addresses { @@ -24,8 +25,10 @@ interface ISystemConfig is IProxyAdminOwnedBase { } error ReinitializableBase_ZeroInitVersion(); + error SystemConfig_InvalidFeatureState(); event ConfigUpdate(uint256 indexed version, UpdateType indexed updateType, bytes data); + event FeatureSet(bytes32 indexed feature, bool indexed enabled); event Initialized(uint8 version); event OwnershipTransferred(address indexed previousOwner, address indexed newOwner); @@ 
-70,6 +73,7 @@ interface ISystemConfig is IProxyAdminOwnedBase { function minimumGasLimit() external view returns (uint64); function operatorFeeConstant() external view returns (uint64); function operatorFeeScalar() external view returns (uint32); + function minBaseFee() external view returns (uint64); function optimismMintableERC20Factory() external view returns (address addr_); function optimismPortal() external view returns (address addr_); function overhead() external view returns (uint256); @@ -84,14 +88,16 @@ interface ISystemConfig is IProxyAdminOwnedBase { function setOperatorFeeScalars(uint32 _operatorFeeScalar, uint64 _operatorFeeConstant) external; function setUnsafeBlockSigner(address _unsafeBlockSigner) external; function setEIP1559Params(uint32 _denominator, uint32 _elasticity) external; + function setMinBaseFee(uint64 _minBaseFee) external; function startBlock() external view returns (uint256 startBlock_); function transferOwnership(address newOwner) external; // nosemgrep function unsafeBlockSigner() external view returns (address addr_); - function upgrade(uint256 _l2ChainId, ISuperchainConfig _superchainConfig) external; function version() external pure returns (string memory); function paused() external view returns (bool); function superchainConfig() external view returns (ISuperchainConfig); function guardian() external view returns (address); + function setFeature(bytes32 _feature, bool _enabled) external; + function isFeatureEnabled(bytes32) external view returns (bool); function __constructor__() external; } diff --git a/packages/contracts-bedrock/interfaces/cannon/IMIPS.sol b/packages/contracts-bedrock/interfaces/cannon/IMIPS.sol deleted file mode 100644 index 8ad7ccdc21a3b..0000000000000 --- a/packages/contracts-bedrock/interfaces/cannon/IMIPS.sol +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -import { ISemver } from "interfaces/universal/ISemver.sol"; -import { IPreimageOracle } from 
"interfaces/cannon/IPreimageOracle.sol"; - -/// @title IMIPS -/// @notice Interface for the MIPS contract. -interface IMIPS is ISemver { - struct State { - bytes32 memRoot; - bytes32 preimageKey; - uint32 preimageOffset; - uint32 pc; - uint32 nextPC; - uint32 lo; - uint32 hi; - uint32 heap; - uint8 exitCode; - bool exited; - uint64 step; - uint32[32] registers; - } - - error InvalidMemoryProof(); - error InvalidRMWInstruction(); - - function oracle() external view returns (IPreimageOracle oracle_); - function step(bytes memory _stateData, bytes memory _proof, bytes32 _localContext) external returns (bytes32); - - function __constructor__(IPreimageOracle _oracle) external; -} diff --git a/packages/contracts-bedrock/interfaces/cannon/IMIPS2.sol b/packages/contracts-bedrock/interfaces/cannon/IMIPS2.sol deleted file mode 100644 index 8ce918e87be33..0000000000000 --- a/packages/contracts-bedrock/interfaces/cannon/IMIPS2.sol +++ /dev/null @@ -1,54 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.0; - -import { ISemver } from "interfaces/universal/ISemver.sol"; -import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; - -/// @title IMIPS2 -/// @notice Interface for the MIPS2 contract. 
-interface IMIPS2 is ISemver { - struct ThreadState { - uint32 threadID; - uint8 exitCode; - bool exited; - uint32 pc; - uint32 nextPC; - uint32 lo; - uint32 hi; - uint32[32] registers; - } - - struct State { - bytes32 memRoot; - bytes32 preimageKey; - uint32 preimageOffset; - uint32 heap; - uint8 llReservationStatus; - uint32 llAddress; - uint32 llOwnerThread; - uint8 exitCode; - bool exited; - uint64 step; - uint64 stepsSinceLastContextSwitch; - bool traverseRight; - bytes32 leftThreadStack; - bytes32 rightThreadStack; - uint32 nextThreadID; - } - - error InvalidExitedValue(); - error InvalidMemoryProof(); - error InvalidSecondMemoryProof(); - error InvalidRMWInstruction(); - - function oracle() external view returns (IPreimageOracle oracle_); - function step( - bytes memory _stateData, - bytes memory _proof, - bytes32 _localContext - ) - external - returns (bytes32 postState_); - - function __constructor__(IPreimageOracle _oracle, uint256 /*_stateVersion*/) external; -} diff --git a/packages/contracts-bedrock/interfaces/dispute/IDelayedWETH.sol b/packages/contracts-bedrock/interfaces/dispute/IDelayedWETH.sol index 4b8d9750934d6..05b448e55df6c 100644 --- a/packages/contracts-bedrock/interfaces/dispute/IDelayedWETH.sol +++ b/packages/contracts-bedrock/interfaces/dispute/IDelayedWETH.sol @@ -3,15 +3,9 @@ pragma solidity ^0.8.0; import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; -import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; - -interface IDelayedWETH { - error ProxyAdminOwnedBase_NotSharedProxyAdminOwner(); - error ProxyAdminOwnedBase_NotProxyAdminOwner(); - error ProxyAdminOwnedBase_NotProxyAdmin(); - error ProxyAdminOwnedBase_NotProxyAdminOrProxyAdminOwner(); - error ProxyAdminOwnedBase_ProxyAdminNotFound(); - error ProxyAdminOwnedBase_NotResolvedDelegateProxy(); +import { IProxyAdminOwnedBase } from "interfaces/L1/IProxyAdminOwnedBase.sol"; + +interface 
IDelayedWETH is IProxyAdminOwnedBase { error ReinitializableBase_ZeroInitVersion(); struct WithdrawalRequest { @@ -35,8 +29,6 @@ interface IDelayedWETH { function withdraw(address _guy, uint256 _wad) external; function withdrawals(address, address) external view returns (uint256 amount, uint256 timestamp); function version() external view returns (string memory); - function proxyAdmin() external view returns (IProxyAdmin); - function proxyAdminOwner() external view returns (address); function withdraw(uint256 _wad) external; event Approval(address indexed src, address indexed guy, uint256 wad); diff --git a/packages/contracts-bedrock/interfaces/dispute/IDisputeGameFactory.sol b/packages/contracts-bedrock/interfaces/dispute/IDisputeGameFactory.sol index f0d94c6581cdb..da99c869a97b0 100644 --- a/packages/contracts-bedrock/interfaces/dispute/IDisputeGameFactory.sol +++ b/packages/contracts-bedrock/interfaces/dispute/IDisputeGameFactory.sol @@ -21,6 +21,7 @@ interface IDisputeGameFactory is IProxyAdminOwnedBase, IReinitializableBase { event DisputeGameCreated(address indexed disputeProxy, GameType indexed gameType, Claim indexed rootClaim); event ImplementationSet(address indexed impl, GameType indexed gameType); + event ImplementationArgsSet(GameType indexed gameType, bytes args); event InitBondUpdated(GameType indexed gameType, uint256 indexed newBond); event Initialized(uint8 version); event OwnershipTransferred(address indexed previousOwner, address indexed newOwner); @@ -46,6 +47,7 @@ interface IDisputeGameFactory is IProxyAdminOwnedBase, IReinitializableBase { view returns (GameType gameType_, Timestamp timestamp_, IDisputeGame proxy_); function gameCount() external view returns (uint256 gameCount_); + function gameArgs(GameType) external view returns (bytes memory); function gameImpls(GameType) external view returns (IDisputeGame); function games( GameType _gameType, @@ -68,6 +70,7 @@ interface IDisputeGameFactory is IProxyAdminOwnedBase, IReinitializableBase { 
function owner() external view returns (address); function renounceOwnership() external; function setImplementation(GameType _gameType, IDisputeGame _impl) external; + function setImplementation(GameType _gameType, IDisputeGame _impl, bytes calldata _args) external; function setInitBond(GameType _gameType, uint256 _initBond) external; function transferOwnership(address newOwner) external; // nosemgrep function version() external view returns (string memory); diff --git a/packages/contracts-bedrock/interfaces/dispute/IFaultDisputeGame.sol b/packages/contracts-bedrock/interfaces/dispute/IFaultDisputeGame.sol index 80b41bc958b68..86ace4d527077 100644 --- a/packages/contracts-bedrock/interfaces/dispute/IFaultDisputeGame.sol +++ b/packages/contracts-bedrock/interfaces/dispute/IFaultDisputeGame.sol @@ -41,6 +41,7 @@ interface IFaultDisputeGame is IDisputeGame { error AlreadyInitialized(); error AnchorRootNotFound(); + error BadExtraData(); error BlockNumberMatches(); error BondTransferFailed(); error CannotDefendRootClaim(); diff --git a/packages/contracts-bedrock/interfaces/dispute/IPermissionedDisputeGame.sol b/packages/contracts-bedrock/interfaces/dispute/IPermissionedDisputeGame.sol index aa174ddaa1d39..788c65790c030 100644 --- a/packages/contracts-bedrock/interfaces/dispute/IPermissionedDisputeGame.sol +++ b/packages/contracts-bedrock/interfaces/dispute/IPermissionedDisputeGame.sol @@ -30,6 +30,7 @@ interface IPermissionedDisputeGame is IDisputeGame { error AlreadyInitialized(); error AnchorRootNotFound(); + error BadExtraData(); error BlockNumberMatches(); error BondTransferFailed(); error CannotDefendRootClaim(); diff --git a/packages/contracts-bedrock/interfaces/dispute/ISuperFaultDisputeGame.sol b/packages/contracts-bedrock/interfaces/dispute/ISuperFaultDisputeGame.sol index 390f09e537a90..276551af7a740 100644 --- a/packages/contracts-bedrock/interfaces/dispute/ISuperFaultDisputeGame.sol +++ 
b/packages/contracts-bedrock/interfaces/dispute/ISuperFaultDisputeGame.sol @@ -40,6 +40,7 @@ interface ISuperFaultDisputeGame is IDisputeGame { error AlreadyInitialized(); error AnchorRootNotFound(); + error BadExtraData(); error BondTransferFailed(); error CannotDefendRootClaim(); error ClaimAboveSplit(); diff --git a/packages/contracts-bedrock/interfaces/dispute/ISuperPermissionedDisputeGame.sol b/packages/contracts-bedrock/interfaces/dispute/ISuperPermissionedDisputeGame.sol index 2a2fed003635d..7e2b5e73aa446 100644 --- a/packages/contracts-bedrock/interfaces/dispute/ISuperPermissionedDisputeGame.sol +++ b/packages/contracts-bedrock/interfaces/dispute/ISuperPermissionedDisputeGame.sol @@ -41,6 +41,7 @@ interface ISuperPermissionedDisputeGame is IDisputeGame { error AlreadyInitialized(); error AnchorRootNotFound(); + error BadExtraData(); error BondTransferFailed(); error CannotDefendRootClaim(); error ClaimAboveSplit(); diff --git a/packages/contracts-bedrock/interfaces/dispute/v2/IFaultDisputeGameV2.sol b/packages/contracts-bedrock/interfaces/dispute/v2/IFaultDisputeGameV2.sol new file mode 100644 index 0000000000000..91553eddb8d98 --- /dev/null +++ b/packages/contracts-bedrock/interfaces/dispute/v2/IFaultDisputeGameV2.sol @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IBigStepper } from "interfaces/dispute/IBigStepper.sol"; +import { Types } from "src/libraries/Types.sol"; +import { GameType, Claim, Position, Clock, Hash, Duration, BondDistributionMode } from "src/dispute/lib/Types.sol"; + +interface IFaultDisputeGameV2 is IDisputeGame { + struct ClaimData { + uint32 parentIndex; + address counteredBy; + address claimant; + uint128 bond; + Claim claim; + Position position; + Clock clock; + } + + 
struct ResolutionCheckpoint { + bool initialCheckpointComplete; + uint32 subgameIndex; + Position leftmostPosition; + address counteredBy; + } + + struct GameConstructorParams { + GameType gameType; + uint256 maxGameDepth; + uint256 splitDepth; + Duration clockExtension; + Duration maxClockDuration; + } + + error AlreadyInitialized(); + error AnchorRootNotFound(); + error BadExtraData(); + error BlockNumberMatches(); + error BondTransferFailed(); + error CannotDefendRootClaim(); + error ClaimAboveSplit(); + error ClaimAlreadyExists(); + error ClaimAlreadyResolved(); + error ClockNotExpired(); + error ClockTimeExceeded(); + error ContentLengthMismatch(); + error DuplicateStep(); + error EmptyItem(); + error GameDepthExceeded(); + error GameNotInProgress(); + error IncorrectBondAmount(); + error InvalidChallengePeriod(); + error InvalidClockExtension(); + error InvalidDataRemainder(); + error InvalidDisputedClaimIndex(); + error InvalidHeader(); + error InvalidHeaderRLP(); + error InvalidLocalIdent(); + error InvalidOutputRootProof(); + error InvalidParent(); + error InvalidPrestate(); + error InvalidSplitDepth(); + error L2BlockNumberChallenged(); + error MaxDepthTooLarge(); + error NoCreditToClaim(); + error OutOfOrderResolution(); + error UnexpectedList(); + error UnexpectedRootClaim(Claim rootClaim); + error UnexpectedString(); + error ValidStep(); + error InvalidBondDistributionMode(); + error GameNotFinalized(); + error GameNotResolved(); + error ReservedGameType(); + error GamePaused(); + event Move(uint256 indexed parentIndex, Claim indexed claim, address indexed claimant); + event GameClosed(BondDistributionMode bondDistributionMode); + + function absolutePrestate() external view returns (Claim absolutePrestate_); + function addLocalData(uint256 _ident, uint256 _execLeafIdx, uint256 _partOffset) external; + function anchorStateRegistry() external view returns (IAnchorStateRegistry registry_); + function attack(Claim _disputed, uint256 _parentIndex, Claim 
_claim) external payable; + function bondDistributionMode() external view returns (BondDistributionMode); + function challengeRootL2Block(Types.OutputRootProof memory _outputRootProof, bytes memory _headerRLP) external; + function claimCredit(address _recipient) external; + function claimData(uint256) + external + view // nosemgrep + returns ( + uint32 parentIndex, + address counteredBy, + address claimant, + uint128 bond, + Claim claim, + Position position, + Clock clock + ); + function claimDataLen() external view returns (uint256 len_); + function claims(Hash) external view returns (bool); + function clockExtension() external view returns (Duration clockExtension_); + function closeGame() external; + function credit(address _recipient) external view returns (uint256 credit_); + function defend(Claim _disputed, uint256 _parentIndex, Claim _claim) external payable; + function getChallengerDuration(uint256 _claimIndex) external view returns (Duration duration_); + function getNumToResolve(uint256 _claimIndex) external view returns (uint256 numRemainingChildren_); + function getRequiredBond(Position _position) external view returns (uint256 requiredBond_); + function hasUnlockedCredit(address) external view returns (bool); + function l2BlockNumber() external pure returns (uint256 l2BlockNumber_); + function l2BlockNumberChallenged() external view returns (bool); + function l2BlockNumberChallenger() external view returns (address); + function l2ChainId() external view returns (uint256 l2ChainId_); + function maxClockDuration() external view returns (Duration maxClockDuration_); + function maxGameDepth() external view returns (uint256 maxGameDepth_); + function move(Claim _disputed, uint256 _challengeIndex, Claim _claim, bool _isAttack) external payable; + function normalModeCredit(address) external view returns (uint256); + function refundModeCredit(address) external view returns (uint256); + function resolutionCheckpoints(uint256) + external + view + returns (bool 
initialCheckpointComplete, uint32 subgameIndex, Position leftmostPosition, address counteredBy); // nosemgrep + function resolveClaim(uint256 _claimIndex, uint256 _numToResolve) external; + function resolvedSubgames(uint256) external view returns (bool); + function splitDepth() external view returns (uint256 splitDepth_); + function startingBlockNumber() external view returns (uint256 startingBlockNumber_); + function startingOutputRoot() external view returns (Hash root, uint256 l2SequenceNumber); // nosemgrep + function startingRootHash() external view returns (Hash startingRootHash_); + function step(uint256 _claimIndex, bool _isAttack, bytes memory _stateData, bytes memory _proof) external; + function subgames(uint256, uint256) external view returns (uint256); + function version() external pure returns (string memory); + function vm() external view returns (IBigStepper vm_); + function wasRespectedGameTypeWhenCreated() external view returns (bool); + function weth() external view returns (IDelayedWETH weth_); + + function __constructor__(GameConstructorParams memory _params) external; +} diff --git a/packages/contracts-bedrock/interfaces/dispute/v2/IPermissionedDisputeGameV2.sol b/packages/contracts-bedrock/interfaces/dispute/v2/IPermissionedDisputeGameV2.sol new file mode 100644 index 0000000000000..78eb910d8648d --- /dev/null +++ b/packages/contracts-bedrock/interfaces/dispute/v2/IPermissionedDisputeGameV2.sol @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { Types } from "src/libraries/Types.sol"; +import { Claim, Position, Clock, Hash, Duration, BondDistributionMode } from "src/dispute/lib/Types.sol"; + +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IBigStepper } from "interfaces/dispute/IBigStepper.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IFaultDisputeGameV2 } 
from "interfaces/dispute/v2/IFaultDisputeGameV2.sol"; + +interface IPermissionedDisputeGameV2 is IDisputeGame { + struct ClaimData { + uint32 parentIndex; + address counteredBy; + address claimant; + uint128 bond; + Claim claim; + Position position; + Clock clock; + } + + struct ResolutionCheckpoint { + bool initialCheckpointComplete; + uint32 subgameIndex; + Position leftmostPosition; + address counteredBy; + } + + error AlreadyInitialized(); + error AnchorRootNotFound(); + error BadExtraData(); + error BlockNumberMatches(); + error BondTransferFailed(); + error CannotDefendRootClaim(); + error ClaimAboveSplit(); + error ClaimAlreadyExists(); + error ClaimAlreadyResolved(); + error ClockNotExpired(); + error ClockTimeExceeded(); + error ContentLengthMismatch(); + error DuplicateStep(); + error EmptyItem(); + error GameDepthExceeded(); + error GameNotInProgress(); + error IncorrectBondAmount(); + error InvalidChallengePeriod(); + error InvalidClockExtension(); + error InvalidDataRemainder(); + error InvalidDisputedClaimIndex(); + error InvalidHeader(); + error InvalidHeaderRLP(); + error InvalidLocalIdent(); + error InvalidOutputRootProof(); + error InvalidParent(); + error InvalidPrestate(); + error InvalidSplitDepth(); + error L2BlockNumberChallenged(); + error MaxDepthTooLarge(); + error NoCreditToClaim(); + error OutOfOrderResolution(); + error UnexpectedList(); + error UnexpectedRootClaim(Claim rootClaim); + error UnexpectedString(); + error ValidStep(); + error InvalidBondDistributionMode(); + error GameNotFinalized(); + error GameNotResolved(); + error ReservedGameType(); + error GamePaused(); + event Move(uint256 indexed parentIndex, Claim indexed claim, address indexed claimant); + event GameClosed(BondDistributionMode bondDistributionMode); + + function absolutePrestate() external view returns (Claim absolutePrestate_); + function addLocalData(uint256 _ident, uint256 _execLeafIdx, uint256 _partOffset) external; + function anchorStateRegistry() external 
view returns (IAnchorStateRegistry registry_); + function attack(Claim _disputed, uint256 _parentIndex, Claim _claim) external payable; + function bondDistributionMode() external view returns (BondDistributionMode); + function challengeRootL2Block(Types.OutputRootProof memory _outputRootProof, bytes memory _headerRLP) external; + function claimCredit(address _recipient) external; + function claimData(uint256) + external + view // nosemgrep + returns ( + uint32 parentIndex, + address counteredBy, + address claimant, + uint128 bond, + Claim claim, + Position position, + Clock clock + ); + function claimDataLen() external view returns (uint256 len_); + function claims(Hash) external view returns (bool); + function clockExtension() external view returns (Duration clockExtension_); + function closeGame() external; + function credit(address _recipient) external view returns (uint256 credit_); + function defend(Claim _disputed, uint256 _parentIndex, Claim _claim) external payable; + function getChallengerDuration(uint256 _claimIndex) external view returns (Duration duration_); + function getNumToResolve(uint256 _claimIndex) external view returns (uint256 numRemainingChildren_); + function getRequiredBond(Position _position) external view returns (uint256 requiredBond_); + function hasUnlockedCredit(address) external view returns (bool); + function initialize() external payable; + function l2BlockNumber() external pure returns (uint256 l2BlockNumber_); + function l2BlockNumberChallenged() external view returns (bool); + function l2BlockNumberChallenger() external view returns (address); + function l2ChainId() external view returns (uint256 l2ChainId_); + function maxClockDuration() external view returns (Duration maxClockDuration_); + function maxGameDepth() external view returns (uint256 maxGameDepth_); + function move(Claim _disputed, uint256 _challengeIndex, Claim _claim, bool _isAttack) external payable; + function normalModeCredit(address) external view returns 
(uint256); + function refundModeCredit(address) external view returns (uint256); + function resolutionCheckpoints(uint256) + external + view + returns (bool initialCheckpointComplete, uint32 subgameIndex, Position leftmostPosition, address counteredBy); // nosemgrep + function resolveClaim(uint256 _claimIndex, uint256 _numToResolve) external; + function resolvedSubgames(uint256) external view returns (bool); + function splitDepth() external view returns (uint256 splitDepth_); + function startingBlockNumber() external view returns (uint256 startingBlockNumber_); + function startingOutputRoot() external view returns (Hash root, uint256 l2SequenceNumber); // nosemgrep + function startingRootHash() external view returns (Hash startingRootHash_); + function step(uint256 _claimIndex, bool _isAttack, bytes memory _stateData, bytes memory _proof) external; + function subgames(uint256, uint256) external view returns (uint256); + function version() external pure returns (string memory); + function vm() external view returns (IBigStepper vm_); + function wasRespectedGameTypeWhenCreated() external view returns (bool); + function weth() external view returns (IDelayedWETH weth_); + + error BadAuth(); + + function proposer() external pure returns (address proposer_); + function challenger() external pure returns (address challenger_); + + function __constructor__( + IFaultDisputeGameV2.GameConstructorParams memory _params + ) + external; +} diff --git a/packages/contracts-bedrock/interfaces/safe/ILivenessModule2.sol b/packages/contracts-bedrock/interfaces/safe/ILivenessModule2.sol new file mode 100644 index 0000000000000..64b367c50d2bd --- /dev/null +++ b/packages/contracts-bedrock/interfaces/safe/ILivenessModule2.sol @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { ISemver } from "interfaces/universal/ISemver.sol"; + +/// @title ILivenessModule2 +/// @notice Interface for LivenessModule2, a singleton module for challenge-based ownership 
transfer +interface ILivenessModule2 is ISemver { + /// @notice Configuration for a Safe's liveness module + struct ModuleConfig { + uint256 livenessResponsePeriod; + address fallbackOwner; + } + + /// @notice Returns the configuration for a Safe + /// @return livenessResponsePeriod The response period + /// @return fallbackOwner The fallback owner address + function livenessSafeConfiguration(address) external view returns (uint256 livenessResponsePeriod, address fallbackOwner); + + /// @notice Returns the challenge start time for a Safe (0 if no challenge) + /// @return The challenge start timestamp + function challengeStartTime(address) external view returns (uint256); + + /// @notice Semantic version + /// @return version The contract version + function version() external view returns (string memory); + + /// @notice Configures the module for a Safe that has already enabled it + /// @param _config The configuration parameters for the module + function configureLivenessModule(ModuleConfig memory _config) external; + + /// @notice Clears the module configuration for a Safe + function clearLivenessModule() external; + + /// @notice Returns challenge_start_time + liveness_response_period if there is a challenge, or 0 if not + /// @param _safe The Safe address to query + /// @return The challenge end timestamp, or 0 if no challenge + function getChallengePeriodEnd(address _safe) external view returns (uint256); + + /// @notice Challenges an enabled safe + /// @param _safe The Safe to challenge + function challenge(address _safe) external; + + /// @notice Responds to a challenge for an enabled safe, canceling it + function respond() external; + + /// @notice Removes all current owners from an enabled safe and appoints fallback as sole owner + /// @param _safe The Safe to transfer ownership of + function changeOwnershipToFallback(address _safe) external; +} diff --git a/packages/contracts-bedrock/interfaces/safe/ITimelockGuard.sol 
b/packages/contracts-bedrock/interfaces/safe/ITimelockGuard.sol new file mode 100644 index 0000000000000..ccf454985a90d --- /dev/null +++ b/packages/contracts-bedrock/interfaces/safe/ITimelockGuard.sol @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: UNLICENSED +pragma solidity ^0.8.4; + +library Enum { + type Operation is uint8; +} + +interface ITimelockGuard { + enum TransactionState { + NotScheduled, + Pending, + Cancelled, + Executed + } + struct ScheduledTransaction { + uint256 executionTime; + TransactionState state; + ExecTransactionParams params; + } + + struct ExecTransactionParams { + address to; + uint256 value; + bytes data; + Enum.Operation operation; + uint256 safeTxGas; + uint256 baseGas; + uint256 gasPrice; + address gasToken; + address payable refundReceiver; + } + + error TimelockGuard_GuardNotConfigured(); + error TimelockGuard_GuardNotEnabled(); + error TimelockGuard_GuardStillEnabled(); + error TimelockGuard_InvalidTimelockDelay(); + error TimelockGuard_TransactionAlreadyCancelled(); + error TimelockGuard_TransactionAlreadyScheduled(); + error TimelockGuard_TransactionNotScheduled(); + error TimelockGuard_TransactionNotReady(); + error TimelockGuard_TransactionAlreadyExecuted(); + error TimelockGuard_InvalidVersion(); + + event CancellationThresholdUpdated(address indexed safe, uint256 oldThreshold, uint256 newThreshold); + event GuardConfigured(address indexed safe, uint256 timelockDelay); + event TransactionCancelled(address indexed safe, bytes32 indexed txHash); + event TransactionScheduled(address indexed safe, bytes32 indexed txHash, uint256 executionTime); + event TransactionExecuted(address indexed safe, bytes32 txHash); + event Message(string message); + + function cancelTransaction(address _safe, bytes32 _txHash, uint256 _nonce, bytes memory _signatures) external; + function signCancellation(bytes32 _txHash) external; + function cancellationThreshold(address _safe) external view returns (uint256); + function checkTransaction( + address 
_to, + uint256 _value, + bytes memory _data, + Enum.Operation _operation, + uint256 _safeTxGas, + uint256 _baseGas, + uint256 _gasPrice, + address _gasToken, + address payable _refundReceiver, + bytes memory _signatures, + address _msgSender + ) + external; + function checkAfterExecution(bytes32, bool) external; + function configureTimelockGuard(uint256 _timelockDelay) external; + function scheduledTransaction( + address _safe, + bytes32 _txHash + ) + external + view + returns (ScheduledTransaction memory); + function safeConfigs(address) external view returns (uint256 timelockDelay); + function scheduleTransaction( + address _safe, + uint256 _nonce, + ExecTransactionParams memory _params, + bytes memory _signatures + ) + external; + function version() external view returns (string memory); + function timelockConfiguration(address _safe) external view returns (uint256 timelockDelay); + function maxCancellationThreshold(address _safe) external view returns (uint256); + function pendingTransactions(address _safe) + external + view + returns (ScheduledTransaction[] memory); +} diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index 09ed00739d1ac..6bc72727cd28f 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -62,8 +62,8 @@ test-dev *ARGS: build-go-ffi # Default block number for the forked upgrade path. 
-export sepoliaBlockNumber := "7701807" -export mainnetBlockNumber := "21983965" +export sepoliaBlockNumber := "9118951" +export mainnetBlockNumber := "23327678" export pinnedBlockNumber := if env_var_or_default("FORK_BASE_CHAIN", "") == "mainnet" { mainnetBlockNumber @@ -88,8 +88,9 @@ prepare-upgrade-env *ARGS : build-go-ffi export FORK_BLOCK_NUMBER=$pinnedBlockNumber echo "Running upgrade tests at block $FORK_BLOCK_NUMBER" export FORK_RPC_URL=$ETH_RPC_URL + export FORK_RETRIES=10 + export FORK_BACKOFF=1000 export FORK_TEST=true - export USE_MT_CANNON=true {{ARGS}} \ --match-path "test/{L1,dispute,cannon}/**" @@ -273,6 +274,13 @@ unused-imports-check-no-build: # Checks for unused imports in Solidity contracts. unused-imports-check: build unused-imports-check-no-build +# Checks that the semver of contracts are valid. Does not build contracts. +valid-semver-check-no-build: + go run ./scripts/checks/valid-semver-check/main.go + +# Checks that the semver of contracts are valid. +valid-semver-check: build valid-semver-check-no-build + # Checks that the deploy configs are valid. validate-deploy-configs: ./scripts/checks/check-deploy-configs.sh @@ -298,16 +306,14 @@ semgrep: semgrep-test: cd ../../ && semgrep scan --test --config .semgrep/rules/ .semgrep/tests/ -# Checks that the frozen code has not been modified. -check-frozen-code: - ./scripts/checks/check-frozen-files.sh - # Runs all checks. 
check: @just semgrep-test-validity-check \ + semgrep \ lint-check \ snapshots-check-no-build \ unused-imports-check-no-build \ + valid-semver-check-no-build \ semver-diff-check-no-build \ validate-deploy-configs \ validate-spacers-no-build \ diff --git a/packages/contracts-bedrock/scripts/checks/check-frozen-files.sh b/packages/contracts-bedrock/scripts/checks/check-frozen-files.sh deleted file mode 100755 index 2a69c1821eda2..0000000000000 --- a/packages/contracts-bedrock/scripts/checks/check-frozen-files.sh +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# Grab the directory of the contracts-bedrock package. -SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) - -# Load semver-utils. -# shellcheck source=/dev/null -source "$SCRIPT_DIR/utils/semver-utils.sh" - -# Path to semver-lock.json. -SEMVER_LOCK="snapshots/semver-lock.json" - -# Create a temporary directory. -temp_dir=$(mktemp -d) -trap 'rm -rf "$temp_dir"' EXIT - -# Exit early if semver-lock.json has not changed. -if ! { git diff origin/develop...HEAD --name-only; git diff --name-only; git diff --cached --name-only; } | grep -q "$SEMVER_LOCK"; then - echo "No changes detected in semver-lock.json" - exit 0 -fi - -# Get the upstream semver-lock.json. -if ! git show origin/develop:packages/contracts-bedrock/snapshots/semver-lock.json > "$temp_dir/upstream_semver_lock.json" 2>/dev/null; then - echo "❌ Error: Could not find semver-lock.json in the snapshots/ directory of develop branch" - exit 1 -fi - -# Copy the local semver-lock.json. -cp "$SEMVER_LOCK" "$temp_dir/local_semver_lock.json" - -# Get the changed contracts. -changed_contracts=$(jq -r ' - def changes: - to_entries as $local - | input as $upstream - | $local | map( - select( - .key as $key - | .value != $upstream[$key] - ) - ) | map(.key); - changes[] -' "$temp_dir/local_semver_lock.json" "$temp_dir/upstream_semver_lock.json") - -# List of files that are allowed to be modified. 
-# In order to prevent a file from being modified, comment it out. Do not delete it. -# All files in semver-lock.json should be in this list. -ALLOWED_FILES=( - "src/L1/OPContractsManagerStandardValidator.sol:OPContractsManagerStandardValidator" - "src/L1/DataAvailabilityChallenge.sol:DataAvailabilityChallenge" - # "src/L1/ETHLockbox.sol:ETHLockbox" - "src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger" - "src/L1/L1ERC721Bridge.sol:L1ERC721Bridge" - "src/L1/L1StandardBridge.sol:L1StandardBridge" - "src/L1/OPContractsManager.sol:OPContractsManager" - # "src/L1/OptimismPortal2.sol:OptimismPortal2" - "src/L1/ProtocolVersions.sol:ProtocolVersions" - "src/L1/SuperchainConfig.sol:SuperchainConfig" - "src/L1/SystemConfig.sol:SystemConfig" - "src/L2/BaseFeeVault.sol:BaseFeeVault" - "src/L2/CrossL2Inbox.sol:CrossL2Inbox" - "src/L2/ETHLiquidity.sol:ETHLiquidity" - "src/L2/GasPriceOracle.sol:GasPriceOracle" - "src/L2/L1Block.sol:L1Block" - "src/L2/L1FeeVault.sol:L1FeeVault" - "src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger" - "src/L2/L2ERC721Bridge.sol:L2ERC721Bridge" - "src/L2/L2StandardBridge.sol:L2StandardBridge" - "src/L2/L2StandardBridgeInterop.sol:L2StandardBridgeInterop" - "src/L2/L2ToL1MessagePasser.sol:L2ToL1MessagePasser" - "src/L2/L2ToL2CrossDomainMessenger.sol:L2ToL2CrossDomainMessenger" - "src/L2/OptimismMintableERC721.sol:OptimismMintableERC721" - "src/L2/OptimismMintableERC721Factory.sol:OptimismMintableERC721Factory" - "src/L2/OptimismSuperchainERC20.sol:OptimismSuperchainERC20" - "src/L2/OptimismSuperchainERC20Beacon.sol:OptimismSuperchainERC20Beacon" - "src/L2/OptimismSuperchainERC20Factory.sol:OptimismSuperchainERC20Factory" - "src/L2/SequencerFeeVault.sol:SequencerFeeVault" - "src/L2/SuperchainERC20.sol:SuperchainERC20" - "src/L2/SuperchainTokenBridge.sol:SuperchainTokenBridge" - "src/L2/SuperchainETHBridge.sol:SuperchainETHBridge" - "src/L2/WETH.sol:WETH" - "src/cannon/MIPS64.sol:MIPS64" - "src/cannon/PreimageOracle.sol:PreimageOracle" - # 
"src/dispute/AnchorStateRegistry.sol:AnchorStateRegistry" - "src/dispute/DelayedWETH.sol:DelayedWETH" - # "src/dispute/DisputeGameFactory.sol:DisputeGameFactory" - "src/dispute/FaultDisputeGame.sol:FaultDisputeGame" - "src/dispute/PermissionedDisputeGame.sol:PermissionedDisputeGame" - "src/dispute/SuperFaultDisputeGame.sol:SuperFaultDisputeGame" - "src/dispute/SuperPermissionedDisputeGame.sol:SuperPermissionedDisputeGame" - "src/legacy/DeployerWhitelist.sol:DeployerWhitelist" - "src/legacy/L1BlockNumber.sol:L1BlockNumber" - "src/legacy/LegacyMessagePasser.sol:LegacyMessagePasser" - "src/safe/DeputyPauseModule.sol:DeputyPauseModule" - "src/safe/LivenessGuard.sol:LivenessGuard" - "src/safe/LivenessModule.sol:LivenessModule" - "src/universal/OptimismMintableERC20.sol:OptimismMintableERC20" - "src/universal/OptimismMintableERC20Factory.sol:OptimismMintableERC20Factory" - "src/universal/StorageSetter.sol:StorageSetter" - "src/vendor/asterisc/RISCV.sol:RISCV" - "src/vendor/eas/EAS.sol:EAS" - "src/vendor/eas/SchemaRegistry.sol:SchemaRegistry" -) - -MATCHED_FILES=() -# Check each changed contract against allowed patterns -for contract in $changed_contracts; do - is_allowed=false - for allowed_file in "${ALLOWED_FILES[@]}"; do - if [[ "$contract" == "$allowed_file" ]]; then - is_allowed=true - break - fi - done - if [[ "$is_allowed" == "false" ]]; then - MATCHED_FILES+=("$contract") - fi -done - -if [ ${#MATCHED_FILES[@]} -gt 0 ]; then - echo "❌ Error: Changes detected in files that are not allowed to be modified." - echo "The following files were modified but are not in the allowed list:" - printf ' - %s\n' "${MATCHED_FILES[@]}" - echo "Only the following files can be modified:" - printf ' - %s\n' "${ALLOWED_FILES[@]}" - echo "The code freeze is expected to be lifted no later than 2025-02-20." 
- exit 1 -fi - -echo "✅ All changes are in allowed files" -exit 0 diff --git a/packages/contracts-bedrock/scripts/checks/check-semver-diff.sh b/packages/contracts-bedrock/scripts/checks/check-semver-diff.sh index c5a615158ecdd..157a69ec10c01 100755 --- a/packages/contracts-bedrock/scripts/checks/check-semver-diff.sh +++ b/packages/contracts-bedrock/scripts/checks/check-semver-diff.sh @@ -32,14 +32,16 @@ temp_dir=$(mktemp -d) trap 'rm -rf "$temp_dir"' EXIT # Exit early if semver-lock.json has not changed. -if ! { git diff origin/op-es...HEAD --name-only; git diff --name-only; git diff --cached --name-only; } | grep -q "$SEMVER_LOCK"; then +TARGET_BRANCH="${TARGET_BRANCH:-op-es}" +UPSTREAM_REF="origin/${TARGET_BRANCH}" +if ! { git diff "$UPSTREAM_REF"...HEAD --name-only; git diff --name-only; git diff --cached --name-only; } | grep -q "$SEMVER_LOCK"; then echo "No changes detected in semver-lock.json" exit 0 fi # Get the upstream semver-lock.json. -if ! git show origin/op-es:packages/contracts-bedrock/snapshots/semver-lock.json > "$temp_dir/upstream_semver_lock.json" 2>/dev/null; then - echo "❌ Error: Could not find semver-lock.json in the snapshots/ directory of op-es branch" +if ! git show "$UPSTREAM_REF":packages/contracts-bedrock/snapshots/semver-lock.json > "$temp_dir/upstream_semver_lock.json" 2>/dev/null; then + echo "❌ Error: Could not find semver-lock.json in the snapshots/ directory of $TARGET_BRANCH branch" exit 1 fi @@ -80,7 +82,7 @@ for contract in $changed_contracts; do # Extract the old and new source files. old_source_file="$temp_dir/old_${contract##*/}" new_source_file="$temp_dir/new_${contract##*/}" - git show origin/op-es:packages/contracts-bedrock/"$contract" > "$old_source_file" 2>/dev/null || true + git show "$UPSTREAM_REF":packages/contracts-bedrock/"$contract" > "$old_source_file" 2>/dev/null || true cp "$contract" "$new_source_file" # Extract the old and new versions. 
diff --git a/packages/contracts-bedrock/scripts/checks/interfaces/main.go b/packages/contracts-bedrock/scripts/checks/interfaces/main.go index 196f73d64cd4b..d7138e5f98b65 100644 --- a/packages/contracts-bedrock/scripts/checks/interfaces/main.go +++ b/packages/contracts-bedrock/scripts/checks/interfaces/main.go @@ -43,7 +43,7 @@ var excludeSourceContracts = []string{ "CrossDomainOwnable", "CrossDomainOwnable2", "CrossDomainOwnable3", "CrossDomainMessengerLegacySpacer0", "CrossDomainMessengerLegacySpacer1", // Helper contracts - "SafeSend", "EventLogger", "StorageSetter", "DisputeMonitorHelper", + "SafeSend", "EventLogger", "StorageSetter", "DisputeMonitorHelper", "GameHelper", // Periphery "TransferOnion", "AssetReceiver", "AdminFaucetAuthModule", "CheckSecrets", "CheckBalanceLow", "CheckTrue", "Drippie", "Transactor", "Faucet", diff --git a/packages/contracts-bedrock/scripts/checks/opcm-upgrade-checks/main.go b/packages/contracts-bedrock/scripts/checks/opcm-upgrade-checks/main.go index 6098879cf9557..1b5c6f29d4548 100644 --- a/packages/contracts-bedrock/scripts/checks/opcm-upgrade-checks/main.go +++ b/packages/contracts-bedrock/scripts/checks/opcm-upgrade-checks/main.go @@ -107,20 +107,35 @@ func processFile(artifactPath string) (*common.Void, []error) { return nil, []error{err} } - // Get the AST of OPCM's upgrade function. - opcmUpgradeAst, err := getOpcmUpgradeFunctionAst(opcmAst) - if err != nil { - return nil, []error{err} - } - // Check that there is a call to contract.upgrade. contractName := strings.Split(filepath.Base(artifactPath), ".")[0] typeName := "contract I" + contractName - callType := upgradesContract(opcmUpgradeAst.Body.Statements, "upgrade", typeName, InternalUpgradeFunctionType{ - name: "upgradeToAndCall", - typeName: "function (contract IProxyAdmin,address,address,bytes memory)", - }) + var callType CallType + if contractName == "SuperchainConfig" { + // Get the AST of OPCM's upgradeSuperchainConfig function. 
+ opcmUpgradeSuperchainConfigAst, err := getOpcmUpgradeFunctionAst(opcmAst, "upgradeSuperchainConfig") + if err != nil { + return nil, []error{err} + } + + callType = upgradesContract(opcmUpgradeSuperchainConfigAst.Body.Statements, "upgrade", typeName, InternalUpgradeFunctionType{ + name: "upgradeToAndCall", + typeName: "function (contract IProxyAdmin,address,address,bytes memory)", + }) + } else { + // Get the AST of OPCM's upgrade function. + opcmUpgradeAst, err := getOpcmUpgradeFunctionAst(opcmAst, "_doChainUpgrade") + if err != nil { + return nil, []error{err} + } + + callType = upgradesContract(opcmUpgradeAst.Body.Statements, "upgrade", typeName, InternalUpgradeFunctionType{ + name: "upgradeToAndCall", + typeName: "function (contract IProxyAdmin,address,address,bytes memory)", + }) + } + if callType == NOT_FOUND { return nil, []error{fmt.Errorf("OPCM upgrade function does not call %v.upgrade", contractName)} } @@ -293,14 +308,13 @@ func identifyValidInternalUpgradeCall(expression *solc.Expression, internalFunct // Get the AST of OPCM's upgrade function. // Returns an error if zero or more than one external upgrade function is found. 
-func getOpcmUpgradeFunctionAst(opcmArtifact *solc.ForgeArtifact) (*solc.AstNode, error) { +func getOpcmUpgradeFunctionAst(opcmArtifact *solc.ForgeArtifact, upgradeFunctionName string) (*solc.AstNode, error) { opcmUpgradeFunctions := []solc.AstNode{} for _, astNode := range opcmArtifact.Ast.Nodes { if astNode.NodeType == "ContractDefinition" && astNode.Name == "OPContractsManagerUpgrader" { for _, node := range astNode.Nodes { if node.NodeType == "FunctionDefinition" && - node.Name == "upgrade" && - node.Visibility == "external" { + node.Name == upgradeFunctionName { opcmUpgradeFunctions = append(opcmUpgradeFunctions, node) } } @@ -308,11 +322,11 @@ func getOpcmUpgradeFunctionAst(opcmArtifact *solc.ForgeArtifact) (*solc.AstNode, } if len(opcmUpgradeFunctions) == 0 { - return nil, fmt.Errorf("no external upgrade function found in OPContractsManagerUpgrader") + return nil, fmt.Errorf("no external %s function found in OPContractsManagerUpgrader", upgradeFunctionName) } if len(opcmUpgradeFunctions) > 1 { - return nil, fmt.Errorf("multiple external upgrade functions found in OPContractsManagerUpgrader, expected 1") + return nil, fmt.Errorf("multiple external %s functions found in OPContractsManagerUpgrader, expected 1", upgradeFunctionName) } return &opcmUpgradeFunctions[0], nil diff --git a/packages/contracts-bedrock/scripts/checks/opcm-upgrade-checks/main_test.go b/packages/contracts-bedrock/scripts/checks/opcm-upgrade-checks/main_test.go index 296e7fb60e6c5..b7b7ef16bea58 100644 --- a/packages/contracts-bedrock/scripts/checks/opcm-upgrade-checks/main_test.go +++ b/packages/contracts-bedrock/scripts/checks/opcm-upgrade-checks/main_test.go @@ -10,13 +10,14 @@ import ( func TestGetOpcmUpgradeFunctionAst(t *testing.T) { tests := []struct { - name string - opcmArtifact *solc.ForgeArtifact - expectedAst *solc.AstNode - expectedError string + name string + opcmArtifact *solc.ForgeArtifact + upgradeFunctionName string + expectedAst *solc.AstNode + expectedError string }{ { - 
name: "With one external upgrade function", + name: "With one _doChainUpgrade function", opcmArtifact: &solc.ForgeArtifact{ Ast: solc.Ast{ Nodes: []solc.AstNode{ @@ -25,7 +26,7 @@ func TestGetOpcmUpgradeFunctionAst(t *testing.T) { Nodes: []solc.AstNode{ { NodeType: "FunctionDefinition", - Name: "upgrade", + Name: "_doChainUpgrade", Visibility: "external", Nodes: []solc.AstNode{ { @@ -39,9 +40,10 @@ func TestGetOpcmUpgradeFunctionAst(t *testing.T) { }, }, }, + upgradeFunctionName: "_doChainUpgrade", expectedAst: &solc.AstNode{ NodeType: "FunctionDefinition", - Name: "upgrade", + Name: "_doChainUpgrade", Visibility: "external", Nodes: []solc.AstNode{ { @@ -52,7 +54,7 @@ func TestGetOpcmUpgradeFunctionAst(t *testing.T) { expectedError: "", }, { - name: "With an upgrade function but public visibility", + name: "With a _doChainUpgrade function but public visibility", opcmArtifact: &solc.ForgeArtifact{ Ast: solc.Ast{ Nodes: []solc.AstNode{ @@ -61,7 +63,7 @@ func TestGetOpcmUpgradeFunctionAst(t *testing.T) { Nodes: []solc.AstNode{ { NodeType: "FunctionDefinition", - Name: "upgrade", + Name: "_doChainUpgrade", Visibility: "public", }, }, @@ -70,11 +72,16 @@ func TestGetOpcmUpgradeFunctionAst(t *testing.T) { }, }, }, - expectedAst: nil, - expectedError: "no external upgrade function found in OPContractsManagerUpgrader", + upgradeFunctionName: "_doChainUpgrade", + expectedAst: &solc.AstNode{ + NodeType: "FunctionDefinition", + Name: "_doChainUpgrade", + Visibility: "public", + }, + expectedError: "", }, { - name: "With an upgrade function and irrelevant function selector", + name: "With a _doChainUpgrade function and irrelevant function selector", opcmArtifact: &solc.ForgeArtifact{ Ast: solc.Ast{ Nodes: []solc.AstNode{ @@ -83,7 +90,7 @@ func TestGetOpcmUpgradeFunctionAst(t *testing.T) { Nodes: []solc.AstNode{ { NodeType: "FunctionDefinition", - Name: "upgrade", + Name: "_doChainUpgrade", Visibility: "external", FunctionSelector: "aabbccdd", Nodes: []solc.AstNode{ @@ -98,9 
+105,10 @@ func TestGetOpcmUpgradeFunctionAst(t *testing.T) { }, }, }, + upgradeFunctionName: "_doChainUpgrade", expectedAst: &solc.AstNode{ NodeType: "FunctionDefinition", - Name: "upgrade", + Name: "_doChainUpgrade", Visibility: "external", FunctionSelector: "aabbccdd", Nodes: []solc.AstNode{ @@ -112,7 +120,7 @@ func TestGetOpcmUpgradeFunctionAst(t *testing.T) { expectedError: "", }, { - name: "With multiple external upgrade functions", + name: "With multiple _doChainUpgrade functions", opcmArtifact: &solc.ForgeArtifact{ Ast: solc.Ast{ Nodes: []solc.AstNode{ @@ -121,12 +129,12 @@ func TestGetOpcmUpgradeFunctionAst(t *testing.T) { Nodes: []solc.AstNode{ { NodeType: "FunctionDefinition", - Name: "upgrade", + Name: "_doChainUpgrade", Visibility: "external", }, { NodeType: "FunctionDefinition", - Name: "upgrade", + Name: "_doChainUpgrade", Visibility: "external", }, }, @@ -135,11 +143,12 @@ func TestGetOpcmUpgradeFunctionAst(t *testing.T) { }, }, }, - expectedAst: nil, - expectedError: "multiple external upgrade functions found in OPContractsManagerUpgrader, expected 1", + upgradeFunctionName: "_doChainUpgrade", + expectedAst: nil, + expectedError: "multiple external _doChainUpgrade functions found in OPContractsManagerUpgrader, expected 1", }, { - name: "With no upgrade function", + name: "With no _doChainUpgrade function", opcmArtifact: &solc.ForgeArtifact{ Ast: solc.Ast{ Nodes: []solc.AstNode{ @@ -162,8 +171,9 @@ func TestGetOpcmUpgradeFunctionAst(t *testing.T) { }, }, }, - expectedAst: nil, - expectedError: "no external upgrade function found in OPContractsManagerUpgrader", + upgradeFunctionName: "_doChainUpgrade", + expectedAst: nil, + expectedError: "no external _doChainUpgrade function found in OPContractsManagerUpgrader", }, { name: "With no contract definition", @@ -172,14 +182,15 @@ func TestGetOpcmUpgradeFunctionAst(t *testing.T) { Nodes: []solc.AstNode{}, }, }, - expectedAst: nil, - expectedError: "no external upgrade function found in 
OPContractsManagerUpgrader", + upgradeFunctionName: "_doChainUpgrade", + expectedAst: nil, + expectedError: "no external _doChainUpgrade function found in OPContractsManagerUpgrader", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ast, err := getOpcmUpgradeFunctionAst(test.opcmArtifact) + ast, err := getOpcmUpgradeFunctionAst(test.opcmArtifact, test.upgradeFunctionName) if test.expectedError == "" { assert.NoError(t, err) diff --git a/packages/contracts-bedrock/scripts/checks/test-validation/exclusions.toml b/packages/contracts-bedrock/scripts/checks/test-validation/exclusions.toml new file mode 100644 index 0000000000000..6c332b4e8a321 --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/test-validation/exclusions.toml @@ -0,0 +1,97 @@ + # Test validation exclusions configuration +# This file contains lists of paths and test names that should be excluded +# from various validation checks in the test validation script. + +[excluded_paths] +# PATHS EXCLUDED FROM SRC VALIDATION: +# These paths are excluded because they don't follow the standard naming convention where test +# files (*.t.sol) have corresponding source files (*.sol) in the src/ directory. Instead, they +# follow alternative naming conventions or serve specialized purposes: +# - Some are utility/infrastructure tests that don't test specific contracts +# - Some test external libraries or vendor code that exists elsewhere +# - Some are integration tests that test multiple contracts together +# - Some are specialized test types (invariants, formal verification, etc.) +# +# Resolving these naming inconsistencies is outside the script's scope, but they are +# documented here to avoid false validation failures while maintaining the validation rules +# for standard contract tests. 
+src_validation = [ + "test/invariants/", # Invariant testing framework - no direct src counterpart + "test/opcm/", # OP Chain Manager tests - may have different structure + "test/scripts/", # Script tests - test deployment/utility scripts, not contracts + "test/integration/", # Integration tests - test multiple contracts together + "test/cannon/MIPS64Memory.t.sol", # Tests external MIPS implementation + "test/dispute/lib/LibClock.t.sol", # Tests library utilities + "test/dispute/lib/LibGameId.t.sol", # Tests library utilities + "test/libraries/DeployUtils.t.sol", # Tests library utilities - no direct src counterpart + "test/setup/DeployVariations.t.sol", # Tests deployment variations + "test/universal/BenchmarkTest.t.sol", # Performance benchmarking tests + "test/universal/ExtendedPause.t.sol", # Tests extended functionality + "test/vendor/Initializable.t.sol", # Tests external vendor code + "test/vendor/InitializableOZv5.t.sol", # Tests external vendor code +] + +# PATHS EXCLUDED FROM CONTRACT NAME FILE PATH VALIDATION: +# These paths are excluded because they don't follow the standard naming convention where the +# contract name matches the file name pattern: __Test. Instead, these +# files contain contracts with names like __Test, where the base +# contract name doesn't match the file name. +# +# This typically occurs when: +# - The test file contains helper contracts or alternative implementations +# - The test file tests multiple related contracts or contract variants +# - The test file uses a different naming strategy for organizational purposes +# - The contracts being tested have complex inheritance or composition patterns +# +# These naming inconsistencies may indicate the presence of specialized test +# infrastructure beyond standard harnesses or different setup contracts patterns. 
+contract_name_validation = [ + "test/dispute/FaultDisputeGame.t.sol", # Contains contracts not matching FaultDisputeGame base name + "test/dispute/v2/FaultDisputeGameV2.t.sol", # Contains contracts not matching FaultDisputeGameV2 base name + "test/dispute/SuperFaultDisputeGame.t.sol", # Contains contracts not matching SuperFaultDisputeGame base name + "test/L1/ResourceMetering.t.sol", # Contains contracts not matching ResourceMetering base name + "test/L1/OPContractsManagerStandardValidator.t.sol", # Contains contracts not matching OPContractsManagerStandardValidator base name + "test/L2/CrossDomainOwnable.t.sol", # Contains contracts not matching CrossDomainOwnable base name + "test/L2/CrossDomainOwnable2.t.sol", # Contains contracts not matching CrossDomainOwnable2 base name + "test/L2/CrossDomainOwnable3.t.sol", # Contains contracts not matching CrossDomainOwnable3 base name + "test/L2/GasPriceOracle.t.sol", # Contains contracts not matching GasPriceOracle base name + "test/universal/StandardBridge.t.sol", # Contains contracts not matching StandardBridge base name + "test/L1/OPContractsManagerContractsContainer.t.sol", # Contains contracts not matching OPContractsManagerContractsContainer base name + "test/libraries/Blueprint.t.sol", # Contains helper contracts (BlueprintHarness, ConstructorArgMock) + "test/libraries/SafeCall.t.sol", # Contains helper contracts (SimpleSafeCaller) +] + +# PATHS EXCLUDED FROM FUNCTION NAME VALIDATION: +# These paths are excluded because they don't pass the function name validation, which checks +# that the function in the __Test pattern actually exists in the source +# contract's ABI. 
+# +# Common reasons for exclusion: +# - Internal/Private functions: Some contracts test internal functions that +# aren't exposed in the public ABI, so they can't be validated +# - Misspelled/Incorrect function names: Test contracts may have typos or +# incorrect function names that don't match the actual source contract +# +# Resolving these issues requires either: +# - Enhancing the validation system to support complex structures +# - Fixing misspelled function names in test contracts +# - Restructuring tests to match actual function signatures +function_name_validation = [ + "test/L1/ProxyAdminOwnedBase.t.sol", # Tests internal functions not in ABI + "test/L1/SystemConfig.t.sol", # Tests internal functions not in ABI + "test/safe/SafeSigners.t.sol", # Function name validation issues + "test/libraries/Predeploys.t.sol", # Function 'uncategorizedInterop' doesn't exist in library + "test/libraries/TransientContext.t.sol", # Function 'reentrantAware' doesn't exist in library +] + +[excluded_tests] +# Specific test contract names that should be excluded from validation +# These are individual test contracts that don't follow the standard patterns +# for various reasons specific to their implementation or purpose. 
+contracts = [ + "OptimismPortal2_MigrateLiquidity_Test", # Interop tests hosted in the OptimismPortal2 test file + "OptimismPortal2_MigrateToSuperRoots_Test", # Interop tests hosted in the OptimismPortal2 test file + "OptimismPortal2_UpgradeInterop_Test", # Interop tests hosted in the OptimismPortal2 test file + "TransactionBuilder", # Transaction builder helper library in TimelockGuard test file + "Constants_Test", # Invalid naming pattern - doesn't specify function or Uncategorized +] diff --git a/packages/contracts-bedrock/scripts/checks/test-validation/main.go b/packages/contracts-bedrock/scripts/checks/test-validation/main.go index 4b32827ab3dfc..776e769ac3fc4 100644 --- a/packages/contracts-bedrock/scripts/checks/test-validation/main.go +++ b/packages/contracts-bedrock/scripts/checks/test-validation/main.go @@ -8,6 +8,7 @@ import ( "strings" "unicode" + "github.com/BurntSushi/toml" "github.com/ethereum-optimism/optimism/op-chain-ops/solc" "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/scripts/checks/common" ) @@ -16,6 +17,18 @@ import ( // Validates test function naming conventions and structure in Forge test artifacts func main() { + // Load exclusions from TOML file relative to script location + scriptDir := filepath.Dir(os.Args[0]) + exclusionsPath := filepath.Join(scriptDir, "exclusions.toml") + // Fall back to local path if running with go run + if _, err := os.Stat(exclusionsPath); os.IsNotExist(err) { + exclusionsPath = "scripts/checks/test-validation/exclusions.toml" + } + if err := loadExclusions(exclusionsPath); err != nil { + fmt.Printf("error loading exclusions: %v\n", err) + os.Exit(1) + } + if _, err := common.ProcessFilesGlob( []string{"forge-artifacts/**/*.t.sol/*.json"}, []string{}, @@ -24,6 +37,8 @@ func main() { fmt.Printf("error: %v\n", err) os.Exit(1) } + + fmt.Println("✅ All contract test validations passed") } // Processes a single test artifact file and runs all validations @@ -147,6 +162,10 @@ func 
checkTestStructure(artifact *solc.ForgeArtifact) []error { // Validate each contract name in the compilation target for _, contractName := range artifact.Metadata.Settings.CompilationTarget { + if isExcludedTest(contractName) { + continue + } + contractParts := strings.Split(contractName, "_") // Check for initialization test pattern @@ -157,23 +176,13 @@ func checkTestStructure(artifact *solc.ForgeArtifact) []error { // Pattern: _Harness continue } else if len(contractParts) == 3 && contractParts[2] == "Test" { - // Check for uncategorized test pattern - if contractParts[1] == "Uncategorized" || contractParts[1] == "Unclassified" { - // Pattern: _Uncategorized_Test - continue - } else { - // Pattern: __Test - validate function exists - functionName := contractParts[1] - if !checkFunctionExists(artifact, functionName) { - // Convert to camelCase for error message - camelCaseFunctionName := strings.ToLower(functionName[:1]) + functionName[1:] - errors = append(errors, fmt.Errorf("contract '%s': function '%s' does not exist in source contract", contractName, camelCaseFunctionName)) - } - } + errors = append(errors, checkTestMethodName(artifact, contractName, contractParts[1], "")...) } else if len(contractParts) == 3 && contractParts[2] == "Harness" { // Pattern: __Harness // (e.g., OPContractsManager_Upgrade_Harness) continue + } else if len(contractParts) == 4 && contractParts[3] == "Test" { + errors = append(errors, checkTestMethodName(artifact, contractName, contractParts[1], contractParts[2])...) } else { // Invalid naming pattern errors = append(errors, fmt.Errorf("contract '%s': invalid naming pattern. 
Expected patterns: _TestInit, __Test, or _Uncategorized_Test", contractName)) @@ -183,6 +192,24 @@ func checkTestStructure(artifact *solc.ForgeArtifact) []error { return errors } +func checkTestMethodName(artifact *solc.ForgeArtifact, contractName string, functionName string, _ string) []error { + // Check for uncategorized test pattern + allowedFunctionNames := []string{"Uncategorized", "Integration"} + for _, allowed := range allowedFunctionNames { + if functionName == allowed { + // Pattern: _Uncategorized_Test or _Integration_Test + return nil + } + } + // Pattern: __Test - validate function exists + if !checkFunctionExists(artifact, functionName) { + // Convert to camelCase for error message + camelCaseFunctionName := strings.ToLower(functionName[:1]) + functionName[1:] + return []error{fmt.Errorf("contract '%s': function '%s' does not exist in source contract", contractName, camelCaseFunctionName)} + } + return nil +} + // Artifact and path validation helpers // Extracts the compilation target from the artifact @@ -226,6 +253,11 @@ func checkSrcPath(artifact *solc.ForgeArtifact) bool { // Validates that contract name matches the file path func checkContractNameFilePath(artifact *solc.ForgeArtifact) bool { for filePath, contractName := range artifact.Metadata.Settings.CompilationTarget { + + if isExcludedTest(contractName) { + continue + } + // Split contract name to get the base contract name (before first underscore) contractParts := strings.Split(contractName, "_") // Split file path to get individual path components @@ -256,6 +288,36 @@ func findArtifactPath(contractFileName, contractName string) (string, error) { return files[0], nil } +// Checks if the artifact represents a library +func isLibrary(artifact *solc.ForgeArtifact) bool { + // Check the AST for ContractKind == "library" + for _, node := range artifact.Ast.Nodes { + if node.NodeType == "ContractDefinition" && node.ContractKind == "library" { + return true + } + } + return false +} + +// 
Extracts function names from the AST (for libraries with internal functions) +func extractFunctionsFromAST(artifact *solc.ForgeArtifact) []string { + var functions []string + + // Navigate through AST to find function definitions + for _, node := range artifact.Ast.Nodes { + if node.NodeType == "ContractDefinition" { + // Iterate through contract nodes to find functions + for _, childNode := range node.Nodes { + if childNode.NodeType == "FunctionDefinition" && childNode.Name != "" { + functions = append(functions, childNode.Name) + } + } + } + } + + return functions +} + // Validates that a function exists in the source contract func checkFunctionExists(artifact *solc.ForgeArtifact, functionName string) bool { // Special functions always exist @@ -287,7 +349,18 @@ func checkFunctionExists(artifact *solc.ForgeArtifact, functionName string) bool return false } - // Check if function exists in the ABI + // Check if source is a library - use AST for internal functions + if isLibrary(srcArtifact) { + functions := extractFunctionsFromAST(srcArtifact) + for _, fn := range functions { + if strings.EqualFold(fn, functionName) { + return true + } + } + return false + } + + // For contracts, check if function exists in the ABI for _, method := range srcArtifact.Abi.Parsed.Methods { if strings.EqualFold(method.Name, functionName) { return true @@ -299,6 +372,40 @@ func checkFunctionExists(artifact *solc.ForgeArtifact, functionName string) bool // Exclusion configuration +// Variables to hold exclusion lists loaded from TOML +var excludedPaths []string +var excludedTests []string + +// Structure to match the TOML file format +type ExclusionsConfig struct { + ExcludedPaths struct { + SrcValidation []string `toml:"src_validation"` + ContractNameValidation []string `toml:"contract_name_validation"` + FunctionNameValidation []string `toml:"function_name_validation"` + } `toml:"excluded_paths"` + ExcludedTests struct { + Contracts []string `toml:"contracts"` + } 
`toml:"excluded_tests"` +} + +// Loads exclusion lists from the TOML configuration file +func loadExclusions(configPath string) error { + var config ExclusionsConfig + if _, err := toml.DecodeFile(configPath, &config); err != nil { + return fmt.Errorf("failed to decode TOML file: %w", err) + } + + // Combine all excluded paths into a single list + excludedPaths = append(excludedPaths, config.ExcludedPaths.SrcValidation...) + excludedPaths = append(excludedPaths, config.ExcludedPaths.ContractNameValidation...) + excludedPaths = append(excludedPaths, config.ExcludedPaths.FunctionNameValidation...) + + // Load excluded test contracts + excludedTests = config.ExcludedTests.Contracts + + return nil +} + // Checks if a file path should be excluded from validation func isExcluded(filePath string) bool { for _, excluded := range excludedPaths { @@ -309,79 +416,14 @@ func isExcluded(filePath string) bool { return false } -// Defines the list of paths that should be excluded from validation -var excludedPaths = []string{ - // PATHS EXCLUDED FROM SRC VALIDATION: - // These paths are excluded because they don't follow the standard naming convention where test - // files (*.t.sol) have corresponding source files (*.sol) in the src/ directory. Instead, they - // follow alternative naming conventions or serve specialized purposes: - // - Some are utility/infrastructure tests that don't test specific contracts - // - Some test external libraries or vendor code that exists elsewhere - // - Some are integration tests that test multiple contracts together - // - Some are specialized test types (invariants, formal verification, etc.) - // - // Resolving these naming inconsistencies is outside the script's scope, but they are - // documented here to avoid false validation failures while maintaining the validation rules - // for standard contract tests. 
- "test/invariants/", // Invariant testing framework - no direct src counterpart - "test/opcm/", // OP Chain Manager tests - may have different structure - "test/scripts/", // Script tests - test deployment/utility scripts, not contracts - "test/integration/", // Integration tests - test multiple contracts together - "test/cannon/MIPS64Memory.t.sol", // Tests external MIPS implementation - "test/dispute/lib/LibClock.t.sol", // Tests library utilities - "test/dispute/lib/LibGameId.t.sol", // Tests library utilities - "test/setup/DeployVariations.t.sol", // Tests deployment variations - "test/universal/BenchmarkTest.t.sol", // Performance benchmarking tests - "test/universal/ExtendedPause.t.sol", // Tests extended functionality - "test/vendor/Initializable.t.sol", // Tests external vendor code - "test/vendor/InitializableOZv5.t.sol", // Tests external vendor code - - // PATHS EXCLUDED FROM CONTRACT NAME FILE PATH VALIDATION: - // These paths are excluded because they don't follow the standard naming convention where the - // contract name matches the file name pattern: __Test. Instead, these - // files contain contracts with names like __Test, where the base - // contract name doesn't match the file name. - // - // This typically occurs when: - // - The test file contains helper contracts or alternative implementations - // - The test file tests multiple related contracts or contract variants - // - The test file uses a different naming strategy for organizational purposes - // - The contracts being tested have complex inheritance or composition patterns - // - // These naming inconsistencies may indicate the presence of specialized test - // infrastructure beyond standard harnesses or different setup contracts patterns. 
- "test/dispute/FaultDisputeGame.t.sol", // Contains contracts not matching FaultDisputeGame base name - "test/dispute/SuperFaultDisputeGame.t.sol", // Contains contracts not matching SuperFaultDisputeGame base name - "test/L1/ResourceMetering.t.sol", // Contains contracts not matching ResourceMetering base name - "test/L1/OPContractsManagerStandardValidator.t.sol", // Contains contracts not matching OPContractsManagerStandardValidator base name - "test/L2/CrossDomainOwnable.t.sol", // Contains contracts not matching CrossDomainOwnable base name - "test/L2/CrossDomainOwnable2.t.sol", // Contains contracts not matching CrossDomainOwnable2 base name - "test/L2/CrossDomainOwnable3.t.sol", // Contains contracts not matching CrossDomainOwnable3 base name - "test/L2/GasPriceOracle.t.sol", // Contains contracts not matching GasPriceOracle base name - "test/universal/StandardBridge.t.sol", // Contains contracts not matching StandardBridge base name - - // PATHS EXCLUDED FROM FUNCTION NAME VALIDATION: - // These paths are excluded because they don't pass the function name validation, which checks - // that the function in the __Test pattern actually exists in the source - // contract's ABI. 
- // - // Common reasons for exclusion: - // - Libraries: Have different artifact structures that the validation system - // doesn't currently support, making function name lookup impossible - // - Internal/Private functions: Some contracts test internal functions that - // aren't exposed in the public ABI, so they can't be validated - // - Misspelled/Incorrect function names: Test contracts may have typos or - // incorrect function names that don't match the actual source contract - // - // Resolving these issues requires either: - // - Enhancing the validation system to support libraries and complex structures - // - Fixing misspelled function names in test contracts - // - Restructuring tests to match actual function signatures - "test/libraries", // Libraries have different artifact structure, unsupported - "test/dispute/lib/LibPosition.t.sol", // Library testing - artifact structure issues - "test/L1/ProxyAdminOwnedBase.t.sol", // Tests internal functions not in ABI - "test/L1/SystemConfig.t.sol", // Tests internal functions not in ABI - "test/safe/SafeSigners.t.sol", // Function name validation issues +// Checks if a contract name should be excluded from test validation +func isExcludedTest(contractName string) bool { + for _, excluded := range excludedTests { + if excluded == contractName { + return true + } + } + return false } // Defines the signature for test name validation functions diff --git a/packages/contracts-bedrock/scripts/checks/test-validation/main_test.go b/packages/contracts-bedrock/scripts/checks/test-validation/main_test.go index 3224b5d49e1c5..96f2159e331af 100644 --- a/packages/contracts-bedrock/scripts/checks/test-validation/main_test.go +++ b/packages/contracts-bedrock/scripts/checks/test-validation/main_test.go @@ -1,6 +1,8 @@ package main import ( + "os" + "path/filepath" "reflect" "slices" "testing" @@ -9,116 +11,112 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" ) -func TestCamelCaseCheck(t *testing.T) { - tests := 
[]struct { - name string - parts []string - expected bool - }{ - {"valid single part", []string{"test"}, true}, - {"valid multiple parts", []string{"test", "something", "succeeds"}, true}, - {"invalid uppercase", []string{"Test"}, false}, - {"invalid middle uppercase", []string{"test", "Something", "succeeds"}, false}, - {"empty parts", []string{}, true}, +func TestProcessFile(t *testing.T) { + tmpFile := filepath.Join(t.TempDir(), "test.json") + if err := os.WriteFile(tmpFile, []byte(`{"abi":[{"name":"IS_TEST"}],"metadata":{"settings":{"compilationTarget":{"test.sol":"Test"}}}}`), 0644); err != nil { + t.Fatal(err) } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := checks["camelCase"].check(tt.parts); got != tt.expected { - t.Errorf("checkCamelCase error for %v = %v, want %v", tt.parts, got, tt.expected) - } - }) + _, errors := processFile(tmpFile) + if len(errors) == 0 { + t.Error("expected error for invalid test name") } } -func TestPartsCountCheck(t *testing.T) { - tests := []struct { - name string - parts []string - expected bool - }{ - {"three parts", []string{"test", "something", "succeeds"}, true}, - {"four parts", []string{"test", "something", "reason", "fails"}, true}, - {"too few parts", []string{"test", "fails"}, false}, - {"too many parts", []string{"test", "a", "b", "c", "fails"}, false}, - {"empty parts", []string{}, false}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := checks["partsCount"].check(tt.parts); got != tt.expected { - t.Errorf("checkPartsCount error for %v = %v, want %v", tt.parts, got, tt.expected) - } - }) - } -} - -func TestPrefixCheck(t *testing.T) { - tests := []struct { - name string - parts []string - expected bool - }{ - {"valid test", []string{"test", "something", "succeeds"}, true}, - {"valid testFuzz", []string{"testFuzz", "something", "succeeds"}, true}, - {"valid testDiff", []string{"testDiff", "something", "succeeds"}, true}, - {"invalid prefix", 
[]string{"testing", "something", "succeeds"}, false}, - {"empty parts", []string{}, false}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := checks["prefix"].check(tt.parts); got != tt.expected { - t.Errorf("checkPrefix error for %v = %v, want %v", tt.parts, got, tt.expected) - } - }) - } -} - -func TestSuffixCheck(t *testing.T) { - tests := []struct { - name string - parts []string - expected bool - }{ - {"valid succeeds", []string{"test", "something", "succeeds"}, true}, - {"valid reverts", []string{"test", "something", "reverts"}, true}, - {"valid fails", []string{"test", "something", "fails"}, true}, - {"valid works", []string{"test", "something", "works"}, true}, - {"valid benchmark", []string{"test", "something", "benchmark"}, true}, - {"valid benchmark_num", []string{"test", "something", "benchmark", "123"}, true}, - {"invalid suffix", []string{"test", "something", "invalid"}, false}, - {"invalid benchmark_text", []string{"test", "something", "benchmark", "abc"}, false}, - {"empty parts", []string{}, false}, +func TestValidateTestName(t *testing.T) { + artifact := &solc.ForgeArtifact{ + Abi: solc.AbiType{ + Parsed: abi.ABI{ + Methods: map[string]abi.Method{ + "IS_TEST": {Name: "IS_TEST"}, + "test_valid_succeeds": {Name: "test_valid_succeeds"}, + "test_invalid_bad": {Name: "test_invalid_bad"}, + }, + }, + }, } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := checks["suffix"].check(tt.parts); got != tt.expected { - t.Errorf("checkSuffix error for %v = %v, want %v", tt.parts, got, tt.expected) - } - }) + errors := validateTestName(artifact) + if len(errors) != 1 { + t.Errorf("validateTestName() expected 1 error, got %d", len(errors)) } } -func TestFailurePartsCheck(t *testing.T) { +func TestExtractTestNames(t *testing.T) { tests := []struct { name string - parts []string - expected bool + artifact *solc.ForgeArtifact + want []string }{ - {"valid failure with reason", []string{"test", "something", 
"reason", "fails"}, true}, - {"valid failure with reason", []string{"test", "something", "reason", "reverts"}, true}, - {"invalid failure without reason", []string{"test", "something", "fails"}, false}, - {"invalid failure without reason", []string{"test", "something", "reverts"}, false}, - {"valid non-failure with three parts", []string{"test", "something", "succeeds"}, true}, - {"empty parts", []string{}, false}, + { + name: "valid test contract", + artifact: &solc.ForgeArtifact{ + Abi: solc.AbiType{ + Parsed: abi.ABI{ + Methods: map[string]abi.Method{ + "IS_TEST": {Name: "IS_TEST"}, + "test_something_succeeds": {Name: "test_something_succeeds"}, + "test_other_fails": {Name: "test_other_fails"}, + "not_a_test": {Name: "not_a_test"}, + "testFuzz_something_works": {Name: "testFuzz_something_works"}, + }, + }, + }, + }, + want: []string{ + "test_something_succeeds", + "test_other_fails", + "testFuzz_something_works", + }, + }, + { + name: "non-test contract", + artifact: &solc.ForgeArtifact{ + Abi: solc.AbiType{ + Parsed: abi.ABI{ + Methods: map[string]abi.Method{ + "test_something_succeeds": {Name: "test_something_succeeds"}, + "not_a_test": {Name: "not_a_test"}, + }, + }, + }, + }, + want: nil, + }, + { + name: "empty contract", + artifact: &solc.ForgeArtifact{ + Abi: solc.AbiType{ + Parsed: abi.ABI{ + Methods: map[string]abi.Method{}, + }, + }, + }, + want: nil, + }, + { + name: "test contract with no test methods", + artifact: &solc.ForgeArtifact{ + Abi: solc.AbiType{ + Parsed: abi.ABI{ + Methods: map[string]abi.Method{ + "IS_TEST": {Name: "IS_TEST"}, + "not_a_test": {Name: "not_a_test"}, + "another_method": {Name: "another_method"}, + }, + }, + }, + }, + want: []string{}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := checks["failureParts"].check(tt.parts); got != tt.expected { - t.Errorf("checkFailureParts error for %v = %v, want %v", tt.parts, got, tt.expected) + got := extractTestNames(tt.artifact) + slices.Sort(got) + 
slices.Sort(tt.want) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("extractTestNames() = %v, want %v", got, tt.want) } }) } @@ -209,82 +207,422 @@ func TestCheckTestName(t *testing.T) { } } -func TestExtractTestNames(t *testing.T) { +func TestValidateTestStructure(t *testing.T) { + excludedPaths = []string{"test/excluded/"} + defer func() { excludedPaths = nil }() + artifact := &solc.ForgeArtifact{Metadata: solc.ForgeCompilerMetadata{Settings: solc.CompilerSettings{CompilationTarget: map[string]string{"test/excluded/Contract.t.sol": "Contract_Test"}}}} + if errors := validateTestStructure(artifact); len(errors) != 0 { + t.Errorf("expected no errors for excluded path, got %d", len(errors)) + } +} + +func TestCheckTestStructure(t *testing.T) { + valid := &solc.ForgeArtifact{Metadata: solc.ForgeCompilerMetadata{Settings: solc.CompilerSettings{CompilationTarget: map[string]string{"test.sol": "Contract_TestInit"}}}} + invalid := &solc.ForgeArtifact{Metadata: solc.ForgeCompilerMetadata{Settings: solc.CompilerSettings{CompilationTarget: map[string]string{"test.sol": "Invalid_Pattern"}}}} + if len(checkTestStructure(valid)) > 0 { + t.Error("valid pattern should not error") + } + if len(checkTestStructure(invalid)) == 0 { + t.Error("invalid pattern should error") + } +} + +func TestGetCompilationTarget(t *testing.T) { tests := []struct { - name string - artifact *solc.ForgeArtifact - want []string + name string + artifact *solc.ForgeArtifact + wantPath string + wantContract string + wantErr bool }{ { - name: "valid test contract", + name: "single target", artifact: &solc.ForgeArtifact{ - Abi: solc.AbiType{ - Parsed: abi.ABI{ - Methods: map[string]abi.Method{ - "IS_TEST": {Name: "IS_TEST"}, - "test_something_succeeds": {Name: "test_something_succeeds"}, - "test_other_fails": {Name: "test_other_fails"}, - "not_a_test": {Name: "not_a_test"}, - "testFuzz_something_works": {Name: "testFuzz_something_works"}, - }, + Metadata: solc.ForgeCompilerMetadata{ + Settings: 
solc.CompilerSettings{ + CompilationTarget: map[string]string{"path/file.sol": "Contract"}, }, }, }, - want: []string{ - "test_something_succeeds", - "test_other_fails", - "testFuzz_something_works", - }, + wantPath: "path/file.sol", + wantContract: "Contract", + wantErr: false, }, { - name: "non-test contract", + name: "no targets", artifact: &solc.ForgeArtifact{ - Abi: solc.AbiType{ - Parsed: abi.ABI{ - Methods: map[string]abi.Method{ - "test_something_succeeds": {Name: "test_something_succeeds"}, - "not_a_test": {Name: "not_a_test"}, - }, + Metadata: solc.ForgeCompilerMetadata{ + Settings: solc.CompilerSettings{ + CompilationTarget: map[string]string{}, }, }, }, - want: nil, + wantErr: true, }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotPath, gotContract, err := getCompilationTarget(tt.artifact) + if (err != nil) != tt.wantErr { + t.Errorf("getCompilationTarget() error = %v, wantErr %v", err, tt.wantErr) + return + } + if gotPath != tt.wantPath || gotContract != tt.wantContract { + t.Errorf("getCompilationTarget() = (%v, %v), want (%v, %v)", gotPath, gotContract, tt.wantPath, tt.wantContract) + } + }) + } +} + +func TestCheckSrcPath(t *testing.T) { + tmpDir := t.TempDir() + if err := os.MkdirAll(filepath.Join(tmpDir, "src"), 0755); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(tmpDir, "src", "Contract.sol"), []byte(""), 0644); err != nil { + t.Fatal(err) + } + oldWd, _ := os.Getwd() + defer func() { + if err := os.Chdir(oldWd); err != nil { + t.Error(err) + } + }() + if err := os.Chdir(tmpDir); err != nil { + t.Fatal(err) + } + + valid := &solc.ForgeArtifact{Metadata: solc.ForgeCompilerMetadata{Settings: solc.CompilerSettings{CompilationTarget: map[string]string{"test/Contract.t.sol": "Contract_Test"}}}} + invalid := &solc.ForgeArtifact{Metadata: solc.ForgeCompilerMetadata{Settings: solc.CompilerSettings{CompilationTarget: map[string]string{"test/Missing.t.sol": "Missing_Test"}}}} + + if 
!checkSrcPath(valid) { + t.Error("valid src path should return true") + } + if checkSrcPath(invalid) { + t.Error("invalid src path should return false") + } +} + +func TestCheckContractNameFilePath(t *testing.T) { + tests := []struct { + name string + artifact *solc.ForgeArtifact + want bool + }{ { - name: "empty contract", + name: "matching name", artifact: &solc.ForgeArtifact{ - Abi: solc.AbiType{ - Parsed: abi.ABI{ - Methods: map[string]abi.Method{}, + Metadata: solc.ForgeCompilerMetadata{ + Settings: solc.CompilerSettings{ + CompilationTarget: map[string]string{"test/Contract.t.sol": "Contract_Test"}, }, }, }, - want: nil, + want: true, }, { - name: "test contract with no test methods", + name: "non-matching name", artifact: &solc.ForgeArtifact{ - Abi: solc.AbiType{ - Parsed: abi.ABI{ - Methods: map[string]abi.Method{ - "IS_TEST": {Name: "IS_TEST"}, - "not_a_test": {Name: "not_a_test"}, - "another_method": {Name: "another_method"}, - }, + Metadata: solc.ForgeCompilerMetadata{ + Settings: solc.CompilerSettings{ + CompilationTarget: map[string]string{"test/Contract.t.sol": "Other_Test"}, }, }, }, - want: []string{}, + want: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := extractTestNames(tt.artifact) - slices.Sort(got) - slices.Sort(tt.want) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("extractTestNames() = %v, want %v", got, tt.want) + if got := checkContractNameFilePath(tt.artifact); got != tt.want { + t.Errorf("checkContractNameFilePath() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestFindArtifactPath(t *testing.T) { + tmpDir := t.TempDir() + if err := os.MkdirAll(filepath.Join(tmpDir, "forge-artifacts", "Contract.sol"), 0755); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(tmpDir, "forge-artifacts", "Contract.sol", "Contract.json"), []byte("{}"), 0644); err != nil { + t.Fatal(err) + } + oldWd, _ := os.Getwd() + defer func() { + if err := os.Chdir(oldWd); err != nil { + t.Error(err) + 
} + }() + if err := os.Chdir(tmpDir); err != nil { + t.Fatal(err) + } + + if _, err := findArtifactPath("Contract.sol", "Contract"); err != nil { + t.Error("existing contract should not error") + } + if _, err := findArtifactPath("Missing.sol", "Missing"); err == nil { + t.Error("missing contract should error") + } +} + +func TestIsLibrary(t *testing.T) { + library := &solc.ForgeArtifact{ + Ast: solc.Ast{ + Nodes: []solc.AstNode{ + {NodeType: "ContractDefinition", ContractKind: "library"}, + }, + }, + } + contract := &solc.ForgeArtifact{ + Ast: solc.Ast{ + Nodes: []solc.AstNode{ + {NodeType: "ContractDefinition", ContractKind: "contract"}, + }, + }, + } + if !isLibrary(library) { + t.Error("library artifact should be detected as library") + } + if isLibrary(contract) { + t.Error("contract artifact should not be detected as library") + } +} + +func TestExtractFunctionsFromAST(t *testing.T) { + artifact := &solc.ForgeArtifact{ + Ast: solc.Ast{ + Nodes: []solc.AstNode{ + { + NodeType: "ContractDefinition", + Nodes: []solc.AstNode{ + {NodeType: "FunctionDefinition", Name: "add"}, + {NodeType: "FunctionDefinition", Name: "subtract"}, + {NodeType: "VariableDeclaration", Name: "ignored"}, + }, + }, + }, + }, + } + functions := extractFunctionsFromAST(artifact) + if len(functions) != 2 { + t.Errorf("expected 2 functions, got %d", len(functions)) + } + if functions[0] != "add" || functions[1] != "subtract" { + t.Errorf("unexpected function names: %v", functions) + } +} + +func TestCheckFunctionExists(t *testing.T) { + artifact := &solc.ForgeArtifact{Metadata: solc.ForgeCompilerMetadata{Settings: solc.CompilerSettings{CompilationTarget: map[string]string{"test/Contract.t.sol": "Contract_Test"}}}} + if !checkFunctionExists(artifact, "constructor") { + t.Error("constructor should always exist") + } + if checkFunctionExists(artifact, "nonexistent") { + t.Error("nonexistent function should not exist") + } +} + +func TestLoadExclusions(t *testing.T) { + tmpFile := 
filepath.Join(t.TempDir(), "test.toml") + if err := os.WriteFile(tmpFile, []byte(`[excluded_paths] +src_validation = ["path1"] +[excluded_tests] +contracts = ["Test1"]`), 0644); err != nil { + t.Fatal(err) + } + + excludedPaths, excludedTests = nil, nil + defer func() { excludedPaths, excludedTests = nil, nil }() + + if err := loadExclusions(tmpFile); err != nil { + t.Error("loadExclusions should not error") + } + if len(excludedPaths) != 1 || len(excludedTests) != 1 { + t.Error("expected 1 excluded path and 1 excluded test") + } +} + +func TestIsExcluded(t *testing.T) { + excludedPaths = []string{"test/excluded/", "other/path/"} + defer func() { excludedPaths = nil }() + + tests := []struct { + name string + filePath string + want bool + }{ + {"excluded path", "test/excluded/file.sol", true}, + {"other excluded", "other/path/file.sol", true}, + {"not excluded", "test/normal/file.sol", false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := isExcluded(tt.filePath); got != tt.want { + t.Errorf("isExcluded() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestIsExcludedTest(t *testing.T) { + excludedTests = []string{"ExcludedContract", "AnotherExcluded"} + defer func() { excludedTests = nil }() + + tests := []struct { + name string + contractName string + want bool + }{ + {"excluded contract", "ExcludedContract", true}, + {"another excluded", "AnotherExcluded", true}, + {"not excluded", "NormalContract", false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := isExcludedTest(tt.contractName); got != tt.want { + t.Errorf("isExcludedTest() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestCamelCaseCheck(t *testing.T) { + tests := []struct { + name string + parts []string + expected bool + }{ + {"valid single part", []string{"test"}, true}, + {"valid multiple parts", []string{"test", "something", "succeeds"}, true}, + {"invalid uppercase", []string{"Test"}, false}, + {"invalid middle 
uppercase", []string{"test", "Something", "succeeds"}, false}, + {"empty parts", []string{}, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := checks["camelCase"].check(tt.parts); got != tt.expected { + t.Errorf("checkCamelCase error for %v = %v, want %v", tt.parts, got, tt.expected) + } + }) + } +} + +func TestPartsCountCheck(t *testing.T) { + tests := []struct { + name string + parts []string + expected bool + }{ + {"three parts", []string{"test", "something", "succeeds"}, true}, + {"four parts", []string{"test", "something", "reason", "fails"}, true}, + {"too few parts", []string{"test", "fails"}, false}, + {"too many parts", []string{"test", "a", "b", "c", "fails"}, false}, + {"empty parts", []string{}, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := checks["partsCount"].check(tt.parts); got != tt.expected { + t.Errorf("checkPartsCount error for %v = %v, want %v", tt.parts, got, tt.expected) + } + }) + } +} + +func TestPrefixCheck(t *testing.T) { + tests := []struct { + name string + parts []string + expected bool + }{ + {"valid test", []string{"test", "something", "succeeds"}, true}, + {"valid testFuzz", []string{"testFuzz", "something", "succeeds"}, true}, + {"valid testDiff", []string{"testDiff", "something", "succeeds"}, true}, + {"invalid prefix", []string{"testing", "something", "succeeds"}, false}, + {"empty parts", []string{}, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := checks["prefix"].check(tt.parts); got != tt.expected { + t.Errorf("checkPrefix error for %v = %v, want %v", tt.parts, got, tt.expected) + } + }) + } +} + +func TestSuffixCheck(t *testing.T) { + tests := []struct { + name string + parts []string + expected bool + }{ + {"valid succeeds", []string{"test", "something", "succeeds"}, true}, + {"valid reverts", []string{"test", "something", "reverts"}, true}, + {"valid fails", []string{"test", "something", 
"fails"}, true}, + {"valid works", []string{"test", "something", "works"}, true}, + {"valid benchmark", []string{"test", "something", "benchmark"}, true}, + {"valid benchmark_num", []string{"test", "something", "benchmark", "123"}, true}, + {"invalid suffix", []string{"test", "something", "invalid"}, false}, + {"invalid benchmark_text", []string{"test", "something", "benchmark", "abc"}, false}, + {"empty parts", []string{}, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := checks["suffix"].check(tt.parts); got != tt.expected { + t.Errorf("checkSuffix error for %v = %v, want %v", tt.parts, got, tt.expected) + } + }) + } +} + +func TestFailurePartsCheck(t *testing.T) { + tests := []struct { + name string + parts []string + expected bool + }{ + {"valid failure with reason", []string{"test", "something", "reason", "fails"}, true}, + {"valid failure with reason", []string{"test", "something", "reason", "reverts"}, true}, + {"invalid failure without reason", []string{"test", "something", "fails"}, false}, + {"invalid failure without reason", []string{"test", "something", "reverts"}, false}, + {"valid non-failure with three parts", []string{"test", "something", "succeeds"}, true}, + {"empty parts", []string{}, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := checks["failureParts"].check(tt.parts); got != tt.expected { + t.Errorf("checkFailureParts error for %v = %v, want %v", tt.parts, got, tt.expected) + } + }) + } +} + +func TestDoubleUnderscoresCheck(t *testing.T) { + tests := []struct { + name string + parts []string + expected bool + }{ + {"valid no empty", []string{"test", "something", "succeeds"}, true}, + {"invalid empty part", []string{"test", "", "succeeds"}, false}, + {"invalid multiple empty", []string{"test", "", "", "succeeds"}, false}, + {"empty parts", []string{}, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := 
checks["doubleUnderscores"].check(tt.parts); got != tt.expected { + t.Errorf("doubleUnderscores check for %v = %v, want %v", tt.parts, got, tt.expected) } }) } diff --git a/packages/contracts-bedrock/scripts/checks/valid-semver-check/main.go b/packages/contracts-bedrock/scripts/checks/valid-semver-check/main.go new file mode 100644 index 0000000000000..09056e06b6787 --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/valid-semver-check/main.go @@ -0,0 +1,115 @@ +package main + +import ( + "fmt" + "os" + "strconv" + "strings" + + "github.com/ethereum-optimism/optimism/op-chain-ops/solc" + "github.com/ethereum-optimism/optimism/packages/contracts-bedrock/scripts/checks/common" +) + +func main() { + if _, err := common.ProcessFilesGlob( + []string{"forge-artifacts/**/*.json"}, + []string{"forge-artifacts/L2StandardBridgeInterop.sol/**.json", "forge-artifacts/OptimismPortalInterop.sol/**.json", "forge-artifacts/RISCV.sol/**.json", "forge-artifacts/EAS.sol/**.json", "forge-artifacts/SchemaRegistry.sol/**.json"}, + processFile, + ); err != nil { + fmt.Printf("Error: %v/n", err) + os.Exit(1) + } +} + +func processFile(path string) (*common.Void, []error) { + artifact, err := common.ReadForgeArtifact(path) + if err != nil { + return nil, []error{err} + } + + // Only check src/ contracts. + if !strings.HasPrefix(artifact.Ast.AbsolutePath, "src/") { + return nil, nil + } + + version, err := getVersion(artifact) + if err != nil { + return nil, nil + } + + err = assertValidSemver(version) + if err != nil { + return nil, []error{err} + } + + fmt.Println("✅ ", artifact.Ast.AbsolutePath) + + return nil, nil +} + +func getVersion(artifact *solc.ForgeArtifact) (string, error) { + for _, node := range artifact.Ast.Nodes { + if node.NodeType == "ContractDefinition" { + // Check if there is a version constant definition. 
+ for _, subNode := range node.Nodes { + if subNode.NodeType == "VariableDeclaration" && + subNode.Mutability == "constant" && + subNode.Name == "version" && + subNode.Visibility == "public" { + if subNode.Value.(map[string]interface{})["value"] == nil { + fmt.Println("WARNING: version constant value is nil", node.Name) + return "", nil + } + return subNode.Value.(map[string]interface{})["value"].(string), nil + } + } + + // Check if there is a version function definition. + for _, subNode := range node.Nodes { + if subNode.NodeType == "FunctionDefinition" && + subNode.Name == "version" && + subNode.Visibility == "public" { + if subNode.Body.Statements == nil { + return "", fmt.Errorf("version function has no body") + } + if len(subNode.Body.Statements) != 1 || subNode.Body.Statements[0].NodeType != "Return" { + return "", fmt.Errorf("expected version function to have a single statement that returns the version string") + } + if subNode.Body.Statements[0].Expression.Value == nil { + fmt.Println("WARNING: version function value is nil", node.Name) + return "", nil + } + + return subNode.Body.Statements[0].Expression.Value.(string), nil + } + } + } + } + + return "", fmt.Errorf("version function or constant definition not found") +} + +func assertValidSemver(version string) error { + parts := strings.Split(version, ".") + + if len(parts) != 3 { + return fmt.Errorf("version should be 3 parts") + } + + _, err := strconv.Atoi(parts[0]) + if err != nil { + return fmt.Errorf("major version should be a number") + } + + _, err = strconv.Atoi(parts[1]) + if err != nil { + return fmt.Errorf("minor version should be a number") + } + + _, err = strconv.Atoi(parts[2]) + if err != nil { + return fmt.Errorf("patch version should be a number") + } + + return nil +} diff --git a/packages/contracts-bedrock/scripts/checks/valid-semver-check/main_test.go b/packages/contracts-bedrock/scripts/checks/valid-semver-check/main_test.go new file mode 100644 index 0000000000000..8a7641b32f5bd 
--- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/valid-semver-check/main_test.go @@ -0,0 +1,84 @@ +package main + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-chain-ops/solc" + "github.com/stretchr/testify/assert" +) + +func TestAssertValidSemver(t *testing.T) { + tests := []struct { + name string + version string + wantErr bool + }{ + {name: "Valid semver", version: "1.0.0", wantErr: false}, + {name: "Invalid semver", version: "1.0.0-beta.1", wantErr: true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := assertValidSemver(tt.version) + assert.Equal(t, tt.wantErr, err != nil) + }) + } +} + +func TestGetVersion(t *testing.T) { + tests := []struct { + name string + artifact *solc.ForgeArtifact + want string + wantErr bool + }{ + { + name: "Semver constant definition found", + artifact: &solc.ForgeArtifact{ + Ast: solc.Ast{ + Nodes: []solc.AstNode{ + {NodeType: "ContractDefinition", Nodes: []solc.AstNode{ + {NodeType: "VariableDeclaration", Name: "version", Visibility: "public", Mutability: "constant", Value: map[string]interface{}{"value": "1.0.0"}}, + }}, + }, + }, + }, + want: "1.0.0", + wantErr: false, + }, + { + name: "Semver function definition found", + artifact: &solc.ForgeArtifact{ + Ast: solc.Ast{ + Nodes: []solc.AstNode{ + {NodeType: "ContractDefinition", Nodes: []solc.AstNode{ + {NodeType: "FunctionDefinition", Name: "version", Visibility: "public", Body: &solc.AstBlock{Statements: []solc.AstNode{{NodeType: "Return", Expression: &solc.Expression{Value: "1.0.0"}}}}}, + }}, + }, + }, + }, + want: "1.0.0", + wantErr: false, + }, + { + name: "Semver function definition not found", + artifact: &solc.ForgeArtifact{ + Ast: solc.Ast{ + Nodes: []solc.AstNode{ + {NodeType: "ContractDefinition", Nodes: []solc.AstNode{}}, + }, + }, + }, + want: "", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := getVersion(tt.artifact) + assert.Equal(t, 
tt.wantErr, err != nil) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol b/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol index ebcf310517235..0f14886ecdcd9 100644 --- a/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol +++ b/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol @@ -7,7 +7,6 @@ import { console2 as console } from "forge-std/console2.sol"; // Scripts import { DeployConfig } from "scripts/deploy/DeployConfig.s.sol"; -import { DeployOPChainInput } from "scripts/deploy/DeployOPChain.s.sol"; import { DeployImplementations } from "scripts/deploy/DeployImplementations.s.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; @@ -32,7 +31,7 @@ import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol" import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; -import { IMIPS } from "interfaces/cannon/IMIPS.sol"; +import { IMIPS64 } from "interfaces/cannon/IMIPS64.sol"; import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; import { IProxyAdminOwnedBase } from "interfaces/L1/IProxyAdminOwnedBase.sol"; @@ -54,68 +53,73 @@ library ChainAssertions { } /// @notice Asserts that the SystemConfig is setup correctly - function checkSystemConfig( + function checkSystemConfigImpls(Types.ContractSet memory _contracts) internal view { + ISystemConfig config = ISystemConfig(_contracts.SystemConfig); + console.log("Running chain assertions on the SystemConfig impl at %s", address(config)); + + // Check that the contract is initialized + DeployUtils.assertInitialized({ _contractAddress: address(config), _isProxy: false, _slot: 0, _offset: 0 }); + + 
IResourceMetering.ResourceConfig memory resourceConfig = config.resourceConfig(); + + require(config.owner() == address(0), "CHECK-SCFG-220"); + require(config.overhead() == 0, "CHECK-SCFG-230"); + require(config.scalar() == 0, "CHECK-SCFG-240"); // version 1 + require(config.basefeeScalar() == 0, "CHECK-SCFG-250"); + require(config.blobbasefeeScalar() == 0, "CHECK-SCFG-260"); + require(config.batcherHash() == bytes32(0), "CHECK-SCFG-270"); + require(config.gasLimit() == 0, "CHECK-SCFG-280"); + require(config.unsafeBlockSigner() == address(0), "CHECK-SCFG-290"); + // Check _config + require(resourceConfig.maxResourceLimit == 0, "CHECK-SCFG-300"); + require(resourceConfig.elasticityMultiplier == 0, "CHECK-SCFG-310"); + require(resourceConfig.baseFeeMaxChangeDenominator == 0, "CHECK-SCFG-320"); + require(resourceConfig.systemTxMaxGas == 0, "CHECK-SCFG-330"); + require(resourceConfig.minimumBaseFee == 0, "CHECK-SCFG-340"); + require(resourceConfig.maximumBaseFee == 0, "CHECK-SCFG-350"); + // Check _addresses + require(config.startBlock() == type(uint256).max, "CHECK-SCFG-360"); + require(config.batchInbox() == address(0), "CHECK-SCFG-370"); + require(config.l1CrossDomainMessenger() == address(0), "CHECK-SCFG-380"); + require(config.l1ERC721Bridge() == address(0), "CHECK-SCFG-390"); + require(config.l1StandardBridge() == address(0), "CHECK-SCFG-400"); + require(config.optimismPortal() == address(0), "CHECK-SCFG-420"); + require(config.optimismMintableERC20Factory() == address(0), "CHECK-SCFG-430"); + } + + /// @notice Asserts that the SystemConfig is setup correctly + function checkSystemConfigProxies( Types.ContractSet memory _contracts, - DeployOPChainInput _doi, - bool _isProxy + Types.DeployOPChainInput memory _doi ) internal view { ISystemConfig config = ISystemConfig(_contracts.SystemConfig); - console.log( - "Running chain assertions on the SystemConfig %s at %s", - _isProxy ? 
"proxy" : "implementation", - address(config) - ); + console.log("Running chain assertions on the SystemConfig proxy at %s", address(config)); // Check that the contract is initialized - DeployUtils.assertInitialized({ _contractAddress: address(config), _isProxy: _isProxy, _slot: 0, _offset: 0 }); - - IResourceMetering.ResourceConfig memory resourceConfig = config.resourceConfig(); - - if (_isProxy) { - require(config.owner() == _doi.systemConfigOwner(), "CHECK-SCFG-10"); - require(config.basefeeScalar() == _doi.basefeeScalar(), "CHECK-SCFG-20"); - require(config.blobbasefeeScalar() == _doi.blobBaseFeeScalar(), "CHECK-SCFG-30"); - require(config.batcherHash() == bytes32(uint256(uint160(_doi.batcher()))), "CHECK-SCFG-40"); - require(config.gasLimit() == uint64(_doi.gasLimit()), "CHECK-SCFG-50"); - require(config.unsafeBlockSigner() == _doi.unsafeBlockSigner(), "CHECK-SCFG-60"); - require(config.scalar() >> 248 == 1, "CHECK-SCFG-70"); - // Depends on start block being set to 0 in `initialize` - require(config.startBlock() == block.number, "CHECK-SCFG-140"); - require(config.batchInbox() == _doi.opcm().chainIdToBatchInboxAddress(_doi.l2ChainId()), "CHECK-SCFG-150"); - // Check _addresses - require(config.l1CrossDomainMessenger() == _contracts.L1CrossDomainMessenger, "CHECK-SCFG-160"); - require(config.l1ERC721Bridge() == _contracts.L1ERC721Bridge, "CHECK-SCFG-170"); - require(config.l1StandardBridge() == _contracts.L1StandardBridge, "CHECK-SCFG-180"); - require(config.optimismPortal() == _contracts.OptimismPortal, "CHECK-SCFG-200"); - require(config.optimismMintableERC20Factory() == _contracts.OptimismMintableERC20Factory, "CHECK-SCFG-210"); - } else { - require(config.owner() == address(0), "CHECK-SCFG-220"); - require(config.overhead() == 0, "CHECK-SCFG-230"); - require(config.scalar() == 0, "CHECK-SCFG-240"); // version 1 - require(config.basefeeScalar() == 0, "CHECK-SCFG-250"); - require(config.blobbasefeeScalar() == 0, "CHECK-SCFG-260"); - 
require(config.batcherHash() == bytes32(0), "CHECK-SCFG-270"); - require(config.gasLimit() == 0, "CHECK-SCFG-280"); - require(config.unsafeBlockSigner() == address(0), "CHECK-SCFG-290"); - // Check _config - require(resourceConfig.maxResourceLimit == 0, "CHECK-SCFG-300"); - require(resourceConfig.elasticityMultiplier == 0, "CHECK-SCFG-310"); - require(resourceConfig.baseFeeMaxChangeDenominator == 0, "CHECK-SCFG-320"); - require(resourceConfig.systemTxMaxGas == 0, "CHECK-SCFG-330"); - require(resourceConfig.minimumBaseFee == 0, "CHECK-SCFG-340"); - require(resourceConfig.maximumBaseFee == 0, "CHECK-SCFG-350"); - // Check _addresses - require(config.startBlock() == type(uint256).max, "CHECK-SCFG-360"); - require(config.batchInbox() == address(0), "CHECK-SCFG-370"); - require(config.l1CrossDomainMessenger() == address(0), "CHECK-SCFG-380"); - require(config.l1ERC721Bridge() == address(0), "CHECK-SCFG-390"); - require(config.l1StandardBridge() == address(0), "CHECK-SCFG-400"); - require(config.optimismPortal() == address(0), "CHECK-SCFG-420"); - require(config.optimismMintableERC20Factory() == address(0), "CHECK-SCFG-430"); - } + DeployUtils.assertInitialized({ _contractAddress: address(config), _isProxy: true, _slot: 0, _offset: 0 }); + + require(config.owner() == _doi.systemConfigOwner, "CHECK-SCFG-10"); + require(config.basefeeScalar() == _doi.basefeeScalar, "CHECK-SCFG-20"); + require(config.blobbasefeeScalar() == _doi.blobBaseFeeScalar, "CHECK-SCFG-30"); + require(config.batcherHash() == bytes32(uint256(uint160(_doi.batcher))), "CHECK-SCFG-40"); + require(config.gasLimit() == uint64(_doi.gasLimit), "CHECK-SCFG-50"); + require(config.unsafeBlockSigner() == _doi.unsafeBlockSigner, "CHECK-SCFG-60"); + require(config.scalar() >> 248 == 1, "CHECK-SCFG-70"); + // Depends on start block being set to 0 in `initialize` + require(config.startBlock() == block.number, "CHECK-SCFG-140"); + require( + config.batchInbox() == 
IOPContractsManager(_doi.opcm).chainIdToBatchInboxAddress(_doi.l2ChainId), + "CHECK-SCFG-150" + ); + // Check _addresses + require(config.l1CrossDomainMessenger() == _contracts.L1CrossDomainMessenger, "CHECK-SCFG-160"); + require(config.l1ERC721Bridge() == _contracts.L1ERC721Bridge, "CHECK-SCFG-170"); + require(config.l1StandardBridge() == _contracts.L1StandardBridge, "CHECK-SCFG-180"); + require(config.optimismPortal() == _contracts.OptimismPortal, "CHECK-SCFG-200"); + require(config.optimismMintableERC20Factory() == _contracts.OptimismMintableERC20Factory, "CHECK-SCFG-210"); } /// @notice Asserts that the L1CrossDomainMessenger is setup correctly @@ -200,7 +204,7 @@ library ChainAssertions { } /// @notice Asserts that the MIPs contract is setup correctly - function checkMIPS(IMIPS _mips, IPreimageOracle _oracle) internal view { + function checkMIPS(IMIPS64 _mips, IPreimageOracle _oracle) internal view { console.log("Running chain assertions on the MIPS at %s", address(_mips)); require(address(_mips) != address(0), "CHECK-MIPS-10"); @@ -277,7 +281,10 @@ library ChainAssertions { require(address(portal.superchainConfig()) == address(_superchainConfig), "PORTAL-40"); require(portal.guardian() == _superchainConfig.guardian(), "CHECK-OP2-40"); require(portal.paused() == ISystemConfig(_contracts.SystemConfig).paused(), "CHECK-OP2-60"); - require(address(portal.ethLockbox()) == _contracts.ETHLockbox, "CHECK-OP2-80"); + require( + address(portal.ethLockbox()) == _contracts.ETHLockbox || address(portal.ethLockbox()) == address(0), + "CHECK-OP2-80" + ); require(portal.proxyAdminOwner() == _opChainProxyAdminOwner, "CHECK-OP2-90"); } else { require(address(portal.anchorStateRegistry()) == address(0), "CHECK-OP2-80"); @@ -370,7 +377,7 @@ library ChainAssertions { Types.ContractSet memory _impls, Types.ContractSet memory _proxies, IOPContractsManager _opcm, - IMIPS _mips, + IMIPS64 _mips, IProxyAdmin _superchainProxyAdmin ) internal @@ -380,7 +387,6 @@ library ChainAssertions 
{ require(address(_opcm) != address(0), "CHECK-OPCM-10"); require(bytes(_opcm.version()).length > 0, "CHECK-OPCM-15"); - require(bytes(_opcm.l1ContractsRelease()).length > 0, "CHECK-OPCM-16"); require(address(_opcm.protocolVersions()) == _proxies.ProtocolVersions, "CHECK-OPCM-17"); require(address(_opcm.superchainProxyAdmin()) == address(_superchainProxyAdmin), "CHECK-OPCM-18"); require(address(_opcm.superchainConfig()) == _proxies.SuperchainConfig, "CHECK-OPCM-19"); @@ -436,14 +442,12 @@ library ChainAssertions { ); } - function checkAnchorStateRegistryProxy( - IAnchorStateRegistry _anchorStateRegistryProxy, - bool _isProxy - ) - internal - view - { - // Then we check the proxy as ASR. + function checkAnchorStateRegistryProxy(IAnchorStateRegistry _anchorStateRegistryProxy, bool _isProxy) internal { + DeployUtils.assertValidContractAddress(address(_anchorStateRegistryProxy)); + if (_isProxy) { + DeployUtils.assertERC1967ImplementationSet(address(_anchorStateRegistryProxy)); + } + DeployUtils.assertInitialized({ _contractAddress: address(_anchorStateRegistryProxy), _isProxy: _isProxy, diff --git a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol index 841878845bb3d..fcf70f61cbb1f 100644 --- a/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/Deploy.s.sol @@ -9,7 +9,6 @@ import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Scripts import { Deployer } from "scripts/deploy/Deployer.sol"; -import { DeployOPChainInput } from "scripts/deploy/DeployOPChain.s.sol"; import { Chains } from "scripts/libraries/Chains.sol"; import { Config } from "scripts/libraries/Config.sol"; import { StateDiff } from "scripts/libraries/StateDiff.sol"; @@ -33,7 +32,7 @@ import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; import { IDelayedWETH } from 
"interfaces/dispute/IDelayedWETH.sol"; import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; -import { IMIPS } from "interfaces/cannon/IMIPS.sol"; +import { IMIPS64 } from "interfaces/cannon/IMIPS64.sol"; import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; @@ -274,11 +273,15 @@ contract Deploy is Deployer { proofMaturityDelaySeconds: cfg.proofMaturityDelaySeconds(), disputeGameFinalityDelaySeconds: cfg.disputeGameFinalityDelaySeconds(), mipsVersion: StandardConstants.MIPS_VERSION, - l1ContractsRelease: "dev", + devFeatureBitmap: cfg.devFeatureBitmap(), + faultGameV2MaxGameDepth: cfg.faultGameV2MaxGameDepth(), + faultGameV2SplitDepth: cfg.faultGameV2SplitDepth(), + faultGameV2ClockExtension: cfg.faultGameV2ClockExtension(), + faultGameV2MaxClockDuration: cfg.faultGameV2MaxClockDuration(), protocolVersionsProxy: IProtocolVersions(artifacts.mustGetAddress("ProtocolVersionsProxy")), superchainConfigProxy: superchainConfigProxy, superchainProxyAdmin: superchainProxyAdmin, - upgradeController: superchainProxyAdmin.owner(), + l1ProxyAdminOwner: superchainProxyAdmin.owner(), challenger: cfg.l2OutputOracleChallenger() }) ); @@ -313,17 +316,17 @@ contract Deploy is Deployer { ); ChainAssertions.checkDelayedWETHImpl(IDelayedWETH(payable(impls.DelayedWETH)), cfg.faultGameWithdrawalDelay()); ChainAssertions.checkMIPS({ - _mips: IMIPS(address(dio.mipsSingleton)), + _mips: IMIPS64(address(dio.mipsSingleton)), _oracle: IPreimageOracle(address(dio.preimageOracleSingleton)) }); ChainAssertions.checkOPContractsManager({ _impls: impls, _proxies: _proxies(), _opcm: IOPContractsManager(address(dio.opcm)), - _mips: IMIPS(address(dio.mipsSingleton)), + _mips: IMIPS64(address(dio.mipsSingleton)), _superchainProxyAdmin: superchainProxyAdmin }); - ChainAssertions.checkSystemConfig({ 
_doi: DeployOPChainInput(address(0)), _contracts: impls, _isProxy: false }); + ChainAssertions.checkSystemConfigImpls(impls); ChainAssertions.checkAnchorStateRegistryProxy(IAnchorStateRegistry(impls.AnchorStateRegistry), false); } diff --git a/packages/contracts-bedrock/scripts/deploy/DeployConfig.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployConfig.s.sol index 89db6f6244e4e..bfda16442e540 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployConfig.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployConfig.s.sol @@ -29,6 +29,7 @@ contract DeployConfig is Script { uint256 public l2GenesisFjordTimeOffset; uint256 public l2GenesisGraniteTimeOffset; uint256 public l2GenesisHoloceneTimeOffset; + uint256 public l2GenesisJovianTimeOffset; address public p2pSequencerAddress; address public batchInboxAddress; address public batchSenderAddress; @@ -75,10 +76,17 @@ contract DeployConfig is Script { uint256 public daBondSize; uint256 public daResolverRefundPercentage; + // V2 Dispute Game Configuration + uint256 public faultGameV2MaxGameDepth; + uint256 public faultGameV2SplitDepth; + uint256 public faultGameV2ClockExtension; + uint256 public faultGameV2MaxClockDuration; + bool public useInterop; bool public deploySoulGasToken; bool public isSoulBackedByNative; bool public useUpgradedFork; + bytes32 public devFeatureBitmap; function read(string memory _path) public { console.log("DeployConfig: reading file %s", _path); @@ -98,6 +106,7 @@ contract DeployConfig is Script { l2GenesisFjordTimeOffset = _readOr(_json, "$.l2GenesisFjordTimeOffset", NULL_OFFSET); l2GenesisGraniteTimeOffset = _readOr(_json, "$.l2GenesisGraniteTimeOffset", NULL_OFFSET); l2GenesisHoloceneTimeOffset = _readOr(_json, "$.l2GenesisHoloceneTimeOffset", NULL_OFFSET); + l2GenesisJovianTimeOffset = _readOr(_json, "$.l2GenesisJovianTimeOffset", NULL_OFFSET); p2pSequencerAddress = stdJson.readAddress(_json, "$.p2pSequencerAddress"); batchInboxAddress = stdJson.readAddress(_json, 
"$.batchInboxAddress"); @@ -153,7 +162,12 @@ contract DeployConfig is Script { useInterop = _readOr(_json, "$.useInterop", false); deploySoulGasToken = _readOr(_json, "$.deploySoulGasToken", false); isSoulBackedByNative = _readOr(_json, "$.isSoulBackedByNative", false); + devFeatureBitmap = bytes32(_readOr(_json, "$.devFeatureBitmap", 0)); useUpgradedFork; + faultGameV2MaxGameDepth = _readOr(_json, "$.faultGameV2MaxGameDepth", 73); + faultGameV2SplitDepth = _readOr(_json, "$.faultGameV2SplitDepth", 30); + faultGameV2ClockExtension = _readOr(_json, "$.faultGameV2ClockExtension", 10800); + faultGameV2MaxClockDuration = _readOr(_json, "$.faultGameV2MaxClockDuration", 302400); } function fork() public view returns (Fork fork_) { @@ -220,6 +234,11 @@ contract DeployConfig is Script { fundDevAccounts = _fundDevAccounts; } + /// @notice Allow the `devFeatureBitmap` config to be overridden in testing environments + function setDevFeatureBitmap(bytes32 _devFeatureBitmap) public { + devFeatureBitmap = _devFeatureBitmap; + } + /// @notice Allow the `useUpgradedFork` config to be overridden in testing environments /// @dev When true, the forked system WILL be upgraded in setUp(). /// When false, the forked system WILL NOT be upgraded in setUp(). 
@@ -232,7 +251,9 @@ contract DeployConfig is Script { } function latestGenesisFork() internal view returns (Fork) { - if (l2GenesisHoloceneTimeOffset == 0) { + if (l2GenesisJovianTimeOffset == 0) { + return Fork.JOVIAN; + } else if (l2GenesisHoloceneTimeOffset == 0) { return Fork.HOLOCENE; } else if (l2GenesisGraniteTimeOffset == 0) { return Fork.GRANITE; diff --git a/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol index 561426c5bfe23..fef559ef973c1 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol @@ -5,7 +5,6 @@ import { Script } from "forge-std/Script.sol"; // Libraries import { Chains } from "scripts/libraries/Chains.sol"; -import { LibString } from "@solady/utils/LibString.sol"; import { Types } from "scripts/libraries/Types.sol"; // Interfaces @@ -13,10 +12,12 @@ import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; -import { IMIPS } from "interfaces/cannon/IMIPS.sol"; -import { IMIPS2 } from "interfaces/cannon/IMIPS2.sol"; +import { IMIPS64 } from "interfaces/cannon/IMIPS64.sol"; import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IFaultDisputeGameV2 } from "interfaces/dispute/v2/IFaultDisputeGameV2.sol"; +import { IPermissionedDisputeGameV2 } from "interfaces/dispute/v2/IPermissionedDisputeGameV2.sol"; +import { GameTypes, Duration } from "src/dispute/lib/Types.sol"; import { IOPContractsManager, IOPContractsManagerGameTypeAdder, @@ -27,6 +28,7 @@ import { IOPContractsManagerStandardValidator } from 
"interfaces/L1/IOPContractsManager.sol"; import { IOptimismPortal2 as IOptimismPortal } from "interfaces/L1/IOptimismPortal2.sol"; +import { IOptimismPortalInterop } from "interfaces/L1/IOptimismPortalInterop.sol"; import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; @@ -38,7 +40,7 @@ import { IOPContractsManagerStandardValidator } from "interfaces/L1/IOPContracts import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { Solarray } from "scripts/libraries/Solarray.sol"; import { ChainAssertions } from "scripts/deploy/ChainAssertions.sol"; -import { DeployOPChainInput } from "scripts/deploy/DeployOPChain.s.sol"; +import { DevFeatures } from "src/libraries/DevFeatures.sol"; contract DeployImplementations is Script { struct Input { @@ -48,14 +50,17 @@ contract DeployImplementations is Script { uint256 proofMaturityDelaySeconds; uint256 disputeGameFinalityDelaySeconds; uint256 mipsVersion; - // This is used in opcm to signal which version of the L1 smart contracts is deployed. - // It takes the format of `op-contracts/v*.*.*`. - string l1ContractsRelease; + bytes32 devFeatureBitmap; + // V2 Dispute Game parameters + uint256 faultGameV2MaxGameDepth; + uint256 faultGameV2SplitDepth; + uint256 faultGameV2ClockExtension; + uint256 faultGameV2MaxClockDuration; // Outputs from DeploySuperchain.s.sol. 
ISuperchainConfig superchainConfigProxy; IProtocolVersions protocolVersionsProxy; IProxyAdmin superchainProxyAdmin; - address upgradeController; + address l1ProxyAdminOwner; address challenger; } @@ -69,9 +74,10 @@ contract DeployImplementations is Script { IOPContractsManagerStandardValidator opcmStandardValidator; IDelayedWETH delayedWETHImpl; IOptimismPortal optimismPortalImpl; + IOptimismPortalInterop optimismPortalInteropImpl; IETHLockbox ethLockboxImpl; IPreimageOracle preimageOracleSingleton; - IMIPS mipsSingleton; + IMIPS64 mipsSingleton; ISystemConfig systemConfigImpl; IL1CrossDomainMessenger l1CrossDomainMessengerImpl; IL1ERC721Bridge l1ERC721BridgeImpl; @@ -81,12 +87,20 @@ contract DeployImplementations is Script { IAnchorStateRegistry anchorStateRegistryImpl; ISuperchainConfig superchainConfigImpl; IProtocolVersions protocolVersionsImpl; + IFaultDisputeGameV2 faultDisputeGameV2Impl; + IPermissionedDisputeGameV2 permissionedDisputeGameV2Impl; } bytes32 internal _salt = DeployUtils.DEFAULT_SALT; // -------- Core Deployment Methods -------- + function runWithBytes(bytes memory _input) public returns (bytes memory) { + Input memory input = abi.decode(_input, (Input)); + Output memory output = run(input); + return abi.encode(output); + } + function run(Input memory _input) public returns (Output memory output_) { assertValidInput(_input); @@ -99,12 +113,17 @@ contract DeployImplementations is Script { deployL1StandardBridgeImpl(output_); deployOptimismMintableERC20FactoryImpl(output_); deployOptimismPortalImpl(_input, output_); + deployOptimismPortalInteropImpl(_input, output_); deployETHLockboxImpl(output_); deployDelayedWETHImpl(_input, output_); deployPreimageOracleSingleton(_input, output_); deployMipsSingleton(_input, output_); deployDisputeGameFactoryImpl(output_); deployAnchorStateRegistryImpl(_input, output_); + if (DevFeatures.isDevFeatureEnabled(_input.devFeatureBitmap, DevFeatures.DEPLOY_V2_DISPUTE_GAMES)) { + deployFaultDisputeGameV2Impl(_input, 
output_); + deployPermissionedDisputeGameV2Impl(_input, output_); + } // Deploy the OP Contracts Manager with the new implementations set. deployOPContractsManager(_input, output_); @@ -119,8 +138,7 @@ contract DeployImplementations is Script { function createOPCMContract( Input memory _input, Output memory _output, - IOPContractsManager.Blueprints memory _blueprints, - string memory _l1ContractsRelease + IOPContractsManager.Blueprints memory _blueprints ) private returns (IOPContractsManager opcm_) @@ -130,6 +148,7 @@ contract DeployImplementations is Script { protocolVersionsImpl: address(_output.protocolVersionsImpl), l1ERC721BridgeImpl: address(_output.l1ERC721BridgeImpl), optimismPortalImpl: address(_output.optimismPortalImpl), + optimismPortalInteropImpl: address(_output.optimismPortalInteropImpl), ethLockboxImpl: address(_output.ethLockboxImpl), systemConfigImpl: address(_output.systemConfigImpl), optimismMintableERC20FactoryImpl: address(_output.optimismMintableERC20FactoryImpl), @@ -141,7 +160,7 @@ contract DeployImplementations is Script { mipsImpl: address(_output.mipsSingleton) }); - deployOPCMBPImplsContainer(_output, _blueprints, implementations); + deployOPCMBPImplsContainer(_input, _output, _blueprints, implementations); deployOPCMGameTypeAdder(_output); deployOPCMDeployer(_input, _output); deployOPCMUpgrader(_output); @@ -153,7 +172,7 @@ contract DeployImplementations is Script { // nosemgrep: sol-safety-deployutils-args DeployUtils.createDeterministic({ _name: "OPContractsManager", - _args: encodeOPCMConstructor(_l1ContractsRelease, _input, _output), + _args: encodeOPCMConstructor(_input, _output), _salt: _salt }) ); @@ -164,12 +183,10 @@ contract DeployImplementations is Script { /// @notice Encodes the constructor of the OPContractsManager contract. Used to avoid stack too /// deep errors inside of the createOPCMContract function. - /// @param _l1ContractsRelease The release of the L1 contracts. /// @param _input The deployment input parameters. 
/// @param _output The deployment output parameters. /// @return encoded_ The encoded constructor. function encodeOPCMConstructor( - string memory _l1ContractsRelease, Input memory _input, Output memory _output ) @@ -188,17 +205,13 @@ contract DeployImplementations is Script { _output.opcmStandardValidator, _input.superchainConfigProxy, _input.protocolVersionsProxy, - _input.superchainProxyAdmin, - _l1ContractsRelease, - _input.upgradeController + _input.superchainProxyAdmin ) ) ); } function deployOPContractsManager(Input memory _input, Output memory _output) private { - string memory l1ContractsRelease = _input.l1ContractsRelease; - // First we deploy the blueprints for the singletons deployed by OPCM. // forgefmt: disable-start IOPContractsManager.Blueprints memory blueprints; @@ -223,7 +236,7 @@ contract DeployImplementations is Script { // forgefmt: disable-end vm.stopBroadcast(); - IOPContractsManager opcm = createOPCMContract(_input, _output, blueprints, l1ContractsRelease); + IOPContractsManager opcm = createOPCMContract(_input, _output, blueprints); vm.label(address(opcm), "OPContractsManager"); _output.opcm = opcm; @@ -361,8 +374,6 @@ contract DeployImplementations is Script { // These are: // - FaultDisputeGame (not proxied) // - PermissionedDisputeGame (not proxied) - // - DelayedWeth (proxies only) - // - OptimismPortal2 (proxies only) function deployOptimismPortalImpl(Input memory _input, Output memory _output) private { uint256 proofMaturityDelaySeconds = _input.proofMaturityDelaySeconds; @@ -379,6 +390,21 @@ contract DeployImplementations is Script { _output.optimismPortalImpl = impl; } + function deployOptimismPortalInteropImpl(Input memory _input, Output memory _output) private { + uint256 proofMaturityDelaySeconds = _input.proofMaturityDelaySeconds; + IOptimismPortalInterop impl = IOptimismPortalInterop( + DeployUtils.createDeterministic({ + _name: "OptimismPortalInterop", + _args: DeployUtils.encodeConstructor( + 
abi.encodeCall(IOptimismPortalInterop.__constructor__, (proofMaturityDelaySeconds)) + ), + _salt: _salt + }) + ); + vm.label(address(impl), "OptimismPortalInteropImpl"); + _output.optimismPortalInteropImpl = impl; + } + function deployDelayedWETHImpl(Input memory _input, Output memory _output) private { uint256 withdrawalDelaySeconds = _input.withdrawalDelaySeconds; IDelayedWETH impl = IDelayedWETH( @@ -419,10 +445,10 @@ contract DeployImplementations is Script { } } - IMIPS singleton = IMIPS( + IMIPS64 singleton = IMIPS64( DeployUtils.createDeterministic({ _name: "MIPS64", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IMIPS2.__constructor__, (preimageOracle, mipsVersion))), + _args: DeployUtils.encodeConstructor(abi.encodeCall(IMIPS64.__constructor__, (preimageOracle, mipsVersion))), _salt: DeployUtils.DEFAULT_SALT }) ); @@ -457,7 +483,46 @@ contract DeployImplementations is Script { _output.anchorStateRegistryImpl = impl; } + function deployFaultDisputeGameV2Impl(Input memory _input, Output memory _output) private { + IFaultDisputeGameV2.GameConstructorParams memory params; + params.gameType = GameTypes.CANNON; + params.maxGameDepth = _input.faultGameV2MaxGameDepth; + params.splitDepth = _input.faultGameV2SplitDepth; + params.clockExtension = Duration.wrap(uint64(_input.faultGameV2ClockExtension)); + params.maxClockDuration = Duration.wrap(uint64(_input.faultGameV2MaxClockDuration)); + + IFaultDisputeGameV2 impl = IFaultDisputeGameV2( + DeployUtils.createDeterministic({ + _name: "FaultDisputeGameV2", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IFaultDisputeGameV2.__constructor__, (params))), + _salt: _salt + }) + ); + vm.label(address(impl), "FaultDisputeGameV2Impl"); + _output.faultDisputeGameV2Impl = impl; + } + + function deployPermissionedDisputeGameV2Impl(Input memory _input, Output memory _output) private { + IFaultDisputeGameV2.GameConstructorParams memory params; + params.gameType = GameTypes.PERMISSIONED_CANNON; + params.maxGameDepth = 
_input.faultGameV2MaxGameDepth; + params.splitDepth = _input.faultGameV2SplitDepth; + params.clockExtension = Duration.wrap(uint64(_input.faultGameV2ClockExtension)); + params.maxClockDuration = Duration.wrap(uint64(_input.faultGameV2MaxClockDuration)); + + IPermissionedDisputeGameV2 impl = IPermissionedDisputeGameV2( + DeployUtils.createDeterministic({ + _name: "PermissionedDisputeGameV2", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IPermissionedDisputeGameV2.__constructor__, (params))), + _salt: _salt + }) + ); + vm.label(address(impl), "PermissionedDisputeGameV2Impl"); + _output.permissionedDisputeGameV2Impl = impl; + } + function deployOPCMBPImplsContainer( + Input memory _input, Output memory _output, IOPContractsManager.Blueprints memory _blueprints, IOPContractsManager.Implementations memory _implementations @@ -468,7 +533,10 @@ contract DeployImplementations is Script { DeployUtils.createDeterministic({ _name: "OPContractsManager.sol:OPContractsManagerContractsContainer", _args: DeployUtils.encodeConstructor( - abi.encodeCall(IOPContractsManagerContractsContainer.__constructor__, (_blueprints, _implementations)) + abi.encodeCall( + IOPContractsManagerContractsContainer.__constructor__, + (_blueprints, _implementations, _input.devFeatureBitmap) + ) ), _salt: _salt }) @@ -543,6 +611,7 @@ contract DeployImplementations is Script { IOPContractsManagerStandardValidator.Implementations memory opcmImplementations; opcmImplementations.l1ERC721BridgeImpl = _implementations.l1ERC721BridgeImpl; opcmImplementations.optimismPortalImpl = _implementations.optimismPortalImpl; + opcmImplementations.optimismPortalInteropImpl = _implementations.optimismPortalInteropImpl; opcmImplementations.ethLockboxImpl = _implementations.ethLockboxImpl; opcmImplementations.systemConfigImpl = _implementations.systemConfigImpl; opcmImplementations.optimismMintableERC20FactoryImpl = _implementations.optimismMintableERC20FactoryImpl; @@ -562,9 +631,10 @@ contract 
DeployImplementations is Script { ( opcmImplementations, _input.superchainConfigProxy, - _input.upgradeController, // Proxy admin owner + _input.l1ProxyAdminOwner, _input.challenger, - _input.withdrawalDelaySeconds + _input.withdrawalDelaySeconds, + _input.devFeatureBitmap ) ) ), @@ -576,6 +646,35 @@ contract DeployImplementations is Script { } function assertValidInput(Input memory _input) private pure { + if (DevFeatures.isDevFeatureEnabled(_input.devFeatureBitmap, DevFeatures.DEPLOY_V2_DISPUTE_GAMES)) { + // Validate V2 game depth parameters are sensible + require( + _input.faultGameV2MaxGameDepth > 0 && _input.faultGameV2MaxGameDepth <= 125, + "DeployImplementations: faultGameV2MaxGameDepth out of valid range (1-125)" + ); + // V2 contract requires splitDepth >= 2 and splitDepth + 1 < maxGameDepth + require( + _input.faultGameV2SplitDepth >= 2 && _input.faultGameV2SplitDepth + 1 < _input.faultGameV2MaxGameDepth, + "DeployImplementations: faultGameV2SplitDepth must be >= 2 and splitDepth + 1 < maxGameDepth" + ); + + // Validate V2 clock parameters fit in uint64 before deployment + require( + _input.faultGameV2ClockExtension <= type(uint64).max, + "DeployImplementations: faultGameV2ClockExtension too large for uint64" + ); + require( + _input.faultGameV2MaxClockDuration <= type(uint64).max, + "DeployImplementations: faultGameV2MaxClockDuration too large for uint64" + ); + require( + _input.faultGameV2MaxClockDuration >= _input.faultGameV2ClockExtension, + "DeployImplementations: maxClockDuration must be >= clockExtension" + ); + require( + _input.faultGameV2ClockExtension > 0, "DeployImplementations: faultGameV2ClockExtension must be > 0" + ); + } require(_input.withdrawalDelaySeconds != 0, "DeployImplementations: withdrawalDelaySeconds not set"); require(_input.minProposalSizeBytes != 0, "DeployImplementations: minProposalSizeBytes not set"); require(_input.challengePeriodSeconds != 0, "DeployImplementations: challengePeriodSeconds not set"); @@ -588,7 +687,6 @@ 
contract DeployImplementations is Script { "DeployImplementations: disputeGameFinalityDelaySeconds not set" ); require(_input.mipsVersion != 0, "DeployImplementations: mipsVersion not set"); - require(!LibString.eq(_input.l1ContractsRelease, ""), "DeployImplementations: l1ContractsRelease not set"); require( address(_input.superchainConfigProxy) != address(0), "DeployImplementations: superchainConfigProxy not set" ); @@ -598,10 +696,10 @@ contract DeployImplementations is Script { require( address(_input.superchainProxyAdmin) != address(0), "DeployImplementations: superchainProxyAdmin not set" ); - require(address(_input.upgradeController) != address(0), "DeployImplementations: upgradeController not set"); + require(address(_input.l1ProxyAdminOwner) != address(0), "DeployImplementations: L1ProxyAdminOwner not set"); } - function assertValidOutput(Input memory _input, Output memory _output) private view { + function assertValidOutput(Input memory _input, Output memory _output) private { // With 12 addresses, we'd get a stack too deep error if we tried to do this inline as a // single call to `Solarray.addresses`. So we split it into two calls. 
address[] memory addrs1 = Solarray.addresses( @@ -625,8 +723,28 @@ contract DeployImplementations is Script { address(_output.ethLockboxImpl) ); + // Only include V2 contracts in validation if they were deployed + if (DevFeatures.isDevFeatureEnabled(_input.devFeatureBitmap, DevFeatures.DEPLOY_V2_DISPUTE_GAMES)) { + address[] memory v2Addrs = Solarray.addresses( + address(_output.faultDisputeGameV2Impl), address(_output.permissionedDisputeGameV2Impl) + ); + addrs2 = Solarray.extend(addrs2, v2Addrs); + } + DeployUtils.assertValidContractAddresses(Solarray.extend(addrs1, addrs2)); + // Validate V2 contracts not deployed when flag is disabled + if (!DevFeatures.isDevFeatureEnabled(_input.devFeatureBitmap, DevFeatures.DEPLOY_V2_DISPUTE_GAMES)) { + require( + address(_output.faultDisputeGameV2Impl) == address(0), + "DeployImplementations: V2 flag disabled but FaultDisputeGameV2 was deployed" + ); + require( + address(_output.permissionedDisputeGameV2Impl) == address(0), + "DeployImplementations: V2 flag disabled but PermissionedDisputeGameV2 was deployed" + ); + } + Types.ContractSet memory impls = ChainAssertions.dioToContractSet(_output); ChainAssertions.checkDelayedWETHImpl(_output.delayedWETHImpl, _input.withdrawalDelaySeconds); @@ -649,7 +767,7 @@ contract DeployImplementations is Script { _impls: impls, _proxies: proxies, _opcm: IOPContractsManager(address(_output.opcm)), - _mips: IMIPS(address(_output.mipsSingleton)), + _mips: IMIPS64(address(_output.mipsSingleton)), _superchainProxyAdmin: _input.superchainProxyAdmin }); @@ -661,8 +779,7 @@ contract DeployImplementations is Script { _isProxy: false }); ChainAssertions.checkETHLockboxImpl(_output.ethLockboxImpl, _output.optimismPortalImpl); - // We can use DeployOPChainInput(address(0)) here because no method will be called on _doi when isProxy is false - ChainAssertions.checkSystemConfig(impls, DeployOPChainInput(address(0)), false); + ChainAssertions.checkSystemConfigImpls(impls); 
ChainAssertions.checkAnchorStateRegistryProxy(IAnchorStateRegistry(impls.AnchorStateRegistry), false); } } diff --git a/packages/contracts-bedrock/scripts/deploy/DeployMIPS.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployMIPS.s.sol index 4f2c47ab579b6..190dbd8e44d9a 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployMIPS.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployMIPS.s.sol @@ -10,8 +10,7 @@ import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; // Interfaces import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; -import { IMIPS } from "interfaces/cannon/IMIPS.sol"; -import { IMIPS2 } from "interfaces/cannon/IMIPS2.sol"; +import { IMIPS64 } from "interfaces/cannon/IMIPS64.sol"; import { StandardConstants } from "scripts/deploy/StandardConstants.sol"; /// @title DeployMIPSInput @@ -54,18 +53,18 @@ contract DeployMIPSInput is BaseDeployIO { /// @title DeployMIPSOutput contract DeployMIPSOutput is BaseDeployIO { - IMIPS internal _mipsSingleton; + IMIPS64 internal _mipsSingleton; function set(bytes4 _sel, address _value) public { if (_sel == this.mipsSingleton.selector) { require(_value != address(0), "DeployMIPS: mipsSingleton cannot be zero address"); - _mipsSingleton = IMIPS(_value); + _mipsSingleton = IMIPS64(_value); } else { revert("DeployMIPS: unknown selector"); } } - function mipsSingleton() public view returns (IMIPS) { + function mipsSingleton() public view returns (IMIPS64) { DeployUtils.assertValidContractAddress(address(_mipsSingleton)); return _mipsSingleton; } @@ -82,10 +81,10 @@ contract DeployMIPS is Script { uint256 mipsVersion = _mi.mipsVersion(); IPreimageOracle preimageOracle = IPreimageOracle(_mi.preimageOracle()); - IMIPS singleton = IMIPS( + IMIPS64 singleton = IMIPS64( DeployUtils.createDeterministic({ _name: "MIPS64", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IMIPS2.__constructor__, (preimageOracle, mipsVersion))), + _args: 
DeployUtils.encodeConstructor(abi.encodeCall(IMIPS64.__constructor__, (preimageOracle, mipsVersion))), _salt: DeployUtils.DEFAULT_SALT }) ); @@ -100,7 +99,7 @@ contract DeployMIPS is Script { } function assertValidMipsSingleton(DeployMIPSInput _mi, DeployMIPSOutput _mo) internal view { - IMIPS mips = _mo.mipsSingleton(); + IMIPS64 mips = _mo.mipsSingleton(); require(address(mips.oracle()) == address(_mi.preimageOracle()), "MIPS-10"); } } diff --git a/packages/contracts-bedrock/scripts/deploy/DeployMIPS2.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployMIPS2.s.sol index 64be5028ee13e..59344e2fc5ba0 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployMIPS2.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployMIPS2.s.sol @@ -9,8 +9,7 @@ import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; // Interfaces import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; -import { IMIPS } from "interfaces/cannon/IMIPS.sol"; -import { IMIPS2 } from "interfaces/cannon/IMIPS2.sol"; +import { IMIPS64 } from "interfaces/cannon/IMIPS64.sol"; import { StandardConstants } from "scripts/deploy/StandardConstants.sol"; /// @title DeployMIPS @@ -23,7 +22,7 @@ contract DeployMIPS2 is Script { } struct Output { - IMIPS mipsSingleton; + IMIPS64 mipsSingleton; } function run(Input memory _input) public returns (Output memory output_) { @@ -37,11 +36,11 @@ contract DeployMIPS2 is Script { function deployMipsSingleton(Input memory _input, Output memory _output) internal { uint256 mipsVersion = _input.mipsVersion; - IMIPS singleton = IMIPS( + IMIPS64 singleton = IMIPS64( DeployUtils.createDeterministic({ _name: "MIPS64", _args: DeployUtils.encodeConstructor( - abi.encodeCall(IMIPS2.__constructor__, (_input.preimageOracle, mipsVersion)) + abi.encodeCall(IMIPS64.__constructor__, (_input.preimageOracle, mipsVersion)) ), _salt: DeployUtils.DEFAULT_SALT }) diff --git a/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol 
b/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol index 050dfed7695b9..77a3d3d4351f8 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployOPChain.s.sol @@ -3,12 +3,8 @@ pragma solidity 0.8.15; import { Script } from "forge-std/Script.sol"; -import { SafeCast } from "@openzeppelin/contracts/utils/math/SafeCast.sol"; - import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { Solarray } from "scripts/libraries/Solarray.sol"; -import { BaseDeployIO } from "scripts/deploy/BaseDeployIO.sol"; - import { ChainAssertions } from "scripts/deploy/ChainAssertions.sol"; import { Constants as ScriptConstants } from "scripts/libraries/Constants.sol"; import { Types } from "scripts/libraries/Types.sol"; @@ -21,8 +17,6 @@ import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol" import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; import { IPermissionedDisputeGame } from "interfaces/dispute/IPermissionedDisputeGame.sol"; -import { Claim, Duration, GameType } from "src/dispute/lib/Types.sol"; - import { IOptimismPortal2 as IOptimismPortal } from "interfaces/L1/IOptimismPortal2.sol"; import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; @@ -31,357 +25,58 @@ import { IL1StandardBridge } from "interfaces/L1/IL1StandardBridge.sol"; import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; -contract DeployOPChainInput is BaseDeployIO { - address internal _opChainProxyAdminOwner; - address internal _systemConfigOwner; - address internal _batcher; - address internal _unsafeBlockSigner; - address internal _proposer; - address internal _challenger; - - // 
TODO Add fault proofs inputs in a future PR. - uint32 internal _basefeeScalar; - uint32 internal _blobBaseFeeScalar; - uint256 internal _l2ChainId; - IOPContractsManager internal _opcm; - string internal _saltMixer; - uint64 internal _gasLimit; - - // Configurable dispute game inputs - GameType internal _disputeGameType; - Claim internal _disputeAbsolutePrestate; - uint256 internal _disputeMaxGameDepth; - uint256 internal _disputeSplitDepth; - Duration internal _disputeClockExtension; - Duration internal _disputeMaxClockDuration; - bool internal _allowCustomDisputeParameters; - - uint32 internal _operatorFeeScalar; - uint64 internal _operatorFeeConstant; - - function set(bytes4 _sel, address _addr) public { - require(_addr != address(0), "DeployOPChainInput: cannot set zero address"); - if (_sel == this.opChainProxyAdminOwner.selector) _opChainProxyAdminOwner = _addr; - else if (_sel == this.systemConfigOwner.selector) _systemConfigOwner = _addr; - else if (_sel == this.batcher.selector) _batcher = _addr; - else if (_sel == this.unsafeBlockSigner.selector) _unsafeBlockSigner = _addr; - else if (_sel == this.proposer.selector) _proposer = _addr; - else if (_sel == this.challenger.selector) _challenger = _addr; - else if (_sel == this.opcm.selector) _opcm = IOPContractsManager(_addr); - else revert("DeployOPChainInput: unknown selector"); - } - - function set(bytes4 _sel, uint256 _value) public { - if (_sel == this.basefeeScalar.selector) { - _basefeeScalar = SafeCast.toUint32(_value); - } else if (_sel == this.blobBaseFeeScalar.selector) { - _blobBaseFeeScalar = SafeCast.toUint32(_value); - } else if (_sel == this.l2ChainId.selector) { - require(_value != 0 && _value != block.chainid, "DeployOPChainInput: invalid l2ChainId"); - _l2ChainId = _value; - } else if (_sel == this.gasLimit.selector) { - _gasLimit = SafeCast.toUint64(_value); - } else if (_sel == this.disputeGameType.selector) { - _disputeGameType = GameType.wrap(SafeCast.toUint32(_value)); - } else if 
(_sel == this.disputeMaxGameDepth.selector) { - _disputeMaxGameDepth = SafeCast.toUint64(_value); - } else if (_sel == this.disputeSplitDepth.selector) { - _disputeSplitDepth = SafeCast.toUint64(_value); - } else if (_sel == this.disputeClockExtension.selector) { - _disputeClockExtension = Duration.wrap(SafeCast.toUint64(_value)); - } else if (_sel == this.disputeMaxClockDuration.selector) { - _disputeMaxClockDuration = Duration.wrap(SafeCast.toUint64(_value)); - } else if (_sel == this.operatorFeeScalar.selector) { - _operatorFeeScalar = SafeCast.toUint32(_value); - } else if (_sel == this.operatorFeeConstant.selector) { - _operatorFeeConstant = SafeCast.toUint64(_value); - } else { - revert("DeployOPChainInput: unknown selector"); - } - } - - function set(bytes4 _sel, string memory _value) public { - require((bytes(_value).length != 0), "DeployImplementationsInput: cannot set empty string"); - if (_sel == this.saltMixer.selector) _saltMixer = _value; - else revert("DeployOPChainInput: unknown selector"); - } - - function set(bytes4 _sel, bytes32 _value) public { - if (_sel == this.disputeAbsolutePrestate.selector) _disputeAbsolutePrestate = Claim.wrap(_value); - else revert("DeployImplementationsInput: unknown selector"); - } - - function set(bytes4 _sel, bool _value) public { - if (_sel == this.allowCustomDisputeParameters.selector) _allowCustomDisputeParameters = _value; - else revert("DeployOPChainInput: unknown selector"); - } - - function opChainProxyAdminOwner() public view returns (address) { - require(_opChainProxyAdminOwner != address(0), "DeployOPChainInput: not set"); - return _opChainProxyAdminOwner; - } - - function systemConfigOwner() public view returns (address) { - require(_systemConfigOwner != address(0), "DeployOPChainInput: not set"); - return _systemConfigOwner; - } - - function batcher() public view returns (address) { - require(_batcher != address(0), "DeployOPChainInput: not set"); - return _batcher; - } - - function unsafeBlockSigner() 
public view returns (address) { - require(_unsafeBlockSigner != address(0), "DeployOPChainInput: not set"); - return _unsafeBlockSigner; - } - - function proposer() public view returns (address) { - require(_proposer != address(0), "DeployOPChainInput: not set"); - return _proposer; - } - - function challenger() public view returns (address) { - require(_challenger != address(0), "DeployOPChainInput: not set"); - return _challenger; - } - - function basefeeScalar() public view returns (uint32) { - require(_basefeeScalar != 0, "DeployOPChainInput: not set"); - return _basefeeScalar; - } - - function blobBaseFeeScalar() public view returns (uint32) { - require(_blobBaseFeeScalar != 0, "DeployOPChainInput: not set"); - return _blobBaseFeeScalar; - } - - function l2ChainId() public view returns (uint256) { - require(_l2ChainId != 0, "DeployOPChainInput: not set"); - require(_l2ChainId != block.chainid, "DeployOPChainInput: invalid l2ChainId"); - return _l2ChainId; - } - - function startingAnchorRoot() public pure returns (bytes memory) { - // WARNING: For now always hardcode the starting permissioned game anchor root to 0xdead, - // and we do not set anything for the permissioned game. This is because we currently only - // support deploying straight to permissioned games, and the starting root does not - // matter for that, as long as it is non-zero, since no games will be played. We do not - // deploy the permissionless game (and therefore do not set a starting root for it here) - // because to to update to the permissionless game, we will need to update its starting - // anchor root and deploy a new permissioned dispute game contract anyway. 
- // - // You can `console.logBytes(abi.encode(ScriptConstants.DEFAULT_OUTPUT_ROOT()))` to get the bytes that - // are hardcoded into `op-chain-ops/deployer/opcm/opchain.go` - - return abi.encode(ScriptConstants.DEFAULT_OUTPUT_ROOT()); - } - - function opcm() public view returns (IOPContractsManager) { - require(address(_opcm) != address(0), "DeployOPChainInput: not set"); - DeployUtils.assertValidContractAddress(address(_opcm)); - return _opcm; - } - - function saltMixer() public view returns (string memory) { - return _saltMixer; - } - - function gasLimit() public view returns (uint64) { - return _gasLimit; - } - - function disputeGameType() public view returns (GameType) { - return _disputeGameType; - } - - function disputeAbsolutePrestate() public view returns (Claim) { - return _disputeAbsolutePrestate; - } - - function disputeMaxGameDepth() public view returns (uint256) { - return _disputeMaxGameDepth; - } - - function disputeSplitDepth() public view returns (uint256) { - return _disputeSplitDepth; - } - - function disputeClockExtension() public view returns (Duration) { - return _disputeClockExtension; - } - - function disputeMaxClockDuration() public view returns (Duration) { - return _disputeMaxClockDuration; - } - - function allowCustomDisputeParameters() public view returns (bool) { - return _allowCustomDisputeParameters; - } - - function operatorFeeScalar() public view returns (uint32) { - return _operatorFeeScalar; - } - - function operatorFeeConstant() public view returns (uint64) { - return _operatorFeeConstant; - } -} - -contract DeployOPChainOutput is BaseDeployIO { - IProxyAdmin internal _opChainProxyAdmin; - IAddressManager internal _addressManager; - IL1ERC721Bridge internal _l1ERC721BridgeProxy; - ISystemConfig internal _systemConfigProxy; - IOptimismMintableERC20Factory internal _optimismMintableERC20FactoryProxy; - IL1StandardBridge internal _l1StandardBridgeProxy; - IL1CrossDomainMessenger internal _l1CrossDomainMessengerProxy; - 
IOptimismPortal internal _optimismPortalProxy; - IETHLockbox internal _ethLockboxProxy; - IDisputeGameFactory internal _disputeGameFactoryProxy; - IAnchorStateRegistry internal _anchorStateRegistryProxy; - IFaultDisputeGame internal _faultDisputeGame; - IPermissionedDisputeGame internal _permissionedDisputeGame; - IDelayedWETH internal _delayedWETHPermissionedGameProxy; - IDelayedWETH internal _delayedWETHPermissionlessGameProxy; - - function set(bytes4 _sel, address _addr) public virtual { - require(_addr != address(0), "DeployOPChainOutput: cannot set zero address"); - // forgefmt: disable-start - if (_sel == this.opChainProxyAdmin.selector) _opChainProxyAdmin = IProxyAdmin(_addr) ; - else if (_sel == this.addressManager.selector) _addressManager = IAddressManager(_addr) ; - else if (_sel == this.l1ERC721BridgeProxy.selector) _l1ERC721BridgeProxy = IL1ERC721Bridge(_addr) ; - else if (_sel == this.systemConfigProxy.selector) _systemConfigProxy = ISystemConfig(_addr) ; - else if (_sel == this.optimismMintableERC20FactoryProxy.selector) _optimismMintableERC20FactoryProxy = IOptimismMintableERC20Factory(_addr) ; - else if (_sel == this.l1StandardBridgeProxy.selector) _l1StandardBridgeProxy = IL1StandardBridge(payable(_addr)) ; - else if (_sel == this.l1CrossDomainMessengerProxy.selector) _l1CrossDomainMessengerProxy = IL1CrossDomainMessenger(_addr) ; - else if (_sel == this.optimismPortalProxy.selector) _optimismPortalProxy = IOptimismPortal(payable(_addr)) ; - else if (_sel == this.ethLockboxProxy.selector) _ethLockboxProxy = IETHLockbox(payable(_addr)) ; - else if (_sel == this.disputeGameFactoryProxy.selector) _disputeGameFactoryProxy = IDisputeGameFactory(_addr) ; - else if (_sel == this.anchorStateRegistryProxy.selector) _anchorStateRegistryProxy = IAnchorStateRegistry(_addr) ; - else if (_sel == this.faultDisputeGame.selector) _faultDisputeGame = IFaultDisputeGame(_addr) ; - else if (_sel == this.permissionedDisputeGame.selector) _permissionedDisputeGame = 
IPermissionedDisputeGame(_addr) ; - else if (_sel == this.delayedWETHPermissionedGameProxy.selector) _delayedWETHPermissionedGameProxy = IDelayedWETH(payable(_addr)) ; - else if (_sel == this.delayedWETHPermissionlessGameProxy.selector) _delayedWETHPermissionlessGameProxy = IDelayedWETH(payable(_addr)) ; - else revert("DeployOPChainOutput: unknown selector"); - // forgefmt: disable-end - } - - function opChainProxyAdmin() public view returns (IProxyAdmin) { - DeployUtils.assertValidContractAddress(address(_opChainProxyAdmin)); - return _opChainProxyAdmin; - } - - function addressManager() public view returns (IAddressManager) { - DeployUtils.assertValidContractAddress(address(_addressManager)); - return _addressManager; - } - - function l1ERC721BridgeProxy() public returns (IL1ERC721Bridge) { - DeployUtils.assertValidContractAddress(address(_l1ERC721BridgeProxy)); - DeployUtils.assertERC1967ImplementationSet(address(_l1ERC721BridgeProxy)); - return _l1ERC721BridgeProxy; - } - - function systemConfigProxy() public returns (ISystemConfig) { - DeployUtils.assertValidContractAddress(address(_systemConfigProxy)); - DeployUtils.assertERC1967ImplementationSet(address(_systemConfigProxy)); - return _systemConfigProxy; - } - - function optimismMintableERC20FactoryProxy() public returns (IOptimismMintableERC20Factory) { - DeployUtils.assertValidContractAddress(address(_optimismMintableERC20FactoryProxy)); - DeployUtils.assertERC1967ImplementationSet(address(_optimismMintableERC20FactoryProxy)); - return _optimismMintableERC20FactoryProxy; - } - - function l1StandardBridgeProxy() public returns (IL1StandardBridge) { - DeployUtils.assertValidContractAddress(address(_l1StandardBridgeProxy)); - DeployUtils.assertL1ChugSplashImplementationSet(address(_l1StandardBridgeProxy)); - return _l1StandardBridgeProxy; - } - - function l1CrossDomainMessengerProxy() public view returns (IL1CrossDomainMessenger) { - 
DeployUtils.assertValidContractAddress(address(_l1CrossDomainMessengerProxy)); - DeployUtils.assertResolvedDelegateProxyImplementationSet("OVM_L1CrossDomainMessenger", addressManager()); - return _l1CrossDomainMessengerProxy; - } - - function optimismPortalProxy() public returns (IOptimismPortal) { - DeployUtils.assertValidContractAddress(address(_optimismPortalProxy)); - DeployUtils.assertERC1967ImplementationSet(address(_optimismPortalProxy)); - return _optimismPortalProxy; - } - - function ethLockboxProxy() public returns (IETHLockbox) { - DeployUtils.assertValidContractAddress(address(_ethLockboxProxy)); - DeployUtils.assertERC1967ImplementationSet(address(_ethLockboxProxy)); - return _ethLockboxProxy; - } - - function disputeGameFactoryProxy() public returns (IDisputeGameFactory) { - DeployUtils.assertValidContractAddress(address(_disputeGameFactoryProxy)); - DeployUtils.assertERC1967ImplementationSet(address(_disputeGameFactoryProxy)); - return _disputeGameFactoryProxy; - } - - function anchorStateRegistryProxy() public returns (IAnchorStateRegistry) { - DeployUtils.assertValidContractAddress(address(_anchorStateRegistryProxy)); - DeployUtils.assertERC1967ImplementationSet(address(_anchorStateRegistryProxy)); - return _anchorStateRegistryProxy; - } - - function faultDisputeGame() public view returns (IFaultDisputeGame) { - DeployUtils.assertValidContractAddress(address(_faultDisputeGame)); - return _faultDisputeGame; - } - - function permissionedDisputeGame() public view returns (IPermissionedDisputeGame) { - DeployUtils.assertValidContractAddress(address(_permissionedDisputeGame)); - return _permissionedDisputeGame; - } - - function delayedWETHPermissionedGameProxy() public returns (IDelayedWETH) { - DeployUtils.assertValidContractAddress(address(_delayedWETHPermissionedGameProxy)); - DeployUtils.assertERC1967ImplementationSet(address(_delayedWETHPermissionedGameProxy)); - return _delayedWETHPermissionedGameProxy; - } - - function 
delayedWETHPermissionlessGameProxy() public view returns (IDelayedWETH) { - // TODO: Eventually switch from Permissioned to Permissionless. Add this check back in. - // DeployUtils.assertValidContractAddress(address(_delayedWETHPermissionlessGameProxy)); - return _delayedWETHPermissionlessGameProxy; - } -} - contract DeployOPChain is Script { - // -------- Core Deployment Methods -------- - - function run(DeployOPChainInput _doi, DeployOPChainOutput _doo) public { - IOPContractsManager opcm = _doi.opcm(); + struct Output { + IProxyAdmin opChainProxyAdmin; + IAddressManager addressManager; + IL1ERC721Bridge l1ERC721BridgeProxy; + ISystemConfig systemConfigProxy; + IOptimismMintableERC20Factory optimismMintableERC20FactoryProxy; + IL1StandardBridge l1StandardBridgeProxy; + IL1CrossDomainMessenger l1CrossDomainMessengerProxy; + IOptimismPortal optimismPortalProxy; + IETHLockbox ethLockboxProxy; + IDisputeGameFactory disputeGameFactoryProxy; + IAnchorStateRegistry anchorStateRegistryProxy; + IFaultDisputeGame faultDisputeGame; + IPermissionedDisputeGame permissionedDisputeGame; + IDelayedWETH delayedWETHPermissionedGameProxy; + IDelayedWETH delayedWETHPermissionlessGameProxy; + } + + function runWithBytes(bytes memory _input) public returns (bytes memory) { + Types.DeployOPChainInput memory input = abi.decode(_input, (Types.DeployOPChainInput)); + Output memory output_ = run(input); + return abi.encode(output_); + } + + function run(Types.DeployOPChainInput memory _input) public returns (Output memory output_) { + checkInput(_input); + + IOPContractsManager opcm = IOPContractsManager(_input.opcm); IOPContractsManager.Roles memory roles = IOPContractsManager.Roles({ - opChainProxyAdminOwner: _doi.opChainProxyAdminOwner(), - systemConfigOwner: _doi.systemConfigOwner(), - batcher: _doi.batcher(), - unsafeBlockSigner: _doi.unsafeBlockSigner(), - proposer: _doi.proposer(), - challenger: _doi.challenger() + opChainProxyAdminOwner: _input.opChainProxyAdminOwner, + 
systemConfigOwner: _input.systemConfigOwner, + batcher: _input.batcher, + unsafeBlockSigner: _input.unsafeBlockSigner, + proposer: _input.proposer, + challenger: _input.challenger }); IOPContractsManager.DeployInput memory deployInput = IOPContractsManager.DeployInput({ roles: roles, - basefeeScalar: _doi.basefeeScalar(), - blobBasefeeScalar: _doi.blobBaseFeeScalar(), - l2ChainId: _doi.l2ChainId(), - startingAnchorRoot: _doi.startingAnchorRoot(), - saltMixer: _doi.saltMixer(), - gasLimit: _doi.gasLimit(), - disputeGameType: _doi.disputeGameType(), - disputeAbsolutePrestate: _doi.disputeAbsolutePrestate(), - disputeMaxGameDepth: _doi.disputeMaxGameDepth(), - disputeSplitDepth: _doi.disputeSplitDepth(), - disputeClockExtension: _doi.disputeClockExtension(), - disputeMaxClockDuration: _doi.disputeMaxClockDuration() + basefeeScalar: _input.basefeeScalar, + blobBasefeeScalar: _input.blobBaseFeeScalar, + l2ChainId: _input.l2ChainId, + startingAnchorRoot: startingAnchorRoot(), + saltMixer: _input.saltMixer, + gasLimit: _input.gasLimit, + disputeGameType: _input.disputeGameType, + disputeAbsolutePrestate: _input.disputeAbsolutePrestate, + disputeMaxGameDepth: _input.disputeMaxGameDepth, + disputeSplitDepth: _input.disputeSplitDepth, + disputeClockExtension: _input.disputeClockExtension, + disputeMaxClockDuration: _input.disputeMaxClockDuration }); vm.broadcast(msg.sender); @@ -398,199 +93,221 @@ contract DeployOPChain is Script { vm.label(address(deployOutput.ethLockboxProxy), "ethLockboxProxy"); vm.label(address(deployOutput.disputeGameFactoryProxy), "disputeGameFactoryProxy"); vm.label(address(deployOutput.anchorStateRegistryProxy), "anchorStateRegistryProxy"); - // vm.label(address(deployOutput.faultDisputeGame), "faultDisputeGame"); vm.label(address(deployOutput.permissionedDisputeGame), "permissionedDisputeGame"); vm.label(address(deployOutput.delayedWETHPermissionedGameProxy), "delayedWETHPermissionedGameProxy"); // TODO: Eventually switch from Permissioned to 
Permissionless. + // vm.label(address(deployOutput.faultDisputeGame), "faultDisputeGame"); // vm.label(address(deployOutput.delayedWETHPermissionlessGameProxy), "delayedWETHPermissionlessGameProxy"); - _doo.set(_doo.opChainProxyAdmin.selector, address(deployOutput.opChainProxyAdmin)); - _doo.set(_doo.addressManager.selector, address(deployOutput.addressManager)); - _doo.set(_doo.l1ERC721BridgeProxy.selector, address(deployOutput.l1ERC721BridgeProxy)); - _doo.set(_doo.systemConfigProxy.selector, address(deployOutput.systemConfigProxy)); - _doo.set( - _doo.optimismMintableERC20FactoryProxy.selector, address(deployOutput.optimismMintableERC20FactoryProxy) - ); - _doo.set(_doo.l1StandardBridgeProxy.selector, address(deployOutput.l1StandardBridgeProxy)); - _doo.set(_doo.l1CrossDomainMessengerProxy.selector, address(deployOutput.l1CrossDomainMessengerProxy)); - _doo.set(_doo.optimismPortalProxy.selector, address(deployOutput.optimismPortalProxy)); - _doo.set(_doo.ethLockboxProxy.selector, address(deployOutput.ethLockboxProxy)); - _doo.set(_doo.disputeGameFactoryProxy.selector, address(deployOutput.disputeGameFactoryProxy)); - _doo.set(_doo.anchorStateRegistryProxy.selector, address(deployOutput.anchorStateRegistryProxy)); - // _doo.set(_doo.faultDisputeGame.selector, address(deployOutput.faultDisputeGame)); - _doo.set(_doo.permissionedDisputeGame.selector, address(deployOutput.permissionedDisputeGame)); - _doo.set(_doo.delayedWETHPermissionedGameProxy.selector, address(deployOutput.delayedWETHPermissionedGameProxy)); - // TODO: Eventually switch from Permissioned to Permissionless. 
- // _doo.set( - // _doo.delayedWETHPermissionlessGameProxy.selector, - // address(deployOutput.delayedWETHPermissionlessGameProxy) - // ); + output_ = Output({ + opChainProxyAdmin: deployOutput.opChainProxyAdmin, + addressManager: deployOutput.addressManager, + l1ERC721BridgeProxy: deployOutput.l1ERC721BridgeProxy, + systemConfigProxy: deployOutput.systemConfigProxy, + optimismMintableERC20FactoryProxy: deployOutput.optimismMintableERC20FactoryProxy, + l1StandardBridgeProxy: deployOutput.l1StandardBridgeProxy, + l1CrossDomainMessengerProxy: deployOutput.l1CrossDomainMessengerProxy, + optimismPortalProxy: deployOutput.optimismPortalProxy, + ethLockboxProxy: deployOutput.ethLockboxProxy, + disputeGameFactoryProxy: deployOutput.disputeGameFactoryProxy, + anchorStateRegistryProxy: deployOutput.anchorStateRegistryProxy, + faultDisputeGame: deployOutput.faultDisputeGame, + permissionedDisputeGame: deployOutput.permissionedDisputeGame, + delayedWETHPermissionedGameProxy: deployOutput.delayedWETHPermissionedGameProxy, + delayedWETHPermissionlessGameProxy: deployOutput.delayedWETHPermissionlessGameProxy + }); + + checkOutput(_input, output_); + } + + // -------- Validations -------- + + function checkInput(Types.DeployOPChainInput memory _i) public view { + require(_i.opChainProxyAdminOwner != address(0), "DeployOPChainInput: opChainProxyAdminOwner not set"); + require(_i.systemConfigOwner != address(0), "DeployOPChainInput: systemConfigOwner not set"); + require(_i.batcher != address(0), "DeployOPChainInput: batcher not set"); + require(_i.unsafeBlockSigner != address(0), "DeployOPChainInput: unsafeBlockSigner not set"); + require(_i.proposer != address(0), "DeployOPChainInput: proposer not set"); + require(_i.challenger != address(0), "DeployOPChainInput: challenger not set"); + + require(_i.blobBaseFeeScalar != 0, "DeployOPChainInput: blobBaseFeeScalar not set"); + require(_i.basefeeScalar != 0, "DeployOPChainInput: basefeeScalar not set"); + require(_i.gasLimit != 0, 
"DeployOPChainInput: gasLimit not set"); - checkOutput(_doi, _doo); + require(_i.l2ChainId != 0, "DeployOPChainInput: l2ChainId not set"); + require(_i.l2ChainId != block.chainid, "DeployOPChainInput: l2ChainId matches block.chainid"); + + require(_i.opcm != address(0), "DeployOPChainInput: opcm not set"); + DeployUtils.assertValidContractAddress(_i.opcm); + + require(_i.disputeMaxGameDepth != 0, "DeployOPChainInput: disputeMaxGameDepth not set"); + require(_i.disputeSplitDepth != 0, "DeployOPChainInput: disputeSplitDepth not set"); + require(_i.disputeMaxClockDuration.raw() != 0, "DeployOPChainInput: disputeMaxClockDuration not set"); + require(_i.disputeAbsolutePrestate.raw() != bytes32(0), "DeployOPChainInput: disputeAbsolutePrestate not set"); } - function checkOutput(DeployOPChainInput _doi, DeployOPChainOutput _doo) public { + function checkOutput(Types.DeployOPChainInput memory _i, Output memory _o) public { // With 16 addresses, we'd get a stack too deep error if we tried to do this inline as a // single call to `Solarray.addresses`. So we split it into two calls. 
address[] memory addrs1 = Solarray.addresses( - address(_doo.opChainProxyAdmin()), - address(_doo.addressManager()), - address(_doo.l1ERC721BridgeProxy()), - address(_doo.systemConfigProxy()), - address(_doo.optimismMintableERC20FactoryProxy()), - address(_doo.l1StandardBridgeProxy()), - address(_doo.l1CrossDomainMessengerProxy()) + address(_o.opChainProxyAdmin), + address(_o.addressManager), + address(_o.l1ERC721BridgeProxy), + address(_o.systemConfigProxy), + address(_o.optimismMintableERC20FactoryProxy), + address(_o.l1StandardBridgeProxy), + address(_o.l1CrossDomainMessengerProxy) ); address[] memory addrs2 = Solarray.addresses( - address(_doo.optimismPortalProxy()), - address(_doo.disputeGameFactoryProxy()), - address(_doo.anchorStateRegistryProxy()), - address(_doo.permissionedDisputeGame()), - // address(_doo.faultDisputeGame()), - address(_doo.delayedWETHPermissionedGameProxy()), - address(_doo.ethLockboxProxy()) + address(_o.optimismPortalProxy), + address(_o.disputeGameFactoryProxy), + address(_o.anchorStateRegistryProxy), + address(_o.permissionedDisputeGame), + address(_o.delayedWETHPermissionedGameProxy), + address(_o.ethLockboxProxy) ); // TODO: Eventually switch from Permissioned to Permissionless. Add this address back in. 
- // address(_delayedWETHPermissionlessGameProxy) + // address(_o.delayedWETHPermissionlessGameProxy) + // address(_o.faultDisputeGame()), DeployUtils.assertValidContractAddresses(Solarray.extend(addrs1, addrs2)); - assertValidDeploy(_doi, _doo); + _assertValidDeploy(_i, _o); } - // -------- Deployment Assertions -------- - function assertValidDeploy(DeployOPChainInput _doi, DeployOPChainOutput _doo) internal { + function _assertValidDeploy(Types.DeployOPChainInput memory _i, Output memory _o) internal { Types.ContractSet memory proxies = Types.ContractSet({ - L1CrossDomainMessenger: address(_doo.l1CrossDomainMessengerProxy()), - L1StandardBridge: address(_doo.l1StandardBridgeProxy()), + L1CrossDomainMessenger: address(_o.l1CrossDomainMessengerProxy), + L1StandardBridge: address(_o.l1StandardBridgeProxy), L2OutputOracle: address(0), - DisputeGameFactory: address(_doo.disputeGameFactoryProxy()), - DelayedWETH: address(_doo.delayedWETHPermissionlessGameProxy()), - PermissionedDelayedWETH: address(_doo.delayedWETHPermissionedGameProxy()), - AnchorStateRegistry: address(_doo.anchorStateRegistryProxy()), - OptimismMintableERC20Factory: address(_doo.optimismMintableERC20FactoryProxy()), - OptimismPortal: address(_doo.optimismPortalProxy()), - ETHLockbox: address(_doo.ethLockboxProxy()), - SystemConfig: address(_doo.systemConfigProxy()), - L1ERC721Bridge: address(_doo.l1ERC721BridgeProxy()), + DisputeGameFactory: address(_o.disputeGameFactoryProxy), + DelayedWETH: address(_o.delayedWETHPermissionlessGameProxy), + PermissionedDelayedWETH: address(_o.delayedWETHPermissionedGameProxy), + AnchorStateRegistry: address(_o.anchorStateRegistryProxy), + OptimismMintableERC20Factory: address(_o.optimismMintableERC20FactoryProxy), + OptimismPortal: address(_o.optimismPortalProxy), + ETHLockbox: address(_o.ethLockboxProxy), + SystemConfig: address(_o.systemConfigProxy), + L1ERC721Bridge: address(_o.l1ERC721BridgeProxy), ProtocolVersions: address(0), SuperchainConfig: address(0) }); - 
ChainAssertions.checkAnchorStateRegistryProxy(_doo.anchorStateRegistryProxy(), true); + ChainAssertions.checkAnchorStateRegistryProxy(_o.anchorStateRegistryProxy, true); ChainAssertions.checkDisputeGameFactory( - _doo.disputeGameFactoryProxy(), - address(_doi.opChainProxyAdminOwner()), - address(_doo.permissionedDisputeGame()), - true + _o.disputeGameFactoryProxy, _i.opChainProxyAdminOwner, address(_o.permissionedDisputeGame), true ); - ChainAssertions.checkL1CrossDomainMessenger(_doo.l1CrossDomainMessengerProxy(), vm, true); + ChainAssertions.checkL1CrossDomainMessenger(_o.l1CrossDomainMessengerProxy, vm, true); + ChainAssertions.checkOptimismPortal2({ + _contracts: proxies, + _superchainConfig: IOPContractsManager(_i.opcm).superchainConfig(), + _opChainProxyAdminOwner: _i.opChainProxyAdminOwner, + _isProxy: true + }); + ChainAssertions.checkSystemConfigProxies(proxies, _i); + + DeployUtils.assertValidContractAddress(address(_o.l1CrossDomainMessengerProxy)); + DeployUtils.assertResolvedDelegateProxyImplementationSet("OVM_L1CrossDomainMessenger", _o.addressManager); + + // Proxies initialized checks DeployUtils.assertInitialized({ - _contractAddress: address(_doo.l1ERC721BridgeProxy()), + _contractAddress: address(_o.l1ERC721BridgeProxy), _isProxy: true, _slot: 0, _offset: 0 }); DeployUtils.assertInitialized({ - _contractAddress: address(_doo.l1StandardBridgeProxy()), + _contractAddress: address(_o.l1StandardBridgeProxy), _isProxy: true, _slot: 0, _offset: 0 }); DeployUtils.assertInitialized({ - _contractAddress: address(_doo.optimismMintableERC20FactoryProxy()), + _contractAddress: address(_o.optimismMintableERC20FactoryProxy), _isProxy: true, _slot: 0, _offset: 0 }); - ChainAssertions.checkOptimismPortal2({ - _contracts: proxies, - _superchainConfig: _doi.opcm().superchainConfig(), - _opChainProxyAdminOwner: _doi.opChainProxyAdminOwner(), - _isProxy: true - }); DeployUtils.assertInitialized({ - _contractAddress: address(_doo.ethLockboxProxy()), + 
_contractAddress: address(_o.ethLockboxProxy), _isProxy: true, _slot: 0, _offset: 0 }); - ChainAssertions.checkSystemConfig(proxies, _doi, true); - assertValidAddressManager(_doi, _doo); - assertValidOPChainProxyAdmin(_doi, _doo); - } - function assertValidAddressManager(DeployOPChainInput, DeployOPChainOutput _doo) internal view { - require(_doo.addressManager().owner() == address(_doo.opChainProxyAdmin()), "AM-10"); + require(_o.addressManager.owner() == address(_o.opChainProxyAdmin), "AM-10"); + assertValidOPChainProxyAdmin(_i, _o); } - function assertValidOPChainProxyAdmin(DeployOPChainInput _doi, DeployOPChainOutput _doo) internal { - IProxyAdmin admin = _doo.opChainProxyAdmin(); - require(admin.owner() == _doi.opChainProxyAdminOwner(), "OPCPA-10"); + function assertValidOPChainProxyAdmin(Types.DeployOPChainInput memory _doi, Output memory _doo) internal { + IProxyAdmin admin = _doo.opChainProxyAdmin; + require(admin.owner() == _doi.opChainProxyAdminOwner, "OPCPA-10"); require( - admin.getProxyImplementation(address(_doo.l1CrossDomainMessengerProxy())) + admin.getProxyImplementation(address(_doo.l1CrossDomainMessengerProxy)) == DeployUtils.assertResolvedDelegateProxyImplementationSet( - "OVM_L1CrossDomainMessenger", _doo.addressManager() + "OVM_L1CrossDomainMessenger", _doo.addressManager ), "OPCPA-20" ); - require(address(admin.addressManager()) == address(_doo.addressManager()), "OPCPA-30"); + require(address(admin.addressManager()) == address(_doo.addressManager), "OPCPA-30"); require( - admin.getProxyImplementation(address(_doo.l1StandardBridgeProxy())) - == DeployUtils.assertL1ChugSplashImplementationSet(address(_doo.l1StandardBridgeProxy())), + admin.getProxyImplementation(address(_doo.l1StandardBridgeProxy)) + == DeployUtils.assertL1ChugSplashImplementationSet(address(_doo.l1StandardBridgeProxy)), "OPCPA-40" ); require( - admin.getProxyImplementation(address(_doo.l1ERC721BridgeProxy())) - == 
DeployUtils.assertERC1967ImplementationSet(address(_doo.l1ERC721BridgeProxy())), + admin.getProxyImplementation(address(_doo.l1ERC721BridgeProxy)) + == DeployUtils.assertERC1967ImplementationSet(address(_doo.l1ERC721BridgeProxy)), "OPCPA-50" ); require( - admin.getProxyImplementation(address(_doo.optimismPortalProxy())) - == DeployUtils.assertERC1967ImplementationSet(address(_doo.optimismPortalProxy())), + admin.getProxyImplementation(address(_doo.optimismPortalProxy)) + == DeployUtils.assertERC1967ImplementationSet(address(_doo.optimismPortalProxy)), "OPCPA-60" ); require( - admin.getProxyImplementation(address(_doo.systemConfigProxy())) - == DeployUtils.assertERC1967ImplementationSet(address(_doo.systemConfigProxy())), + admin.getProxyImplementation(address(_doo.systemConfigProxy)) + == DeployUtils.assertERC1967ImplementationSet(address(_doo.systemConfigProxy)), "OPCPA-70" ); require( - admin.getProxyImplementation(address(_doo.optimismMintableERC20FactoryProxy())) - == DeployUtils.assertERC1967ImplementationSet(address(_doo.optimismMintableERC20FactoryProxy())), + admin.getProxyImplementation(address(_doo.optimismMintableERC20FactoryProxy)) + == DeployUtils.assertERC1967ImplementationSet(address(_doo.optimismMintableERC20FactoryProxy)), "OPCPA-80" ); require( - admin.getProxyImplementation(address(_doo.disputeGameFactoryProxy())) - == DeployUtils.assertERC1967ImplementationSet(address(_doo.disputeGameFactoryProxy())), + admin.getProxyImplementation(address(_doo.disputeGameFactoryProxy)) + == DeployUtils.assertERC1967ImplementationSet(address(_doo.disputeGameFactoryProxy)), "OPCPA-90" ); require( - admin.getProxyImplementation(address(_doo.delayedWETHPermissionedGameProxy())) - == DeployUtils.assertERC1967ImplementationSet(address(_doo.delayedWETHPermissionedGameProxy())), + admin.getProxyImplementation(address(_doo.delayedWETHPermissionedGameProxy)) + == DeployUtils.assertERC1967ImplementationSet(address(_doo.delayedWETHPermissionedGameProxy)), "OPCPA-100" ); 
require( - admin.getProxyImplementation(address(_doo.anchorStateRegistryProxy())) - == DeployUtils.assertERC1967ImplementationSet(address(_doo.anchorStateRegistryProxy())), + admin.getProxyImplementation(address(_doo.anchorStateRegistryProxy)) + == DeployUtils.assertERC1967ImplementationSet(address(_doo.anchorStateRegistryProxy)), "OPCPA-110" ); require( - admin.getProxyImplementation(address(_doo.ethLockboxProxy())) - == DeployUtils.assertERC1967ImplementationSet(address(_doo.ethLockboxProxy())), + admin.getProxyImplementation(address(_doo.ethLockboxProxy)) + == DeployUtils.assertERC1967ImplementationSet(address(_doo.ethLockboxProxy)), "OPCPA-120" ); } - // -------- Utilities -------- - - function etchIOContracts() public returns (DeployOPChainInput doi_, DeployOPChainOutput doo_) { - (doi_, doo_) = getIOContracts(); - vm.etch(address(doi_), type(DeployOPChainInput).runtimeCode); - vm.etch(address(doo_), type(DeployOPChainOutput).runtimeCode); - } + function startingAnchorRoot() public pure returns (bytes memory) { + // WARNING: For now always hardcode the starting permissioned game anchor root to 0xdead, + // and we do not set anything for the permissioned game. This is because we currently only + // support deploying straight to permissioned games, and the starting root does not + // matter for that, as long as it is non-zero, since no games will be played. We do not + // deploy the permissionless game (and therefore do not set a starting root for it here) + // because to to update to the permissionless game, we will need to update its starting + // anchor root and deploy a new permissioned dispute game contract anyway. 
+ // + // You can `console.logBytes(abi.encode(ScriptConstants.DEFAULT_OUTPUT_ROOT()))` to get the bytes that + // are hardcoded into `op-chain-ops/deployer/opcm/opchain.go` - function getIOContracts() public view returns (DeployOPChainInput doi_, DeployOPChainOutput doo_) { - doi_ = DeployOPChainInput(DeployUtils.toIOAddress(msg.sender, "optimism.DeployOPChainInput")); - doo_ = DeployOPChainOutput(DeployUtils.toIOAddress(msg.sender, "optimism.DeployOPChainOutput")); + return abi.encode(ScriptConstants.DEFAULT_OUTPUT_ROOT()); } } diff --git a/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol index 13914f167dc7e..c93f547e4f072 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployOwnership.s.sol @@ -7,13 +7,11 @@ import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol"; import { GnosisSafeProxyFactory as SafeProxyFactory } from "safe-contracts/proxies/GnosisSafeProxyFactory.sol"; import { OwnerManager } from "safe-contracts/base/OwnerManager.sol"; import { ModuleManager } from "safe-contracts/base/ModuleManager.sol"; -import { GuardManager } from "safe-contracts/base/GuardManager.sol"; import { Enum as SafeOps } from "safe-contracts/common/Enum.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; -import { LivenessGuard } from "src/safe/LivenessGuard.sol"; -import { LivenessModule } from "src/safe/LivenessModule.sol"; +import { LivenessModule2 } from "src/safe/LivenessModule2.sol"; import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import { Deploy } from "./Deploy.s.sol"; @@ -240,36 +238,14 @@ contract DeployOwnership is Deploy { }); } - /// @notice Deploy a LivenessGuard for use on the Security Council Safe. - /// Note this function does not have the broadcast modifier. 
- function deployLivenessGuard() public returns (address addr_) { - Safe councilSafe = Safe(payable(artifacts.mustGetAddress("SecurityCouncilSafe"))); - addr_ = address(new LivenessGuard(councilSafe)); - - artifacts.save("LivenessGuard", address(addr_)); - console.log("New LivenessGuard deployed at %s", address(addr_)); - } - - /// @notice Deploy a LivenessModule for use on the Security Council Safe + /// @notice Deploy a LivenessModule2 singleton for use on Security Council Safes /// Note this function does not have the broadcast modifier. function deployLivenessModule() public returns (address addr_) { - Safe councilSafe = Safe(payable(artifacts.mustGetAddress("SecurityCouncilSafe"))); - address guard = artifacts.mustGetAddress("LivenessGuard"); - LivenessModuleConfig memory livenessModuleConfig = _getExampleCouncilConfig().livenessModuleConfig; - - addr_ = address( - new LivenessModule({ - _safe: councilSafe, - _livenessGuard: LivenessGuard(guard), - _livenessInterval: livenessModuleConfig.livenessInterval, - _thresholdPercentage: livenessModuleConfig.thresholdPercentage, - _minOwners: livenessModuleConfig.minOwners, - _fallbackOwner: livenessModuleConfig.fallbackOwner - }) - ); + // Deploy the singleton LivenessModule2 (no parameters needed) + addr_ = address(new LivenessModule2()); - artifacts.save("LivenessModule", address(addr_)); - console.log("New LivenessModule deployed at %s", address(addr_)); + artifacts.save("LivenessModule2", address(addr_)); + console.log("New LivenessModule2 deployed at %s", address(addr_)); } /// @notice Deploy a Security Council Safe. @@ -319,11 +295,6 @@ contract DeployOwnership is Deploy { SecurityCouncilConfig memory exampleCouncilConfig = _getExampleCouncilConfig(); Safe safe = Safe(artifacts.mustGetAddress("SecurityCouncilSafe")); - // Deploy and add the Liveness Guard. 
- address guard = deployLivenessGuard(); - _callViaSafe({ _safe: safe, _target: address(safe), _data: abi.encodeCall(GuardManager.setGuard, (guard)) }); - console.log("LivenessGuard setup on SecurityCouncilSafe"); - // Deploy and add the Liveness Module. address livenessModule = deployLivenessModule(); _callViaSafe({ @@ -332,13 +303,35 @@ contract DeployOwnership is Deploy { _data: abi.encodeCall(ModuleManager.enableModule, (livenessModule)) }); + // Configure the LivenessModule2 (second step of installation) + LivenessModuleConfig memory livenessModuleConfig = exampleCouncilConfig.livenessModuleConfig; + _callViaSafe({ + _safe: safe, + _target: livenessModule, + _data: abi.encodeCall( + LivenessModule2.configureLivenessModule, + ( + LivenessModule2.ModuleConfig({ + livenessResponsePeriod: livenessModuleConfig.livenessInterval, + fallbackOwner: livenessModuleConfig.fallbackOwner + }) + ) + ) + }); + // Finalize configuration by removing the additional deployer key. removeDeployerFromSafe({ _name: "SecurityCouncilSafe", _newThreshold: exampleCouncilConfig.safeConfig.threshold }); - address[] memory owners = safe.getOwners(); + // Verify the module was configured correctly + (uint256 configuredPeriod, address configuredFallback) = + LivenessModule2(livenessModule).livenessSafeConfiguration(address(safe)); + require( + configuredPeriod == exampleCouncilConfig.livenessModuleConfig.livenessInterval, + "DeployOwnership: configured liveness interval must match expected value" + ); require( - safe.getThreshold() == LivenessModule(livenessModule).getRequiredThreshold(owners.length), - "DeployOwnership: safe threshold must be equal to the LivenessModule's required threshold" + configuredFallback == exampleCouncilConfig.livenessModuleConfig.fallbackOwner, + "DeployOwnership: configured fallback owner must match expected value" ); addr_ = address(safe); diff --git a/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol 
b/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol index 8b3e5117906f7..548d1e5e367f3 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol @@ -56,6 +56,12 @@ contract DeploySuperchain is Script { // -------- Core Deployment Methods -------- + function runWithBytes(bytes memory _input) public returns (bytes memory) { + Input memory input = abi.decode(_input, (Input)); + Output memory output = run(input); + return abi.encode(output); + } + function run(Input memory _input) public returns (Output memory output_) { // Convert the external Input to InternalInput InternalInput memory internalInput = toInternalInput(_input); diff --git a/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol b/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol index e7b7f76edfd7c..573b4d8235983 100644 --- a/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/ReadImplementationAddresses.s.sol @@ -1,168 +1,81 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -import { BaseDeployIO } from "scripts/deploy/BaseDeployIO.sol"; import { IProxy } from "interfaces/universal/IProxy.sol"; import { Script } from "forge-std/Script.sol"; -import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; -import { DeployOPChainOutput } from "scripts/deploy/DeployOPChain.s.sol"; -import { IMIPS } from "interfaces/cannon/IMIPS.sol"; +import { IMIPS64 } from "interfaces/cannon/IMIPS64.sol"; import { IOPContractsManager } from "interfaces/L1/IOPContractsManager.sol"; import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; import { IStaticL1ChugSplashProxy } from "interfaces/legacy/IL1ChugSplashProxy.sol"; -contract ReadImplementationAddressesInput is DeployOPChainOutput { - IOPContractsManager internal _opcm; - - function set(bytes4 _sel, address _addr) 
public override { - require(_addr != address(0), "ReadImplementationAddressesInput: cannot set zero address"); - if (_sel == this.opcm.selector) _opcm = IOPContractsManager(_addr); - else if (_sel == this.addressManager.selector) _addressManager = IAddressManager(_addr); - else super.set(_sel, _addr); - } - - function opcm() public view returns (IOPContractsManager) { - DeployUtils.assertValidContractAddress(address(_opcm)); - return _opcm; - } -} - -contract ReadImplementationAddressesOutput is BaseDeployIO { - address internal _delayedWETH; - address internal _optimismPortal; - address internal _ethLockbox; - address internal _systemConfig; - address internal _l1CrossDomainMessenger; - address internal _l1ERC721Bridge; - address internal _l1StandardBridge; - address internal _optimismMintableERC20Factory; - address internal _disputeGameFactory; - address internal _mipsSingleton; - address internal _preimageOracleSingleton; - - function set(bytes4 _sel, address _addr) public { - require(_addr != address(0), "ReadImplementationAddressesOutput: cannot set zero address"); - if (_sel == this.delayedWETH.selector) _delayedWETH = _addr; - else if (_sel == this.optimismPortal.selector) _optimismPortal = _addr; - else if (_sel == this.ethLockbox.selector) _ethLockbox = _addr; - else if (_sel == this.systemConfig.selector) _systemConfig = _addr; - else if (_sel == this.l1CrossDomainMessenger.selector) _l1CrossDomainMessenger = _addr; - else if (_sel == this.l1ERC721Bridge.selector) _l1ERC721Bridge = _addr; - else if (_sel == this.l1StandardBridge.selector) _l1StandardBridge = _addr; - else if (_sel == this.optimismMintableERC20Factory.selector) _optimismMintableERC20Factory = _addr; - else if (_sel == this.disputeGameFactory.selector) _disputeGameFactory = _addr; - else if (_sel == this.mipsSingleton.selector) _mipsSingleton = _addr; - else if (_sel == this.preimageOracleSingleton.selector) _preimageOracleSingleton = _addr; - else revert("ReadImplementationAddressesOutput: 
unknown selector"); - } - - function delayedWETH() public view returns (address) { - require(_delayedWETH != address(0), "ReadImplementationAddressesOutput: delayedWETH not set"); - return _delayedWETH; - } - - function optimismPortal() public view returns (address) { - require(_optimismPortal != address(0), "ReadImplementationAddressesOutput: optimismPortal not set"); - return _optimismPortal; - } - - function ethLockbox() public view returns (address) { - require(_ethLockbox != address(0), "ReadImplementationAddressesOutput: ethLockbox not set"); - return _ethLockbox; - } - - function systemConfig() public view returns (address) { - require(_systemConfig != address(0), "ReadImplementationAddressesOutput: systemConfig not set"); - return _systemConfig; +contract ReadImplementationAddresses is Script { + struct Input { + address addressManager; + address l1ERC721BridgeProxy; + address systemConfigProxy; + address optimismMintableERC20FactoryProxy; + address l1StandardBridgeProxy; + address optimismPortalProxy; + address disputeGameFactoryProxy; + address delayedWETHPermissionedGameProxy; + address opcm; } - function l1CrossDomainMessenger() public view returns (address) { - require( - _l1CrossDomainMessenger != address(0), "ReadImplementationAddressesOutput: l1CrossDomainMessenger not set" - ); - return _l1CrossDomainMessenger; + struct Output { + address delayedWETH; + address optimismPortal; + address optimismPortalInterop; + address ethLockbox; + address systemConfig; + address l1CrossDomainMessenger; + address l1ERC721Bridge; + address l1StandardBridge; + address optimismMintableERC20Factory; + address disputeGameFactory; + address mipsSingleton; + address preimageOracleSingleton; } - function l1ERC721Bridge() public view returns (address) { - require(_l1ERC721Bridge != address(0), "ReadImplementationAddressesOutput: l1ERC721Bridge not set"); - return _l1ERC721Bridge; - } + function run(Input memory _input) public returns (Output memory output_) { + // Get 
implementations from EIP-1967 proxies + output_.delayedWETH = getEIP1967Impl(_input.delayedWETHPermissionedGameProxy); + output_.optimismPortal = getEIP1967Impl(_input.optimismPortalProxy); + output_.systemConfig = getEIP1967Impl(_input.systemConfigProxy); + output_.l1ERC721Bridge = getEIP1967Impl(_input.l1ERC721BridgeProxy); + output_.optimismMintableERC20Factory = getEIP1967Impl(_input.optimismMintableERC20FactoryProxy); + output_.disputeGameFactory = getEIP1967Impl(_input.disputeGameFactoryProxy); - function l1StandardBridge() public view returns (address) { - require(_l1StandardBridge != address(0), "ReadImplementationAddressesOutput: l1StandardBridge not set"); - return _l1StandardBridge; - } + // Get L1StandardBridge implementation (uses different proxy type) + vm.prank(address(0)); + output_.l1StandardBridge = IStaticL1ChugSplashProxy(_input.l1StandardBridgeProxy).getImplementation(); - function optimismMintableERC20Factory() public view returns (address) { - require( - _optimismMintableERC20Factory != address(0), - "ReadImplementationAddressesOutput: optimismMintableERC20Factory not set" - ); - return _optimismMintableERC20Factory; - } + // Get implementations from OPCM + IOPContractsManager opcm = IOPContractsManager(_input.opcm); + output_.mipsSingleton = opcm.implementations().mipsImpl; + output_.delayedWETH = opcm.implementations().delayedWETHImpl; + output_.ethLockbox = opcm.implementations().ethLockboxImpl; + output_.optimismPortalInterop = opcm.implementations().optimismPortalInteropImpl; - function disputeGameFactory() public view returns (address) { - require(_disputeGameFactory != address(0), "ReadImplementationAddressesOutput: disputeGameFactory not set"); - return _disputeGameFactory; - } + // Get L1CrossDomainMessenger from AddressManager + IAddressManager am = IAddressManager(_input.addressManager); + output_.l1CrossDomainMessenger = am.getAddress("OVM_L1CrossDomainMessenger"); - function mipsSingleton() public view returns (address) { - 
require(_mipsSingleton != address(0), "ReadImplementationAddressesOutput: mipsSingleton not set"); - return _mipsSingleton; + // Get PreimageOracle from MIPS singleton + output_.preimageOracleSingleton = address(IMIPS64(output_.mipsSingleton).oracle()); } - function preimageOracleSingleton() public view returns (address) { - require( - _preimageOracleSingleton != address(0), "ReadImplementationAddressesOutput: preimageOracleSingleton not set" - ); - return _preimageOracleSingleton; + function runWithBytes(bytes memory _input) public returns (bytes memory) { + Input memory input = abi.decode(_input, (Input)); + Output memory output = run(input); + return abi.encode(output); } -} - -contract ReadImplementationAddresses is Script { - function run(ReadImplementationAddressesInput _rii, ReadImplementationAddressesOutput _rio) public { - address[6] memory eip1967Proxies = [ - address(_rii.delayedWETHPermissionedGameProxy()), - address(_rii.optimismPortalProxy()), - address(_rii.systemConfigProxy()), - address(_rii.l1ERC721BridgeProxy()), - address(_rii.optimismMintableERC20FactoryProxy()), - address(_rii.disputeGameFactoryProxy()) - ]; - - bytes4[6] memory sels = [ - _rio.delayedWETH.selector, - _rio.optimismPortal.selector, - _rio.systemConfig.selector, - _rio.l1ERC721Bridge.selector, - _rio.optimismMintableERC20Factory.selector, - _rio.disputeGameFactory.selector - ]; - - for (uint256 i = 0; i < eip1967Proxies.length; i++) { - IProxy proxy = IProxy(payable(eip1967Proxies[i])); - vm.prank(address(0)); - _rio.set(sels[i], proxy.implementation()); - } + /// @notice Gets the implementation address from an EIP-1967 proxy + /// @param _proxy The proxy address to read from + /// @return impl_ The implementation address + function getEIP1967Impl(address _proxy) private returns (address impl_) { + IProxy proxy = IProxy(payable(_proxy)); vm.prank(address(0)); - address l1SBImpl = IStaticL1ChugSplashProxy(address(_rii.l1StandardBridgeProxy())).getImplementation(); - 
vm.prank(address(0)); - _rio.set(_rio.l1StandardBridge.selector, l1SBImpl); - - address mipsLogic = _rii.opcm().implementations().mipsImpl; - _rio.set(_rio.mipsSingleton.selector, mipsLogic); - - address delayedWETH = _rii.opcm().implementations().delayedWETHImpl; - _rio.set(_rio.delayedWETH.selector, delayedWETH); - - IAddressManager am = _rii.addressManager(); - _rio.set(_rio.l1CrossDomainMessenger.selector, am.getAddress("OVM_L1CrossDomainMessenger")); - - address preimageOracle = address(IMIPS(mipsLogic).oracle()); - _rio.set(_rio.preimageOracleSingleton.selector, preimageOracle); - - address ethLockbox = _rii.opcm().implementations().ethLockboxImpl; - _rio.set(_rio.ethLockbox.selector, ethLockbox); + impl_ = proxy.implementation(); } } diff --git a/packages/contracts-bedrock/scripts/deploy/StandardConstants.sol b/packages/contracts-bedrock/scripts/deploy/StandardConstants.sol index e3ac46ec44bc3..15b10faa660d1 100644 --- a/packages/contracts-bedrock/scripts/deploy/StandardConstants.sol +++ b/packages/contracts-bedrock/scripts/deploy/StandardConstants.sol @@ -2,5 +2,5 @@ pragma solidity 0.8.15; library StandardConstants { - uint256 public constant MIPS_VERSION = 7; + uint256 public constant MIPS_VERSION = 8; } diff --git a/packages/contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol b/packages/contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol index 1359f4eda4799..f67fd6448495a 100644 --- a/packages/contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol @@ -40,12 +40,21 @@ contract VerifyOPCM is Script { /// @notice Thrown when an artifact file is empty. error VerifyOPCM_EmptyArtifactFile(string _artifactPath); + /// @notice Thrown when contractsContainer addresses are not the same across all OPCM components. + error VerifyOPCM_ContractsContainerMismatch(); + /// @notice Thrown when the creation bytecode is not found in an artifact file. 
error VerifyOPCM_CreationBytecodeNotFound(string _artifactPath); /// @notice Thrown when the runtime bytecode is not found in an artifact file. error VerifyOPCM_RuntimeBytecodeNotFound(string _artifactPath); + /// @notice Thrown when there are getter functions in the ABI that are not being checked. + error VerifyOPCM_UnaccountedGetters(string[] _unaccountedGetters); + + /// @notice Thrown when the dev feature bitmap is not empty on mainnet. + error VerifyOPCM_DevFeatureBitmapNotEmpty(); + /// @notice Preamble used for blueprint contracts. bytes constant BLUEPRINT_PREAMBLE = hex"FE7100"; @@ -87,6 +96,13 @@ contract VerifyOPCM is Script { /// @notice Maps contract names to an overriding source file name. mapping(string => string) internal sourceNameOverrides; + /// @notice Maps expected getter function names to their verification method. + /// Value can be either: + /// - An environment variable name (e.g., "EXPECTED_SUPERCHAIN_CONFIG") for getters verified via env vars + /// - "SKIP" for getters verified elsewhere in the verification process + /// WARNING: Do NOT add new getters without understanding their verification method! + mapping(string => string) internal expectedGetters; + /// @notice Setup flag. bool internal ready; @@ -94,6 +110,7 @@ contract VerifyOPCM is Script { function setUp() public { // Overrides for situations where field names do not cleanly map to contract names. 
fieldNameOverrides["optimismPortalImpl"] = "OptimismPortal2"; + fieldNameOverrides["optimismPortalInteropImpl"] = "OptimismPortalInterop"; fieldNameOverrides["mipsImpl"] = "MIPS64"; fieldNameOverrides["ethLockboxImpl"] = "ETHLockbox"; fieldNameOverrides["permissionlessDisputeGame1"] = "FaultDisputeGame"; @@ -109,12 +126,38 @@ contract VerifyOPCM is Script { fieldNameOverrides["opcmUpgrader"] = "OPContractsManagerUpgrader"; fieldNameOverrides["opcmInteropMigrator"] = "OPContractsManagerInteropMigrator"; fieldNameOverrides["opcmStandardValidator"] = "OPContractsManagerStandardValidator"; + fieldNameOverrides["contractsContainer"] = "OPContractsManagerContractsContainer"; // Overrides for situations where contracts have differently named source files. sourceNameOverrides["OPContractsManagerGameTypeAdder"] = "OPContractsManager"; sourceNameOverrides["OPContractsManagerDeployer"] = "OPContractsManager"; sourceNameOverrides["OPContractsManagerUpgrader"] = "OPContractsManager"; sourceNameOverrides["OPContractsManagerInteropMigrator"] = "OPContractsManager"; + sourceNameOverrides["OPContractsManagerContractsContainer"] = "OPContractsManager"; + + // Expected getter functions and their verification methods. + // CRITICAL: Any getter in the ABI that's not in this list will cause verification to fail. + // NEVER add a getter without understanding HOW it's being verified! 
+ + // Getters verified via bytecode comparison (blueprints/implementations contain addresses) + expectedGetters["blueprints"] = "SKIP"; // Verified via bytecode comparison of blueprint contracts + expectedGetters["implementations"] = "SKIP"; // Verified via bytecode comparison of implementation contracts + + // Getters verified via environment variables in _verifyOpcmImmutableVariables() + expectedGetters["protocolVersions"] = "EXPECTED_PROTOCOL_VERSIONS"; + expectedGetters["superchainConfig"] = "EXPECTED_SUPERCHAIN_CONFIG"; + expectedGetters["superchainProxyAdmin"] = "EXPECTED_SUPERCHAIN_PROXY_ADMIN"; + + // Getters for OPCM sub-contracts (addresses verified via bytecode comparison) + expectedGetters["opcmDeployer"] = "SKIP"; // Address verified via bytecode comparison + expectedGetters["opcmGameTypeAdder"] = "SKIP"; // Address verified via bytecode comparison + expectedGetters["opcmInteropMigrator"] = "SKIP"; // Address verified via bytecode comparison + expectedGetters["opcmStandardValidator"] = "SKIP"; // Address verified via bytecode comparison + expectedGetters["opcmUpgrader"] = "SKIP"; // Address verified via bytecode comparison + + // Getters that don't need any sort of verification + expectedGetters["devFeatureBitmap"] = "SKIP"; + expectedGetters["isDevFeatureEnabled"] = "SKIP"; // Mark as ready. ready = true; @@ -158,6 +201,12 @@ contract VerifyOPCM is Script { // Fetch Implementations & Blueprints from OPCM IOPContractsManager opcm = IOPContractsManager(_opcmAddress); + // Validate that all ABI getters are accounted for. + _validateAllGettersAccounted(); + + // Validate that the dev feature bitmap is empty on mainnet. + _validateDevFeatureBitmap(opcm); + // Collect all the references. OpcmContractRef[] memory refs = _collectOpcmContractRefs(opcm); @@ -187,6 +236,19 @@ contract VerifyOPCM is Script { revert VerifyOPCM_NoProperties(); } + // Verify that all component contracts have the same contractsContainer address. 
+ _verifyContractsContainerConsistency(propRefs); + + // Get the ContractsContainer address from the first component (they're all the same) + address contractsContainerAddr = address(0); + for (uint256 i = 0; i < propRefs.length; i++) { + string memory field = propRefs[i].field; + if (_hasContractsContainer(field)) { + contractsContainerAddr = _getContractsContainerAddress(propRefs[i].addr); + break; + } + } + // Collect implementation references. OpcmContractRef[] memory implRefs = _getOpcmContractRefs(_opcm, "implementations", false); if (implRefs.length == 0) { @@ -200,12 +262,18 @@ contract VerifyOPCM is Script { } // Create a single array to join everything together. - uint256 extraRefs = 1; + uint256 extraRefs = 2; // OPCM + ContractsContainer OpcmContractRef[] memory refs = new OpcmContractRef[](propRefs.length + implRefs.length + bpRefs.length + extraRefs); // References for OPCM and linked contracts. refs[0] = OpcmContractRef({ field: "opcm", name: "OPContractsManager", addr: address(_opcm), blueprint: false }); + refs[1] = OpcmContractRef({ + field: "contractsContainer", + name: "OPContractsManagerContractsContainer", + addr: contractsContainerAddr, + blueprint: false + }); // Add the property references. for (uint256 i = 0; i < propRefs.length; i++) { @@ -226,6 +294,92 @@ contract VerifyOPCM is Script { return refs; } + /// @notice Verifies that all OPCM component contracts have the same contractsContainer address. + /// @param _propRefs Array of property references containing component addresses. 
+ function _verifyContractsContainerConsistency(OpcmContractRef[] memory _propRefs) internal view { + // Process components that have contractsContainer(), validate addresses, and verify consistency + OpcmContractRef[] memory components = new OpcmContractRef[](_propRefs.length); + address[] memory containerAddresses = new address[](_propRefs.length); + uint256 componentCount = 0; + address expectedContainer = address(0); + + for (uint256 i = 0; i < _propRefs.length; i++) { + OpcmContractRef memory propRef = _propRefs[i]; + + if (!_hasContractsContainer(propRef.field)) { + continue; + } + + components[componentCount] = propRef; + address containerAddr = _getContractsContainerAddress(propRef.addr); + + if (containerAddr == address(0)) { + console.log(string.concat("ERROR: Failed to retrieve contractsContainer address from ", propRef.field)); + revert VerifyOPCM_ContractsContainerMismatch(); + } + + containerAddresses[componentCount] = containerAddr; + + if (componentCount == 0) { + expectedContainer = containerAddr; + } else if (containerAddr != expectedContainer) { + _logContainerAddressMismatch(components, containerAddresses, componentCount); + revert VerifyOPCM_ContractsContainerMismatch(); + } + + componentCount++; + } + + // Ensure we found at least one component + if (componentCount == 0) { + console.log("ERROR: No OPCM components found for contractsContainer verification"); + revert VerifyOPCM_ContractsContainerMismatch(); + } + + console.log( + string.concat( + "OK: All ", vm.toString(componentCount), " components have the same contractsContainer address" + ) + ); + console.log(string.concat(" contractsContainer: ", vm.toString(expectedContainer))); + } + + /// @notice Logs container address mismatch details for debugging. + /// @param _components Array of components found so far. + /// @param _containerAddresses Array of container addresses for each component. + /// @param _componentCount Number of components processed. 
+ function _logContainerAddressMismatch( + OpcmContractRef[] memory _components, + address[] memory _containerAddresses, + uint256 _componentCount + ) + internal + pure + { + console.log("ERROR: contractsContainer addresses are not consistent across all components"); + for (uint256 j = 0; j <= _componentCount; j++) { + console.log(string.concat(" ", _components[j].field, ": ", vm.toString(_containerAddresses[j]))); + } + } + + /// @notice Gets the contractsContainer address from a contract. + /// @param _contract The contract address to query. + /// @return The contractsContainer address. + function _getContractsContainerAddress(address _contract) internal view returns (address) { + // Call the contractsContainer() function on the contract. + // nosemgrep: sol-style-use-abi-encodecall + (bool success, bytes memory returnData) = _contract.staticcall(abi.encodeWithSignature("contractsContainer()")); + if (!success) { + console.log( + string.concat( + "[FAIL] ERROR: Failed to call contractsContainer() function on contract ", vm.toString(_contract) + ) + ); + return address(0); + } + return abi.decode(returnData, (address)); + } + /// @notice Verifies a single OPCM contract reference (implementation or bytecode). /// @param _target The target contract reference to verify. /// @param _skipConstructorVerification Whether to skip constructor verification. @@ -328,6 +482,11 @@ contract VerifyOPCM is Script { } } + // If this is the OPCM contract itself, verify the immutable variables as well. + if (keccak256(bytes(_target.field)) == keccak256(bytes("opcm"))) { + success = _verifyOpcmImmutableVariables(IOPContractsManager(_target.addr)) && success; + } + // Log final status for this field. if (success) { console.log(string.concat("Status: [OK] Verified ", _target.name)); @@ -338,6 +497,69 @@ contract VerifyOPCM is Script { return success; } + /// @notice Verifies that the immutable variables in the OPCM contract match expected values. 
+ /// @param _opcm The OPCM contract to verify immutable variables for. + /// @return True if all immutable variables are verified, false otherwise. + function _verifyOpcmImmutableVariables(IOPContractsManager _opcm) internal returns (bool) { + console.log(" Verifying OPCM immutable variables..."); + + bool success = true; + + // Get all OPCM getters and iterate over them + // Note: We use the pattern `success = false; continue;` for failures to ensure + // comprehensive reporting. Once success is false, it should never be reset to true. + // This allows us to collect and report ALL issues in a single verification run. + string[] memory allGetters = _getOpcmGetters(); + + for (uint256 i = 0; i < allGetters.length; i++) { + string memory functionName = allGetters[i]; + string memory verificationMethod = expectedGetters[functionName]; + + // All getters must be accounted for in expectedGetters mapping + if (bytes(verificationMethod).length == 0) { + console.log("ERROR: Getter '%s' is not accounted for in expectedGetters mapping", functionName); + success = false; + continue; + } + + // Skip getters that don't need env var verification + if (keccak256(bytes(verificationMethod)) == keccak256(bytes("SKIP"))) { + continue; + } + + // Get expected address from environment variable + // nosemgrep: sol-style-vm-env-only-in-config-sol + address expectedAddress = vm.envAddress(verificationMethod); + + // Call the function to retrieve the actual address + // nosemgrep: sol-style-use-abi-encodecall + (bool callSuccess, bytes memory returnedData) = + address(_opcm).staticcall(abi.encodeWithSignature(string.concat(functionName, "()"))); + + if (!callSuccess) { + console.log(string.concat(" [FAIL] ERROR: Failed to call ", functionName, "() function on OPCM.")); + success = false; + continue; + } + + // Decode as an address + address actualAddress = abi.decode(returnedData, (address)); + + // Log the comparison + console.log(string.concat(" ", functionName, ": ", 
vm.toString(actualAddress))); + console.log(string.concat(" expected: ", vm.toString(expectedAddress))); + + if (actualAddress != expectedAddress) { + console.log(string.concat(" [FAIL] ERROR: ", functionName, " mismatch")); + success = false; + } else { + console.log(string.concat(" [OK] ", functionName, " verified")); + } + } + + return success; + } + /// @notice Loads artifact info from a JSON file using Foundry's parsing capabilities. /// @param _artifactPath Path to the artifact JSON file. /// @return info The parsed artifact information containing bytecode and immutable references. @@ -657,4 +879,90 @@ contract VerifyOPCM is Script { // Return computed path, relative to the contracts-bedrock directory. return string.concat("forge-artifacts/", sourceName, ".sol/", _contractName, ".json"); } + + /// @notice Checks if a field name represents an OPCM component contract that has contractsContainer(). + /// @param _field The field name to check. + /// @return True if the field represents an OPCM component with contractsContainer(), false otherwise. + function _hasContractsContainer(string memory _field) internal pure returns (bool) { + // Check if it starts with "opcm" + if (!LibString.startsWith(_field, "opcm")) { + return false; + } + + // Components that start with "opcm" but don't extend OPContractsManagerBase (and thus don't have + // contractsContainer()) + string[] memory exclusions = new string[](1); + exclusions[0] = "opcmStandardValidator"; + + // Check if the field is in the exclusion list + for (uint256 i = 0; i < exclusions.length; i++) { + if (LibString.eq(_field, exclusions[i])) { + return false; + } + } + + return true; + } + + /// @notice Gets all OPCM getter function names from the ABI. + /// @return Array of getter function names found in the OPContractsManager ABI. 
+ function _getOpcmGetters() internal returns (string[] memory) { + return abi.decode( + vm.parseJson( + Process.bash( + string.concat( + "jq -r '[.abi[] | select(.type == \"function\" and .stateMutability == \"view\" and (.inputs | length) == 0) | .name]' ", + _buildArtifactPath("OPContractsManager") + ) + ) + ), + (string[]) + ); + } + + /// @notice Validates that the dev feature bitmap is empty on mainnet. + /// @param _opcm The OPCM contract. + function _validateDevFeatureBitmap(IOPContractsManager _opcm) internal view { + // Get the dev feature bitmap. + bytes32 devFeatureBitmap = _opcm.devFeatureBitmap(); + + // Check if we're in a testing environment. + bool isTestingEnvironment = address(0xbeefcafe).code.length > 0; + + // Check if any dev features are enabled. + if (block.chainid == 1 && !isTestingEnvironment && devFeatureBitmap != bytes32(0)) { + revert VerifyOPCM_DevFeatureBitmapNotEmpty(); + } + } + + /// @notice Validates that all getter functions in the OPContractsManager ABI are accounted for + /// in the expectedGetters mapping. This ensures we don't miss any new getters that + /// might be added to the contract. 
+ function _validateAllGettersAccounted() internal { + // Get all function names from the OPContractsManager ABI + string[] memory allFunctions = _getOpcmGetters(); + + // Check for any functions that are not in our expectedGetters mapping + string[] memory unaccountedGetters = new string[](allFunctions.length); + uint256 unaccountedCount = 0; + + for (uint256 i = 0; i < allFunctions.length; i++) { + string memory functionName = allFunctions[i]; + // Check if the getter is not in our mapping (empty string means not set) + if (bytes(expectedGetters[functionName]).length == 0) { + unaccountedGetters[unaccountedCount] = functionName; + unaccountedCount++; + } + } + + // If there are unaccounted getters, revert with the list + if (unaccountedCount > 0) { + // Create a trimmed array with only the unaccounted getters + string[] memory trimmedUnaccounted = new string[](unaccountedCount); + for (uint256 i = 0; i < unaccountedCount; i++) { + trimmedUnaccounted[i] = unaccountedGetters[i]; + } + revert VerifyOPCM_UnaccountedGetters(trimmedUnaccounted); + } + } } diff --git a/packages/contracts-bedrock/scripts/libraries/Config.sol b/packages/contracts-bedrock/scripts/libraries/Config.sol index 2e7f032d86b9b..739fe88e6fbfa 100644 --- a/packages/contracts-bedrock/scripts/libraries/Config.sol +++ b/packages/contracts-bedrock/scripts/libraries/Config.sol @@ -36,11 +36,11 @@ enum Fork { GRANITE, HOLOCENE, ISTHMUS, - INTEROP, - JOVIAN + JOVIAN, + INTEROP } -Fork constant LATEST_FORK = Fork.JOVIAN; +Fork constant LATEST_FORK = Fork.INTEROP; library ForkUtils { function toString(Fork _fork) internal pure returns (string memory) { @@ -235,4 +235,19 @@ library Config { function forkTest() internal view returns (bool) { return vm.envOr("FORK_TEST", false); } + + /// @notice Returns true if the development feature interop is enabled. 
+ function devFeatureInterop() internal view returns (bool) { + return vm.envOr("DEV_FEATURE__OPTIMISM_PORTAL_INTEROP", false); + } + + /// @notice Returns true if the development feature cannon_kona is enabled. + function devFeatureCannonKona() internal view returns (bool) { + return vm.envOr("DEV_FEATURE__CANNON_KONA", false); + } + + /// @notice Returns true if the development feature deploy_v2_dispute_games is enabled. + function devFeatureDeployV2DisputeGames() internal view returns (bool) { + return vm.envOr("DEV_FEATURE__DEPLOY_V2_DISPUTE_GAMES", false); + } } diff --git a/packages/contracts-bedrock/scripts/libraries/Types.sol b/packages/contracts-bedrock/scripts/libraries/Types.sol index f8c6eb9026463..52f8557d97787 100644 --- a/packages/contracts-bedrock/scripts/libraries/Types.sol +++ b/packages/contracts-bedrock/scripts/libraries/Types.sol @@ -1,6 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +import { Claim, Duration, GameType } from "src/dispute/lib/Types.sol"; + library Types { /// @notice Represents a set of L1 contracts. Used to represent a set of proxies. /// This is not an exhaustive list of all contracts on L1, but rather a subset. @@ -20,4 +22,32 @@ library Types { address ProtocolVersions; address SuperchainConfig; } + + struct DeployOPChainInput { + // Roles + address opChainProxyAdminOwner; + address systemConfigOwner; + address batcher; + address unsafeBlockSigner; + address proposer; + address challenger; + // TODO Add fault proofs inputs in a future PR. 
+ uint32 basefeeScalar; + uint32 blobBaseFeeScalar; + uint256 l2ChainId; + address opcm; + string saltMixer; + uint64 gasLimit; + // Configurable dispute game inputs + GameType disputeGameType; + Claim disputeAbsolutePrestate; + uint256 disputeMaxGameDepth; + uint256 disputeSplitDepth; + Duration disputeClockExtension; + Duration disputeMaxClockDuration; + bool allowCustomDisputeParameters; + // Fee params + uint32 operatorFeeScalar; + uint64 operatorFeeConstant; + } } diff --git a/packages/contracts-bedrock/snapshots/abi/DisputeGameFactory.json b/packages/contracts-bedrock/snapshots/abi/DisputeGameFactory.json index c7a791ed32ecb..016224be139b0 100644 --- a/packages/contracts-bedrock/snapshots/abi/DisputeGameFactory.json +++ b/packages/contracts-bedrock/snapshots/abi/DisputeGameFactory.json @@ -89,6 +89,25 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [ + { + "internalType": "GameType", + "name": "", + "type": "uint32" + } + ], + "name": "gameArgs", + "outputs": [ + { + "internalType": "bytes", + "name": "", + "type": "bytes" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { @@ -322,6 +341,29 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [ + { + "internalType": "GameType", + "name": "_gameType", + "type": "uint32" + }, + { + "internalType": "contract IDisputeGame", + "name": "_impl", + "type": "address" + }, + { + "internalType": "bytes", + "name": "_args", + "type": "bytes" + } + ], + "name": "setImplementation", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [ { @@ -391,6 +433,25 @@ "name": "DisputeGameCreated", "type": "event" }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "GameType", + "name": "gameType", + "type": "uint32" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "args", + "type": "bytes" + } + ], + "name": "ImplementationArgsSet", + "type": "event" + }, { "anonymous": false, 
"inputs": [ diff --git a/packages/contracts-bedrock/snapshots/abi/FaultDisputeGame.json b/packages/contracts-bedrock/snapshots/abi/FaultDisputeGame.json index e19ce9e2812a4..26a1351d53d79 100644 --- a/packages/contracts-bedrock/snapshots/abi/FaultDisputeGame.json +++ b/packages/contracts-bedrock/snapshots/abi/FaultDisputeGame.json @@ -1016,6 +1016,11 @@ "name": "AnchorRootNotFound", "type": "error" }, + { + "inputs": [], + "name": "BadExtraData", + "type": "error" + }, { "inputs": [], "name": "BlockNumberMatches", diff --git a/packages/contracts-bedrock/snapshots/abi/FaultDisputeGameV2.json b/packages/contracts-bedrock/snapshots/abi/FaultDisputeGameV2.json new file mode 100644 index 0000000000000..d4a4f83892adf --- /dev/null +++ b/packages/contracts-bedrock/snapshots/abi/FaultDisputeGameV2.json @@ -0,0 +1,1195 @@ +[ + { + "inputs": [ + { + "components": [ + { + "internalType": "GameType", + "name": "gameType", + "type": "uint32" + }, + { + "internalType": "uint256", + "name": "maxGameDepth", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "splitDepth", + "type": "uint256" + }, + { + "internalType": "Duration", + "name": "clockExtension", + "type": "uint64" + }, + { + "internalType": "Duration", + "name": "maxClockDuration", + "type": "uint64" + } + ], + "internalType": "struct FaultDisputeGameV2.GameConstructorParams", + "name": "_params", + "type": "tuple" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "absolutePrestate", + "outputs": [ + { + "internalType": "Claim", + "name": "absolutePrestate_", + "type": "bytes32" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_ident", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_execLeafIdx", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_partOffset", + "type": "uint256" + } + ], + "name": "addLocalData", + "outputs": [], + 
"stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "anchorStateRegistry", + "outputs": [ + { + "internalType": "contract IAnchorStateRegistry", + "name": "registry_", + "type": "address" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "Claim", + "name": "_disputed", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "_parentIndex", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "_claim", + "type": "bytes32" + } + ], + "name": "attack", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "bondDistributionMode", + "outputs": [ + { + "internalType": "enum BondDistributionMode", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "bytes32", + "name": "version", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "stateRoot", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "messagePasserStorageRoot", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "latestBlockhash", + "type": "bytes32" + } + ], + "internalType": "struct Types.OutputRootProof", + "name": "_outputRootProof", + "type": "tuple" + }, + { + "internalType": "bytes", + "name": "_headerRLP", + "type": "bytes" + } + ], + "name": "challengeRootL2Block", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_recipient", + "type": "address" + } + ], + "name": "claimCredit", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "claimData", + "outputs": [ + { + "internalType": "uint32", + "name": "parentIndex", + "type": "uint32" + }, + { + "internalType": "address", + 
"name": "counteredBy", + "type": "address" + }, + { + "internalType": "address", + "name": "claimant", + "type": "address" + }, + { + "internalType": "uint128", + "name": "bond", + "type": "uint128" + }, + { + "internalType": "Claim", + "name": "claim", + "type": "bytes32" + }, + { + "internalType": "Position", + "name": "position", + "type": "uint128" + }, + { + "internalType": "Clock", + "name": "clock", + "type": "uint128" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "claimDataLen", + "outputs": [ + { + "internalType": "uint256", + "name": "len_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "Hash", + "name": "", + "type": "bytes32" + } + ], + "name": "claims", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "clockExtension", + "outputs": [ + { + "internalType": "Duration", + "name": "clockExtension_", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "closeGame", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "createdAt", + "outputs": [ + { + "internalType": "Timestamp", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_recipient", + "type": "address" + } + ], + "name": "credit", + "outputs": [ + { + "internalType": "uint256", + "name": "credit_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "Claim", + "name": "_disputed", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "_parentIndex", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "_claim", + "type": "bytes32" + } + ], 
+ "name": "defend", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "extraData", + "outputs": [ + { + "internalType": "bytes", + "name": "extraData_", + "type": "bytes" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "gameCreator", + "outputs": [ + { + "internalType": "address", + "name": "creator_", + "type": "address" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "gameData", + "outputs": [ + { + "internalType": "GameType", + "name": "gameType_", + "type": "uint32" + }, + { + "internalType": "Claim", + "name": "rootClaim_", + "type": "bytes32" + }, + { + "internalType": "bytes", + "name": "extraData_", + "type": "bytes" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "gameType", + "outputs": [ + { + "internalType": "GameType", + "name": "gameType_", + "type": "uint32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + } + ], + "name": "getChallengerDuration", + "outputs": [ + { + "internalType": "Duration", + "name": "duration_", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + } + ], + "name": "getNumToResolve", + "outputs": [ + { + "internalType": "uint256", + "name": "numRemainingChildren_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "Position", + "name": "_position", + "type": "uint128" + } + ], + "name": "getRequiredBond", + "outputs": [ + { + "internalType": "uint256", + "name": "requiredBond_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + 
"type": "address" + } + ], + "name": "hasUnlockedCredit", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "initialize", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "l1Head", + "outputs": [ + { + "internalType": "Hash", + "name": "l1Head_", + "type": "bytes32" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "l2BlockNumber", + "outputs": [ + { + "internalType": "uint256", + "name": "l2BlockNumber_", + "type": "uint256" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "l2BlockNumberChallenged", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "l2BlockNumberChallenger", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "l2ChainId", + "outputs": [ + { + "internalType": "uint256", + "name": "l2ChainId_", + "type": "uint256" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "l2SequenceNumber", + "outputs": [ + { + "internalType": "uint256", + "name": "l2SequenceNumber_", + "type": "uint256" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "maxClockDuration", + "outputs": [ + { + "internalType": "Duration", + "name": "maxClockDuration_", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "maxGameDepth", + "outputs": [ + { + "internalType": "uint256", + "name": "maxGameDepth_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "Claim", + 
"name": "_disputed", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "_challengeIndex", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "_claim", + "type": "bytes32" + }, + { + "internalType": "bool", + "name": "_isAttack", + "type": "bool" + } + ], + "name": "move", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "normalModeCredit", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "refundModeCredit", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "resolutionCheckpoints", + "outputs": [ + { + "internalType": "bool", + "name": "initialCheckpointComplete", + "type": "bool" + }, + { + "internalType": "uint32", + "name": "subgameIndex", + "type": "uint32" + }, + { + "internalType": "Position", + "name": "leftmostPosition", + "type": "uint128" + }, + { + "internalType": "address", + "name": "counteredBy", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "resolve", + "outputs": [ + { + "internalType": "enum GameStatus", + "name": "status_", + "type": "uint8" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_numToResolve", + "type": "uint256" + } + ], + "name": "resolveClaim", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": 
"resolvedAt", + "outputs": [ + { + "internalType": "Timestamp", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "resolvedSubgames", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "rootClaim", + "outputs": [ + { + "internalType": "Claim", + "name": "rootClaim_", + "type": "bytes32" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "splitDepth", + "outputs": [ + { + "internalType": "uint256", + "name": "splitDepth_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "startingBlockNumber", + "outputs": [ + { + "internalType": "uint256", + "name": "startingBlockNumber_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "startingOutputRoot", + "outputs": [ + { + "internalType": "Hash", + "name": "root", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "l2SequenceNumber", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "startingRootHash", + "outputs": [ + { + "internalType": "Hash", + "name": "startingRootHash_", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "status", + "outputs": [ + { + "internalType": "enum GameStatus", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + }, + { + "internalType": "bool", + "name": "_isAttack", + "type": "bool" + }, + { + "internalType": "bytes", + "name": "_stateData", + "type": "bytes" + }, + { + 
"internalType": "bytes", + "name": "_proof", + "type": "bytes" + } + ], + "name": "step", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "subgames", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "version", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "vm", + "outputs": [ + { + "internalType": "contract IBigStepper", + "name": "vm_", + "type": "address" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "wasRespectedGameTypeWhenCreated", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "weth", + "outputs": [ + { + "internalType": "contract IDelayedWETH", + "name": "weth_", + "type": "address" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "enum BondDistributionMode", + "name": "bondDistributionMode", + "type": "uint8" + } + ], + "name": "GameClosed", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "parentIndex", + "type": "uint256" + }, + { + "indexed": true, + "internalType": "Claim", + "name": "claim", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "address", + "name": "claimant", + "type": "address" + } + ], + "name": "Move", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "enum GameStatus", + 
"name": "status", + "type": "uint8" + } + ], + "name": "Resolved", + "type": "event" + }, + { + "inputs": [], + "name": "AlreadyInitialized", + "type": "error" + }, + { + "inputs": [], + "name": "AnchorRootNotFound", + "type": "error" + }, + { + "inputs": [], + "name": "BadExtraData", + "type": "error" + }, + { + "inputs": [], + "name": "BlockNumberMatches", + "type": "error" + }, + { + "inputs": [], + "name": "BondTransferFailed", + "type": "error" + }, + { + "inputs": [], + "name": "CannotDefendRootClaim", + "type": "error" + }, + { + "inputs": [], + "name": "ClaimAboveSplit", + "type": "error" + }, + { + "inputs": [], + "name": "ClaimAlreadyExists", + "type": "error" + }, + { + "inputs": [], + "name": "ClaimAlreadyResolved", + "type": "error" + }, + { + "inputs": [], + "name": "ClockNotExpired", + "type": "error" + }, + { + "inputs": [], + "name": "ClockTimeExceeded", + "type": "error" + }, + { + "inputs": [], + "name": "ContentLengthMismatch", + "type": "error" + }, + { + "inputs": [], + "name": "DuplicateStep", + "type": "error" + }, + { + "inputs": [], + "name": "EmptyItem", + "type": "error" + }, + { + "inputs": [], + "name": "GameDepthExceeded", + "type": "error" + }, + { + "inputs": [], + "name": "GameNotFinalized", + "type": "error" + }, + { + "inputs": [], + "name": "GameNotInProgress", + "type": "error" + }, + { + "inputs": [], + "name": "GameNotResolved", + "type": "error" + }, + { + "inputs": [], + "name": "GamePaused", + "type": "error" + }, + { + "inputs": [], + "name": "IncorrectBondAmount", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidBondDistributionMode", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidChallengePeriod", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidClockExtension", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidDataRemainder", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidDisputedClaimIndex", + "type": "error" + }, + { + "inputs": [], + "name": 
"InvalidHeader", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidHeaderRLP", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidLocalIdent", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidOutputRootProof", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidParent", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidPrestate", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidSplitDepth", + "type": "error" + }, + { + "inputs": [], + "name": "L2BlockNumberChallenged", + "type": "error" + }, + { + "inputs": [], + "name": "MaxDepthTooLarge", + "type": "error" + }, + { + "inputs": [], + "name": "NoCreditToClaim", + "type": "error" + }, + { + "inputs": [], + "name": "OutOfOrderResolution", + "type": "error" + }, + { + "inputs": [], + "name": "ReservedGameType", + "type": "error" + }, + { + "inputs": [], + "name": "UnexpectedList", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "Claim", + "name": "rootClaim", + "type": "bytes32" + } + ], + "name": "UnexpectedRootClaim", + "type": "error" + }, + { + "inputs": [], + "name": "UnexpectedString", + "type": "error" + }, + { + "inputs": [], + "name": "ValidStep", + "type": "error" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/GameHelper.json b/packages/contracts-bedrock/snapshots/abi/GameHelper.json new file mode 100644 index 0000000000000..f44f1598a20bf --- /dev/null +++ b/packages/contracts-bedrock/snapshots/abi/GameHelper.json @@ -0,0 +1,97 @@ +[ + { + "stateMutability": "payable", + "type": "receive" + }, + { + "inputs": [ + { + "internalType": "contract IDisputeGameFactory", + "name": "_dgf", + "type": "address" + }, + { + "internalType": "GameType", + "name": "_gameType", + "type": "uint32" + }, + { + "internalType": "Claim", + "name": "_rootClaim", + "type": "bytes32" + }, + { + "internalType": "bytes", + "name": "_extraData", + "type": "bytes" + }, + { + "components": [ + { + "internalType": 
"uint256", + "name": "parentIdx", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "claim", + "type": "bytes32" + }, + { + "internalType": "bool", + "name": "attack", + "type": "bool" + } + ], + "internalType": "struct GameHelper.Move[]", + "name": "_moves", + "type": "tuple[]" + } + ], + "name": "createGameWithClaims", + "outputs": [ + { + "internalType": "address", + "name": "gameAddr_", + "type": "address" + } + ], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "contract IFaultDisputeGame", + "name": "_game", + "type": "address" + }, + { + "components": [ + { + "internalType": "uint256", + "name": "parentIdx", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "claim", + "type": "bytes32" + }, + { + "internalType": "bool", + "name": "attack", + "type": "bool" + } + ], + "internalType": "struct GameHelper.Move[]", + "name": "_moves", + "type": "tuple[]" + } + ], + "name": "performMoves", + "outputs": [], + "stateMutability": "payable", + "type": "function" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/L1CrossDomainMessenger.json b/packages/contracts-bedrock/snapshots/abi/L1CrossDomainMessenger.json index 487c45491aac6..bc0174b42fa9c 100644 --- a/packages/contracts-bedrock/snapshots/abi/L1CrossDomainMessenger.json +++ b/packages/contracts-bedrock/snapshots/abi/L1CrossDomainMessenger.json @@ -431,19 +431,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [ - { - "internalType": "contract ISystemConfig", - "name": "_systemConfig", - "type": "address" - } - ], - "name": "upgrade", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, { "inputs": [], "name": "version", diff --git a/packages/contracts-bedrock/snapshots/abi/L1ERC721Bridge.json b/packages/contracts-bedrock/snapshots/abi/L1ERC721Bridge.json index 7783e2b38ee4f..7b83c924650b6 100644 --- 
a/packages/contracts-bedrock/snapshots/abi/L1ERC721Bridge.json +++ b/packages/contracts-bedrock/snapshots/abi/L1ERC721Bridge.json @@ -290,19 +290,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [ - { - "internalType": "contract ISystemConfig", - "name": "_systemConfig", - "type": "address" - } - ], - "name": "upgrade", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, { "inputs": [], "name": "version", diff --git a/packages/contracts-bedrock/snapshots/abi/L1StandardBridge.json b/packages/contracts-bedrock/snapshots/abi/L1StandardBridge.json index 259a0a21bdf5e..78752f602e873 100644 --- a/packages/contracts-bedrock/snapshots/abi/L1StandardBridge.json +++ b/packages/contracts-bedrock/snapshots/abi/L1StandardBridge.json @@ -549,19 +549,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [ - { - "internalType": "contract ISystemConfig", - "name": "_systemConfig", - "type": "address" - } - ], - "name": "upgrade", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, { "inputs": [], "name": "version", diff --git a/packages/contracts-bedrock/snapshots/abi/LivenessModule2.json b/packages/contracts-bedrock/snapshots/abi/LivenessModule2.json new file mode 100644 index 0000000000000..8a4658b443d8a --- /dev/null +++ b/packages/contracts-bedrock/snapshots/abi/LivenessModule2.json @@ -0,0 +1,286 @@ +[ + { + "inputs": [ + { + "internalType": "address", + "name": "_safe", + "type": "address" + } + ], + "name": "challenge", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "challengeStartTime", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_safe", + "type": "address" + } + ], + "name": 
"changeOwnershipToFallback", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "clearLivenessModule", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "livenessResponsePeriod", + "type": "uint256" + }, + { + "internalType": "address", + "name": "fallbackOwner", + "type": "address" + } + ], + "internalType": "struct LivenessModule2.ModuleConfig", + "name": "_config", + "type": "tuple" + } + ], + "name": "configureLivenessModule", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_safe", + "type": "address" + } + ], + "name": "getChallengePeriodEnd", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "livenessSafeConfiguration", + "outputs": [ + { + "internalType": "uint256", + "name": "livenessResponsePeriod", + "type": "uint256" + }, + { + "internalType": "address", + "name": "fallbackOwner", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "respond", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "version", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "safe", + "type": "address" + } + ], + "name": "ChallengeCancelled", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "safe", + "type": "address" + }, + { + "indexed": 
false, + "internalType": "uint256", + "name": "challengeStartTime", + "type": "uint256" + } + ], + "name": "ChallengeStarted", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "safe", + "type": "address" + }, + { + "indexed": false, + "internalType": "address", + "name": "fallbackOwner", + "type": "address" + } + ], + "name": "ChallengeSucceeded", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "safe", + "type": "address" + } + ], + "name": "ModuleCleared", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "safe", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "livenessResponsePeriod", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "address", + "name": "fallbackOwner", + "type": "address" + } + ], + "name": "ModuleConfigured", + "type": "event" + }, + { + "inputs": [], + "name": "LivenessModule2_ChallengeAlreadyExists", + "type": "error" + }, + { + "inputs": [], + "name": "LivenessModule2_ChallengeDoesNotExist", + "type": "error" + }, + { + "inputs": [], + "name": "LivenessModule2_InvalidFallbackOwner", + "type": "error" + }, + { + "inputs": [], + "name": "LivenessModule2_InvalidResponsePeriod", + "type": "error" + }, + { + "inputs": [], + "name": "LivenessModule2_ModuleNotConfigured", + "type": "error" + }, + { + "inputs": [], + "name": "LivenessModule2_ModuleNotEnabled", + "type": "error" + }, + { + "inputs": [], + "name": "LivenessModule2_ModuleStillEnabled", + "type": "error" + }, + { + "inputs": [], + "name": "LivenessModule2_OwnershipTransferFailed", + "type": "error" + }, + { + "inputs": [], + "name": "LivenessModule2_ResponsePeriodActive", + "type": "error" + }, + { + "inputs": [], + "name": "LivenessModule2_ResponsePeriodEnded", + "type": "error" + }, + { + "inputs": [], + 
"name": "LivenessModule2_UnauthorizedCaller", + "type": "error" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json index 752d650aec7f8..b9e54537a4d33 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManager.json @@ -40,16 +40,6 @@ "internalType": "contract IProxyAdmin", "name": "_superchainProxyAdmin", "type": "address" - }, - { - "internalType": "string", - "name": "_l1ContractsRelease", - "type": "string" - }, - { - "internalType": "address", - "name": "_upgradeController", - "type": "address" } ], "stateMutability": "nonpayable", @@ -447,6 +437,19 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [], + "name": "devFeatureBitmap", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "implementations", @@ -473,6 +476,11 @@ "name": "optimismPortalImpl", "type": "address" }, + { + "internalType": "address", + "name": "optimismPortalInteropImpl", + "type": "address" + }, { "internalType": "address", "name": "ethLockboxImpl", @@ -528,26 +536,19 @@ "type": "function" }, { - "inputs": [], - "name": "isRC", - "outputs": [ + "inputs": [ { - "internalType": "bool", - "name": "", - "type": "bool" + "internalType": "bytes32", + "name": "_feature", + "type": "bytes32" } ], - "stateMutability": "view", - "type": "function" - }, - { - "inputs": [], - "name": "l1ContractsRelease", + "name": "isDevFeatureEnabled", "outputs": [ { - "internalType": "string", + "internalType": "bool", "name": "", - "type": "string" + "type": "bool" } ], "stateMutability": "view", @@ -732,19 +733,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [ - { - "internalType": "bool", - "name": "_isRC", - "type": "bool" - } - ], - "name": 
"setRC", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, { "inputs": [], "name": "superchainConfig", @@ -781,17 +769,17 @@ "type": "address" }, { - "internalType": "contract IProxyAdmin", - "name": "proxyAdmin", - "type": "address" + "internalType": "Claim", + "name": "cannonPrestate", + "type": "bytes32" }, { "internalType": "Claim", - "name": "absolutePrestate", + "name": "cannonKonaPrestate", "type": "bytes32" } ], - "internalType": "struct OPContractsManager.OpChainConfig[]", + "internalType": "struct OPContractsManager.UpdatePrestateInput[]", "name": "_prestateUpdateInputs", "type": "tuple[]" } @@ -832,16 +820,21 @@ "type": "function" }, { - "inputs": [], - "name": "upgradeController", - "outputs": [ + "inputs": [ { - "internalType": "address", - "name": "", + "internalType": "contract ISuperchainConfig", + "name": "_superchainConfig", + "type": "address" + }, + { + "internalType": "contract IProxyAdmin", + "name": "_superchainProxyAdmin", "type": "address" } ], - "stateMutability": "view", + "name": "upgradeSuperchainConfig", + "outputs": [], + "stateMutability": "nonpayable", "type": "function" }, { @@ -966,19 +959,6 @@ "stateMutability": "pure", "type": "function" }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "bool", - "name": "_isRC", - "type": "bool" - } - ], - "name": "Released", - "type": "event" - }, { "inputs": [ { @@ -1042,11 +1022,6 @@ "name": "OnlyDelegatecall", "type": "error" }, - { - "inputs": [], - "name": "OnlyUpgradeController", - "type": "error" - }, { "inputs": [], "name": "PrestateNotSet", diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerContractsContainer.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerContractsContainer.json index c1383b7dd77fa..c49484fc7224c 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerContractsContainer.json +++ 
b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerContractsContainer.json @@ -95,6 +95,11 @@ "name": "optimismPortalImpl", "type": "address" }, + { + "internalType": "address", + "name": "optimismPortalInteropImpl", + "type": "address" + }, { "internalType": "address", "name": "ethLockboxImpl", @@ -144,11 +149,29 @@ "internalType": "struct OPContractsManager.Implementations", "name": "_implementations", "type": "tuple" + }, + { + "internalType": "bytes32", + "name": "_devFeatureBitmap", + "type": "bytes32" } ], "stateMutability": "nonpayable", "type": "constructor" }, + { + "inputs": [], + "name": "_isTestingEnvironment", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "blueprints", @@ -229,6 +252,19 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [], + "name": "devFeatureBitmap", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "implementations", @@ -255,6 +291,11 @@ "name": "optimismPortalImpl", "type": "address" }, + { + "internalType": "address", + "name": "optimismPortalInteropImpl", + "type": "address" + }, { "internalType": "address", "name": "ethLockboxImpl", @@ -308,5 +349,29 @@ ], "stateMutability": "view", "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "_feature", + "type": "bytes32" + } + ], + "name": "isDevFeatureEnabled", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "OPContractsManagerContractsContainer_DevFeatureInProd", + "type": "error" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerDeployer.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerDeployer.json 
index 81fd1cef7bb3f..7cd7a44502c06 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerDeployer.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerDeployer.json @@ -340,6 +340,19 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [], + "name": "devFeatureBitmap", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "implementations", @@ -366,6 +379,11 @@ "name": "optimismPortalImpl", "type": "address" }, + { + "internalType": "address", + "name": "optimismPortalInteropImpl", + "type": "address" + }, { "internalType": "address", "name": "ethLockboxImpl", @@ -420,6 +438,25 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "_feature", + "type": "bytes32" + } + ], + "name": "isDevFeatureEnabled", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, { "anonymous": false, "inputs": [ diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerGameTypeAdder.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerGameTypeAdder.json index bcce3c57daeea..6bf4393418702 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerGameTypeAdder.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerGameTypeAdder.json @@ -233,6 +233,19 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [], + "name": "devFeatureBitmap", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "implementations", @@ -259,6 +272,11 @@ "name": "optimismPortalImpl", "type": "address" }, + { + "internalType": "address", + "name": "optimismPortalInteropImpl", + "type": "address" + }, { "internalType": 
"address", "name": "ethLockboxImpl", @@ -313,6 +331,25 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "_feature", + "type": "bytes32" + } + ], + "name": "isDevFeatureEnabled", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { @@ -323,17 +360,17 @@ "type": "address" }, { - "internalType": "contract IProxyAdmin", - "name": "proxyAdmin", - "type": "address" + "internalType": "Claim", + "name": "cannonPrestate", + "type": "bytes32" }, { "internalType": "Claim", - "name": "absolutePrestate", + "name": "cannonKonaPrestate", "type": "bytes32" } ], - "internalType": "struct OPContractsManager.OpChainConfig[]", + "internalType": "struct OPContractsManager.UpdatePrestateInput[]", "name": "_prestateUpdateInputs", "type": "tuple[]" } diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInteropMigrator.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInteropMigrator.json index be5788a90227f..b06cd541bb38d 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInteropMigrator.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerInteropMigrator.json @@ -135,6 +135,19 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [], + "name": "devFeatureBitmap", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "implementations", @@ -161,6 +174,11 @@ "name": "optimismPortalImpl", "type": "address" }, + { + "internalType": "address", + "name": "optimismPortalInteropImpl", + "type": "address" + }, { "internalType": "address", "name": "ethLockboxImpl", @@ -215,6 +233,25 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "_feature", + "type": "bytes32" + } + ], + 
"name": "isDevFeatureEnabled", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerStandardValidator.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerStandardValidator.json index f58f77eb2b9e0..7ec9ab0bc717f 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerStandardValidator.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerStandardValidator.json @@ -13,6 +13,11 @@ "name": "optimismPortalImpl", "type": "address" }, + { + "internalType": "address", + "name": "optimismPortalInteropImpl", + "type": "address" + }, { "internalType": "address", "name": "ethLockboxImpl", @@ -82,6 +87,11 @@ "internalType": "uint256", "name": "_withdrawalDelaySeconds", "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "_devFeatureBitmap", + "type": "bytes32" } ], "stateMutability": "nonpayable", @@ -100,19 +110,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [], - "name": "anchorStateRegistryVersion", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "pure", - "type": "function" - }, { "inputs": [], "name": "challenger", @@ -141,15 +138,15 @@ }, { "inputs": [], - "name": "delayedWETHVersion", + "name": "devFeatureBitmap", "outputs": [ { - "internalType": "string", + "internalType": "bytes32", "name": "", - "type": "string" + "type": "bytes32" } ], - "stateMutability": "pure", + "stateMutability": "view", "type": "function" }, { @@ -165,19 +162,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [], - "name": "disputeGameFactoryVersion", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "pure", - "type": "function" - }, { "inputs": [], "name": "ethLockboxImpl", @@ -191,19 +175,6 @@ 
"stateMutability": "view", "type": "function" }, - { - "inputs": [], - "name": "ethLockboxVersion", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "pure", - "type": "function" - }, { "inputs": [], "name": "l1CrossDomainMessengerImpl", @@ -217,19 +188,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [], - "name": "l1CrossDomainMessengerVersion", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "pure", - "type": "function" - }, { "inputs": [], "name": "l1ERC721BridgeImpl", @@ -243,19 +201,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [], - "name": "l1ERC721BridgeVersion", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "pure", - "type": "function" - }, { "inputs": [], "name": "l1PAOMultisig", @@ -282,19 +227,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [], - "name": "l1StandardBridgeVersion", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "pure", - "type": "function" - }, { "inputs": [], "name": "mipsImpl", @@ -308,19 +240,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [], - "name": "mipsVersion", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "pure", - "type": "function" - }, { "inputs": [], "name": "optimismMintableERC20FactoryImpl", @@ -334,19 +253,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [], - "name": "optimismMintableERC20FactoryVersion", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "pure", - "type": "function" - }, { "inputs": [], "name": "optimismPortalImpl", @@ -362,15 +268,15 @@ }, { "inputs": [], - "name": "optimismPortalVersion", + "name": 
"optimismPortalInteropImpl", "outputs": [ { - "internalType": "string", + "internalType": "address", "name": "", - "type": "string" + "type": "address" } ], - "stateMutability": "pure", + "stateMutability": "view", "type": "function" }, { @@ -425,19 +331,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [], - "name": "systemConfigVersion", - "outputs": [ - { - "internalType": "string", - "name": "", - "type": "string" - } - ], - "stateMutability": "pure", - "type": "function" - }, { "inputs": [ { diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerUpgrader.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerUpgrader.json index f6c3e32692dca..512a83ae75cff 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerUpgrader.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerUpgrader.json @@ -135,6 +135,19 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [], + "name": "devFeatureBitmap", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "implementations", @@ -161,6 +174,11 @@ "name": "optimismPortalImpl", "type": "address" }, + { + "internalType": "address", + "name": "optimismPortalInteropImpl", + "type": "address" + }, { "internalType": "address", "name": "ethLockboxImpl", @@ -218,15 +236,24 @@ { "inputs": [ { - "internalType": "contract ISuperchainConfig", - "name": "_superchainConfig", - "type": "address" - }, + "internalType": "bytes32", + "name": "_feature", + "type": "bytes32" + } + ], + "name": "isDevFeatureEnabled", + "outputs": [ { - "internalType": "contract IProxyAdmin", - "name": "_superchainProxyAdmin", - "type": "address" - }, + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ { "components": [ { @@ -255,6 +282,24 @@ "stateMutability": 
"nonpayable", "type": "function" }, + { + "inputs": [ + { + "internalType": "contract ISuperchainConfig", + "name": "_superchainConfig", + "type": "address" + }, + { + "internalType": "contract IProxyAdmin", + "name": "_superchainProxyAdmin", + "type": "address" + } + ], + "name": "upgradeSuperchainConfig", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, { "anonymous": false, "inputs": [ @@ -316,11 +361,27 @@ "name": "NotABlueprint", "type": "error" }, + { + "inputs": [], + "name": "OPContractsManagerUpgrader_SuperchainConfigAlreadyUpToDate", + "type": "error" + }, { "inputs": [], "name": "OPContractsManagerUpgrader_SuperchainConfigMismatch", "type": "error" }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "index", + "type": "uint256" + } + ], + "name": "OPContractsManagerUpgrader_SuperchainConfigNeedsUpgrade", + "type": "error" + }, { "inputs": [], "name": "OPContractsManager_InvalidGameType", @@ -336,6 +397,11 @@ "name": "ReservedBitsSet", "type": "error" }, + { + "inputs": [], + "name": "SemverComp_InvalidSemverParts", + "type": "error" + }, { "inputs": [ { diff --git a/packages/contracts-bedrock/snapshots/abi/OptimismPortal2.json b/packages/contracts-bedrock/snapshots/abi/OptimismPortal2.json index 39d02adf5ec39..49ae551310b01 100644 --- a/packages/contracts-bedrock/snapshots/abi/OptimismPortal2.json +++ b/packages/contracts-bedrock/snapshots/abi/OptimismPortal2.json @@ -294,11 +294,6 @@ "internalType": "contract IAnchorStateRegistry", "name": "_anchorStateRegistry", "type": "address" - }, - { - "internalType": "contract IETHLockbox", - "name": "_ethLockbox", - "type": "address" } ], "name": "initialize", @@ -319,31 +314,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [], - "name": "migrateLiquidity", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, - { - "inputs": [ - { - "internalType": "contract IETHLockbox", - "name": "_newLockbox", - "type": "address" - }, - { - 
"internalType": "contract IAnchorStateRegistry", - "name": "_newAnchorStateRegistry", - "type": "address" - } - ], - "name": "migrateToSuperRoots", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, { "inputs": [ { @@ -537,127 +507,6 @@ "stateMutability": "nonpayable", "type": "function" }, - { - "inputs": [ - { - "components": [ - { - "internalType": "uint256", - "name": "nonce", - "type": "uint256" - }, - { - "internalType": "address", - "name": "sender", - "type": "address" - }, - { - "internalType": "address", - "name": "target", - "type": "address" - }, - { - "internalType": "uint256", - "name": "value", - "type": "uint256" - }, - { - "internalType": "uint256", - "name": "gasLimit", - "type": "uint256" - }, - { - "internalType": "bytes", - "name": "data", - "type": "bytes" - } - ], - "internalType": "struct Types.WithdrawalTransaction", - "name": "_tx", - "type": "tuple" - }, - { - "internalType": "contract IDisputeGame", - "name": "_disputeGameProxy", - "type": "address" - }, - { - "internalType": "uint256", - "name": "_outputRootIndex", - "type": "uint256" - }, - { - "components": [ - { - "internalType": "bytes1", - "name": "version", - "type": "bytes1" - }, - { - "internalType": "uint64", - "name": "timestamp", - "type": "uint64" - }, - { - "components": [ - { - "internalType": "uint256", - "name": "chainId", - "type": "uint256" - }, - { - "internalType": "bytes32", - "name": "root", - "type": "bytes32" - } - ], - "internalType": "struct Types.OutputRootWithChainId[]", - "name": "outputRoots", - "type": "tuple[]" - } - ], - "internalType": "struct Types.SuperRootProof", - "name": "_superRootProof", - "type": "tuple" - }, - { - "components": [ - { - "internalType": "bytes32", - "name": "version", - "type": "bytes32" - }, - { - "internalType": "bytes32", - "name": "stateRoot", - "type": "bytes32" - }, - { - "internalType": "bytes32", - "name": "messagePasserStorageRoot", - "type": "bytes32" - }, - { - "internalType": "bytes32", - 
"name": "latestBlockhash", - "type": "bytes32" - } - ], - "internalType": "struct Types.OutputRootProof", - "name": "_outputRootProof", - "type": "tuple" - }, - { - "internalType": "bytes[]", - "name": "_withdrawalProof", - "type": "bytes[]" - } - ], - "name": "proveWithdrawalTransaction", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, { "inputs": [ { @@ -739,19 +588,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [], - "name": "superRootsActive", - "outputs": [ - { - "internalType": "bool", - "name": "", - "type": "bool" - } - ], - "stateMutability": "view", - "type": "function" - }, { "inputs": [], "name": "superchainConfig", @@ -778,24 +614,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [ - { - "internalType": "contract IAnchorStateRegistry", - "name": "_anchorStateRegistry", - "type": "address" - }, - { - "internalType": "contract IETHLockbox", - "name": "_ethLockbox", - "type": "address" - } - ], - "name": "upgrade", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, { "inputs": [], "name": "version", @@ -809,25 +627,6 @@ "stateMutability": "pure", "type": "function" }, - { - "anonymous": false, - "inputs": [ - { - "indexed": true, - "internalType": "address", - "name": "lockbox", - "type": "address" - }, - { - "indexed": false, - "internalType": "uint256", - "name": "ethBalance", - "type": "uint256" - } - ], - "name": "ETHMigrated", - "type": "event" - }, { "anonymous": false, "inputs": [ @@ -841,37 +640,6 @@ "name": "Initialized", "type": "event" }, - { - "anonymous": false, - "inputs": [ - { - "indexed": false, - "internalType": "contract IETHLockbox", - "name": "oldLockbox", - "type": "address" - }, - { - "indexed": false, - "internalType": "contract IETHLockbox", - "name": "newLockbox", - "type": "address" - }, - { - "indexed": false, - "internalType": "contract IAnchorStateRegistry", - "name": "oldAnchorStateRegistry", - "type": "address" - }, - { - 
"indexed": false, - "internalType": "contract IAnchorStateRegistry", - "name": "newAnchorStateRegistry", - "type": "address" - } - ], - "name": "PortalMigrated", - "type": "event" - }, { "anonymous": false, "inputs": [ @@ -976,16 +744,6 @@ "name": "EmptyItem", "type": "error" }, - { - "inputs": [], - "name": "Encoding_EmptySuperRoot", - "type": "error" - }, - { - "inputs": [], - "name": "Encoding_InvalidSuperRootVersion", - "type": "error" - }, { "inputs": [], "name": "InvalidDataRemainder", @@ -1038,17 +796,12 @@ }, { "inputs": [], - "name": "OptimismPortal_InvalidMerkleProof", - "type": "error" - }, - { - "inputs": [], - "name": "OptimismPortal_InvalidOutputRootChainId", + "name": "OptimismPortal_InvalidLockboxState", "type": "error" }, { "inputs": [], - "name": "OptimismPortal_InvalidOutputRootIndex", + "name": "OptimismPortal_InvalidMerkleProof", "type": "error" }, { @@ -1066,16 +819,6 @@ "name": "OptimismPortal_InvalidRootClaim", "type": "error" }, - { - "inputs": [], - "name": "OptimismPortal_InvalidSuperRootProof", - "type": "error" - }, - { - "inputs": [], - "name": "OptimismPortal_MigratingToSameRegistry", - "type": "error" - }, { "inputs": [], "name": "OptimismPortal_NoReentrancy", @@ -1086,21 +829,11 @@ "name": "OptimismPortal_ProofNotOldEnough", "type": "error" }, - { - "inputs": [], - "name": "OptimismPortal_Unauthorized", - "type": "error" - }, { "inputs": [], "name": "OptimismPortal_Unproven", "type": "error" }, - { - "inputs": [], - "name": "OptimismPortal_WrongProofMethod", - "type": "error" - }, { "inputs": [], "name": "OutOfGas", diff --git a/packages/contracts-bedrock/snapshots/abi/OptimismPortalInterop.json b/packages/contracts-bedrock/snapshots/abi/OptimismPortalInterop.json new file mode 100644 index 0000000000000..d01d85015e416 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/abi/OptimismPortalInterop.json @@ -0,0 +1,1149 @@ +[ + { + "inputs": [ + { + "internalType": "uint256", + "name": "_proofMaturityDelaySeconds", + "type": 
"uint256" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "stateMutability": "payable", + "type": "receive" + }, + { + "inputs": [], + "name": "anchorStateRegistry", + "outputs": [ + { + "internalType": "contract IAnchorStateRegistry", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "_withdrawalHash", + "type": "bytes32" + }, + { + "internalType": "address", + "name": "_proofSubmitter", + "type": "address" + } + ], + "name": "checkWithdrawal", + "outputs": [], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "_value", + "type": "uint256" + }, + { + "internalType": "uint64", + "name": "_gasLimit", + "type": "uint64" + }, + { + "internalType": "bool", + "name": "_isCreation", + "type": "bool" + }, + { + "internalType": "bytes", + "name": "_data", + "type": "bytes" + } + ], + "name": "depositTransaction", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "contract IDisputeGame", + "name": "_disputeGame", + "type": "address" + } + ], + "name": "disputeGameBlacklist", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "disputeGameFactory", + "outputs": [ + { + "internalType": "contract IDisputeGameFactory", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "disputeGameFinalityDelaySeconds", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "donateETH", + "outputs": [], + "stateMutability": "payable", + "type": 
"function" + }, + { + "inputs": [], + "name": "ethLockbox", + "outputs": [ + { + "internalType": "contract IETHLockbox", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "nonce", + "type": "uint256" + }, + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "address", + "name": "target", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "gasLimit", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "data", + "type": "bytes" + } + ], + "internalType": "struct Types.WithdrawalTransaction", + "name": "_tx", + "type": "tuple" + } + ], + "name": "finalizeWithdrawalTransaction", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "nonce", + "type": "uint256" + }, + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "address", + "name": "target", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "gasLimit", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "data", + "type": "bytes" + } + ], + "internalType": "struct Types.WithdrawalTransaction", + "name": "_tx", + "type": "tuple" + }, + { + "internalType": "address", + "name": "_proofSubmitter", + "type": "address" + } + ], + "name": "finalizeWithdrawalTransactionExternalProof", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "name": "finalizedWithdrawals", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + 
"stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "guardian", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "initVersion", + "outputs": [ + { + "internalType": "uint8", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "contract ISystemConfig", + "name": "_systemConfig", + "type": "address" + }, + { + "internalType": "contract IAnchorStateRegistry", + "name": "_anchorStateRegistry", + "type": "address" + }, + { + "internalType": "contract IETHLockbox", + "name": "_ethLockbox", + "type": "address" + } + ], + "name": "initialize", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "l2Sender", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "migrateLiquidity", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "contract IETHLockbox", + "name": "_newLockbox", + "type": "address" + }, + { + "internalType": "contract IAnchorStateRegistry", + "name": "_newAnchorStateRegistry", + "type": "address" + } + ], + "name": "migrateToSuperRoots", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "_byteCount", + "type": "uint64" + } + ], + "name": "minimumGasLimit", + "outputs": [ + { + "internalType": "uint64", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "_withdrawalHash", + "type": "bytes32" + } + ], + "name": "numProofSubmitters", + "outputs": [ + { + "internalType": "uint256", + "name": 
"", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "params", + "outputs": [ + { + "internalType": "uint128", + "name": "prevBaseFee", + "type": "uint128" + }, + { + "internalType": "uint64", + "name": "prevBoughtGas", + "type": "uint64" + }, + { + "internalType": "uint64", + "name": "prevBlockNum", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "paused", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "proofMaturityDelaySeconds", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "proofSubmitters", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "nonce", + "type": "uint256" + }, + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "address", + "name": "target", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "gasLimit", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "data", + "type": "bytes" + } + ], + "internalType": "struct Types.WithdrawalTransaction", + "name": "_tx", + "type": "tuple" + }, + { + "internalType": "uint256", + "name": "_disputeGameIndex", + "type": "uint256" + }, + { + "components": [ + { + "internalType": "bytes32", + "name": "version", + "type": "bytes32" + }, + { + "internalType": 
"bytes32", + "name": "stateRoot", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "messagePasserStorageRoot", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "latestBlockhash", + "type": "bytes32" + } + ], + "internalType": "struct Types.OutputRootProof", + "name": "_outputRootProof", + "type": "tuple" + }, + { + "internalType": "bytes[]", + "name": "_withdrawalProof", + "type": "bytes[]" + } + ], + "name": "proveWithdrawalTransaction", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "nonce", + "type": "uint256" + }, + { + "internalType": "address", + "name": "sender", + "type": "address" + }, + { + "internalType": "address", + "name": "target", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "gasLimit", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "data", + "type": "bytes" + } + ], + "internalType": "struct Types.WithdrawalTransaction", + "name": "_tx", + "type": "tuple" + }, + { + "internalType": "contract IDisputeGame", + "name": "_disputeGameProxy", + "type": "address" + }, + { + "internalType": "uint256", + "name": "_outputRootIndex", + "type": "uint256" + }, + { + "components": [ + { + "internalType": "bytes1", + "name": "version", + "type": "bytes1" + }, + { + "internalType": "uint64", + "name": "timestamp", + "type": "uint64" + }, + { + "components": [ + { + "internalType": "uint256", + "name": "chainId", + "type": "uint256" + }, + { + "internalType": "bytes32", + "name": "root", + "type": "bytes32" + } + ], + "internalType": "struct Types.OutputRootWithChainId[]", + "name": "outputRoots", + "type": "tuple[]" + } + ], + "internalType": "struct Types.SuperRootProof", + "name": "_superRootProof", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "bytes32", + "name": "version", 
+ "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "stateRoot", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "messagePasserStorageRoot", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "latestBlockhash", + "type": "bytes32" + } + ], + "internalType": "struct Types.OutputRootProof", + "name": "_outputRootProof", + "type": "tuple" + }, + { + "internalType": "bytes[]", + "name": "_withdrawalProof", + "type": "bytes[]" + } + ], + "name": "proveWithdrawalTransaction", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + }, + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "provenWithdrawals", + "outputs": [ + { + "internalType": "contract IDisputeGame", + "name": "disputeGameProxy", + "type": "address" + }, + { + "internalType": "uint64", + "name": "timestamp", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "proxyAdmin", + "outputs": [ + { + "internalType": "contract IProxyAdmin", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "proxyAdminOwner", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "respectedGameType", + "outputs": [ + { + "internalType": "GameType", + "name": "", + "type": "uint32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "respectedGameTypeUpdatedAt", + "outputs": [ + { + "internalType": "uint64", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "superRootsActive", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + 
"stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "superchainConfig", + "outputs": [ + { + "internalType": "contract ISuperchainConfig", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "systemConfig", + "outputs": [ + { + "internalType": "contract ISystemConfig", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "contract IAnchorStateRegistry", + "name": "_anchorStateRegistry", + "type": "address" + }, + { + "internalType": "contract IETHLockbox", + "name": "_ethLockbox", + "type": "address" + } + ], + "name": "upgrade", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "version", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "lockbox", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "ethBalance", + "type": "uint256" + } + ], + "name": "ETHMigrated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint8", + "name": "version", + "type": "uint8" + } + ], + "name": "Initialized", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "contract IETHLockbox", + "name": "oldLockbox", + "type": "address" + }, + { + "indexed": false, + "internalType": "contract IETHLockbox", + "name": "newLockbox", + "type": "address" + }, + { + "indexed": false, + "internalType": "contract IAnchorStateRegistry", + "name": "oldAnchorStateRegistry", + "type": "address" + }, + { + "indexed": false, + "internalType": "contract IAnchorStateRegistry", + "name": "newAnchorStateRegistry", + "type": 
"address" + } + ], + "name": "PortalMigrated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "indexed": true, + "internalType": "uint256", + "name": "version", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "opaqueData", + "type": "bytes" + } + ], + "name": "TransactionDeposited", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "withdrawalHash", + "type": "bytes32" + }, + { + "indexed": false, + "internalType": "bool", + "name": "success", + "type": "bool" + } + ], + "name": "WithdrawalFinalized", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "withdrawalHash", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "address", + "name": "from", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "to", + "type": "address" + } + ], + "name": "WithdrawalProven", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "withdrawalHash", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "address", + "name": "proofSubmitter", + "type": "address" + } + ], + "name": "WithdrawalProvenExtension1", + "type": "event" + }, + { + "inputs": [], + "name": "ContentLengthMismatch", + "type": "error" + }, + { + "inputs": [], + "name": "EmptyItem", + "type": "error" + }, + { + "inputs": [], + "name": "Encoding_EmptySuperRoot", + "type": "error" + }, + { + "inputs": [], + "name": "Encoding_InvalidSuperRootVersion", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidDataRemainder", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidHeader", + 
"type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_AlreadyFinalized", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_BadTarget", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_CallPaused", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_CalldataTooLarge", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_GasEstimation", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_GasLimitTooLow", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_ImproperDisputeGame", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_InvalidDisputeGame", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_InvalidMerkleProof", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_InvalidOutputRootChainId", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_InvalidOutputRootIndex", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_InvalidOutputRootProof", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_InvalidProofTimestamp", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_InvalidRootClaim", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_InvalidSuperRootProof", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_MigratingToSameRegistry", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_NoReentrancy", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_ProofNotOldEnough", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_Unproven", + "type": "error" + }, + { + "inputs": [], + "name": "OptimismPortal_WrongProofMethod", + "type": "error" + }, + { + "inputs": [], + "name": "OutOfGas", + "type": "error" + }, + { + "inputs": [], + "name": "ProxyAdminOwnedBase_NotProxyAdmin", + "type": "error" + }, + { + "inputs": [], + "name": 
"ProxyAdminOwnedBase_NotProxyAdminOrProxyAdminOwner", + "type": "error" + }, + { + "inputs": [], + "name": "ProxyAdminOwnedBase_NotProxyAdminOwner", + "type": "error" + }, + { + "inputs": [], + "name": "ProxyAdminOwnedBase_NotResolvedDelegateProxy", + "type": "error" + }, + { + "inputs": [], + "name": "ProxyAdminOwnedBase_NotSharedProxyAdminOwner", + "type": "error" + }, + { + "inputs": [], + "name": "ProxyAdminOwnedBase_ProxyAdminNotFound", + "type": "error" + }, + { + "inputs": [], + "name": "ReinitializableBase_ZeroInitVersion", + "type": "error" + }, + { + "inputs": [], + "name": "UnexpectedList", + "type": "error" + }, + { + "inputs": [], + "name": "UnexpectedString", + "type": "error" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGame.json b/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGame.json index 7548b6d348473..8bb88f4663986 100644 --- a/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGame.json +++ b/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGame.json @@ -1057,6 +1057,11 @@ "name": "BadAuth", "type": "error" }, + { + "inputs": [], + "name": "BadExtraData", + "type": "error" + }, { "inputs": [], "name": "BlockNumberMatches", diff --git a/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGameV2.json b/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGameV2.json new file mode 100644 index 0000000000000..23dbddadfc085 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/abi/PermissionedDisputeGameV2.json @@ -0,0 +1,1226 @@ +[ + { + "inputs": [ + { + "components": [ + { + "internalType": "GameType", + "name": "gameType", + "type": "uint32" + }, + { + "internalType": "uint256", + "name": "maxGameDepth", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "splitDepth", + "type": "uint256" + }, + { + "internalType": "Duration", + "name": "clockExtension", + "type": "uint64" + }, + { + "internalType": "Duration", + 
"name": "maxClockDuration", + "type": "uint64" + } + ], + "internalType": "struct FaultDisputeGameV2.GameConstructorParams", + "name": "_params", + "type": "tuple" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "absolutePrestate", + "outputs": [ + { + "internalType": "Claim", + "name": "absolutePrestate_", + "type": "bytes32" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_ident", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_execLeafIdx", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_partOffset", + "type": "uint256" + } + ], + "name": "addLocalData", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "anchorStateRegistry", + "outputs": [ + { + "internalType": "contract IAnchorStateRegistry", + "name": "registry_", + "type": "address" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "Claim", + "name": "_disputed", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "_parentIndex", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "_claim", + "type": "bytes32" + } + ], + "name": "attack", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "bondDistributionMode", + "outputs": [ + { + "internalType": "enum BondDistributionMode", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "bytes32", + "name": "version", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "stateRoot", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "messagePasserStorageRoot", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "latestBlockhash", + "type": "bytes32" 
+ } + ], + "internalType": "struct Types.OutputRootProof", + "name": "_outputRootProof", + "type": "tuple" + }, + { + "internalType": "bytes", + "name": "_headerRLP", + "type": "bytes" + } + ], + "name": "challengeRootL2Block", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "challenger", + "outputs": [ + { + "internalType": "address", + "name": "challenger_", + "type": "address" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_recipient", + "type": "address" + } + ], + "name": "claimCredit", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "claimData", + "outputs": [ + { + "internalType": "uint32", + "name": "parentIndex", + "type": "uint32" + }, + { + "internalType": "address", + "name": "counteredBy", + "type": "address" + }, + { + "internalType": "address", + "name": "claimant", + "type": "address" + }, + { + "internalType": "uint128", + "name": "bond", + "type": "uint128" + }, + { + "internalType": "Claim", + "name": "claim", + "type": "bytes32" + }, + { + "internalType": "Position", + "name": "position", + "type": "uint128" + }, + { + "internalType": "Clock", + "name": "clock", + "type": "uint128" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "claimDataLen", + "outputs": [ + { + "internalType": "uint256", + "name": "len_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "Hash", + "name": "", + "type": "bytes32" + } + ], + "name": "claims", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "clockExtension", + "outputs": [ + { + "internalType": "Duration", 
+ "name": "clockExtension_", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "closeGame", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "createdAt", + "outputs": [ + { + "internalType": "Timestamp", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_recipient", + "type": "address" + } + ], + "name": "credit", + "outputs": [ + { + "internalType": "uint256", + "name": "credit_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "Claim", + "name": "_disputed", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "_parentIndex", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "_claim", + "type": "bytes32" + } + ], + "name": "defend", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "extraData", + "outputs": [ + { + "internalType": "bytes", + "name": "extraData_", + "type": "bytes" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "gameCreator", + "outputs": [ + { + "internalType": "address", + "name": "creator_", + "type": "address" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "gameData", + "outputs": [ + { + "internalType": "GameType", + "name": "gameType_", + "type": "uint32" + }, + { + "internalType": "Claim", + "name": "rootClaim_", + "type": "bytes32" + }, + { + "internalType": "bytes", + "name": "extraData_", + "type": "bytes" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "gameType", + "outputs": [ + { + "internalType": "GameType", + "name": "gameType_", + "type": "uint32" + } + ], + "stateMutability": "view", + "type": "function" + 
}, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + } + ], + "name": "getChallengerDuration", + "outputs": [ + { + "internalType": "Duration", + "name": "duration_", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + } + ], + "name": "getNumToResolve", + "outputs": [ + { + "internalType": "uint256", + "name": "numRemainingChildren_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "Position", + "name": "_position", + "type": "uint128" + } + ], + "name": "getRequiredBond", + "outputs": [ + { + "internalType": "uint256", + "name": "requiredBond_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "hasUnlockedCredit", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "initialize", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + "name": "l1Head", + "outputs": [ + { + "internalType": "Hash", + "name": "l1Head_", + "type": "bytes32" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "l2BlockNumber", + "outputs": [ + { + "internalType": "uint256", + "name": "l2BlockNumber_", + "type": "uint256" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "l2BlockNumberChallenged", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "l2BlockNumberChallenger", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": 
"address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "l2ChainId", + "outputs": [ + { + "internalType": "uint256", + "name": "l2ChainId_", + "type": "uint256" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "l2SequenceNumber", + "outputs": [ + { + "internalType": "uint256", + "name": "l2SequenceNumber_", + "type": "uint256" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "maxClockDuration", + "outputs": [ + { + "internalType": "Duration", + "name": "maxClockDuration_", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "maxGameDepth", + "outputs": [ + { + "internalType": "uint256", + "name": "maxGameDepth_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "Claim", + "name": "_disputed", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "_challengeIndex", + "type": "uint256" + }, + { + "internalType": "Claim", + "name": "_claim", + "type": "bytes32" + }, + { + "internalType": "bool", + "name": "_isAttack", + "type": "bool" + } + ], + "name": "move", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "normalModeCredit", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "proposer", + "outputs": [ + { + "internalType": "address", + "name": "proposer_", + "type": "address" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "refundModeCredit", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": 
"uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "resolutionCheckpoints", + "outputs": [ + { + "internalType": "bool", + "name": "initialCheckpointComplete", + "type": "bool" + }, + { + "internalType": "uint32", + "name": "subgameIndex", + "type": "uint32" + }, + { + "internalType": "Position", + "name": "leftmostPosition", + "type": "uint128" + }, + { + "internalType": "address", + "name": "counteredBy", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "resolve", + "outputs": [ + { + "internalType": "enum GameStatus", + "name": "status_", + "type": "uint8" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_numToResolve", + "type": "uint256" + } + ], + "name": "resolveClaim", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "resolvedAt", + "outputs": [ + { + "internalType": "Timestamp", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "resolvedSubgames", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "rootClaim", + "outputs": [ + { + "internalType": "Claim", + "name": "rootClaim_", + "type": "bytes32" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "splitDepth", + "outputs": [ + { + "internalType": "uint256", + "name": "splitDepth_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + 
"name": "startingBlockNumber", + "outputs": [ + { + "internalType": "uint256", + "name": "startingBlockNumber_", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "startingOutputRoot", + "outputs": [ + { + "internalType": "Hash", + "name": "root", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "l2SequenceNumber", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "startingRootHash", + "outputs": [ + { + "internalType": "Hash", + "name": "startingRootHash_", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "status", + "outputs": [ + { + "internalType": "enum GameStatus", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_claimIndex", + "type": "uint256" + }, + { + "internalType": "bool", + "name": "_isAttack", + "type": "bool" + }, + { + "internalType": "bytes", + "name": "_stateData", + "type": "bytes" + }, + { + "internalType": "bytes", + "name": "_proof", + "type": "bytes" + } + ], + "name": "step", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "subgames", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "version", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "inputs": [], + "name": "vm", + "outputs": [ + { + "internalType": "contract IBigStepper", + "name": "vm_", + "type": "address" + } + ], + "stateMutability": 
"pure", + "type": "function" + }, + { + "inputs": [], + "name": "wasRespectedGameTypeWhenCreated", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "weth", + "outputs": [ + { + "internalType": "contract IDelayedWETH", + "name": "weth_", + "type": "address" + } + ], + "stateMutability": "pure", + "type": "function" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "enum BondDistributionMode", + "name": "bondDistributionMode", + "type": "uint8" + } + ], + "name": "GameClosed", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "parentIndex", + "type": "uint256" + }, + { + "indexed": true, + "internalType": "Claim", + "name": "claim", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "address", + "name": "claimant", + "type": "address" + } + ], + "name": "Move", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "enum GameStatus", + "name": "status", + "type": "uint8" + } + ], + "name": "Resolved", + "type": "event" + }, + { + "inputs": [], + "name": "AlreadyInitialized", + "type": "error" + }, + { + "inputs": [], + "name": "AnchorRootNotFound", + "type": "error" + }, + { + "inputs": [], + "name": "BadAuth", + "type": "error" + }, + { + "inputs": [], + "name": "BadExtraData", + "type": "error" + }, + { + "inputs": [], + "name": "BlockNumberMatches", + "type": "error" + }, + { + "inputs": [], + "name": "BondTransferFailed", + "type": "error" + }, + { + "inputs": [], + "name": "CannotDefendRootClaim", + "type": "error" + }, + { + "inputs": [], + "name": "ClaimAboveSplit", + "type": "error" + }, + { + "inputs": [], + "name": "ClaimAlreadyExists", + "type": "error" + }, + { + "inputs": [], + "name": "ClaimAlreadyResolved", + "type": "error" + }, + { + "inputs": [], + "name": 
"ClockNotExpired", + "type": "error" + }, + { + "inputs": [], + "name": "ClockTimeExceeded", + "type": "error" + }, + { + "inputs": [], + "name": "ContentLengthMismatch", + "type": "error" + }, + { + "inputs": [], + "name": "DuplicateStep", + "type": "error" + }, + { + "inputs": [], + "name": "EmptyItem", + "type": "error" + }, + { + "inputs": [], + "name": "GameDepthExceeded", + "type": "error" + }, + { + "inputs": [], + "name": "GameNotFinalized", + "type": "error" + }, + { + "inputs": [], + "name": "GameNotInProgress", + "type": "error" + }, + { + "inputs": [], + "name": "GameNotResolved", + "type": "error" + }, + { + "inputs": [], + "name": "GamePaused", + "type": "error" + }, + { + "inputs": [], + "name": "IncorrectBondAmount", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidBondDistributionMode", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidChallengePeriod", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidClockExtension", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidDataRemainder", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidDisputedClaimIndex", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidHeader", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidHeaderRLP", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidLocalIdent", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidOutputRootProof", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidParent", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidPrestate", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidSplitDepth", + "type": "error" + }, + { + "inputs": [], + "name": "L2BlockNumberChallenged", + "type": "error" + }, + { + "inputs": [], + "name": "MaxDepthTooLarge", + "type": "error" + }, + { + "inputs": [], + "name": "NoCreditToClaim", + "type": "error" + }, + { + "inputs": [], + "name": "OutOfOrderResolution", + "type": "error" + }, + { + "inputs": [], 
+ "name": "ReservedGameType", + "type": "error" + }, + { + "inputs": [], + "name": "UnexpectedList", + "type": "error" + }, + { + "inputs": [ + { + "internalType": "Claim", + "name": "rootClaim", + "type": "bytes32" + } + ], + "name": "UnexpectedRootClaim", + "type": "error" + }, + { + "inputs": [], + "name": "UnexpectedString", + "type": "error" + }, + { + "inputs": [], + "name": "ValidStep", + "type": "error" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/SuperFaultDisputeGame.json b/packages/contracts-bedrock/snapshots/abi/SuperFaultDisputeGame.json index b746003f132ef..6ea8b243ed648 100644 --- a/packages/contracts-bedrock/snapshots/abi/SuperFaultDisputeGame.json +++ b/packages/contracts-bedrock/snapshots/abi/SuperFaultDisputeGame.json @@ -924,6 +924,11 @@ "name": "AnchorRootNotFound", "type": "error" }, + { + "inputs": [], + "name": "BadExtraData", + "type": "error" + }, { "inputs": [], "name": "BondTransferFailed", diff --git a/packages/contracts-bedrock/snapshots/abi/SuperPermissionedDisputeGame.json b/packages/contracts-bedrock/snapshots/abi/SuperPermissionedDisputeGame.json index 17c01e9593fa6..b9988791a0f77 100644 --- a/packages/contracts-bedrock/snapshots/abi/SuperPermissionedDisputeGame.json +++ b/packages/contracts-bedrock/snapshots/abi/SuperPermissionedDisputeGame.json @@ -965,6 +965,11 @@ "name": "BadAuth", "type": "error" }, + { + "inputs": [], + "name": "BadExtraData", + "type": "error" + }, { "inputs": [], "name": "BondTransferFailed", diff --git a/packages/contracts-bedrock/snapshots/abi/SuperchainConfig.json b/packages/contracts-bedrock/snapshots/abi/SuperchainConfig.json index ed7ad5c10f978..e430da3ccca4a 100644 --- a/packages/contracts-bedrock/snapshots/abi/SuperchainConfig.json +++ b/packages/contracts-bedrock/snapshots/abi/SuperchainConfig.json @@ -210,13 +210,6 @@ "stateMutability": "nonpayable", "type": "function" }, - { - "inputs": [], - "name": "upgrade", - "outputs": [], - "stateMutability": 
"nonpayable", - "type": "function" - }, { "inputs": [], "name": "version", diff --git a/packages/contracts-bedrock/snapshots/abi/SystemConfig.json b/packages/contracts-bedrock/snapshots/abi/SystemConfig.json index a295b986db223..10a956622f8f5 100644 --- a/packages/contracts-bedrock/snapshots/abi/SystemConfig.json +++ b/packages/contracts-bedrock/snapshots/abi/SystemConfig.json @@ -413,6 +413,25 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "name": "isFeatureEnabled", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "l1CrossDomainMessenger", @@ -478,6 +497,19 @@ "stateMutability": "pure", "type": "function" }, + { + "inputs": [], + "name": "minBaseFee", + "outputs": [ + { + "internalType": "uint64", + "name": "", + "type": "uint64" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "minimumGasLimit", @@ -704,6 +736,24 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "_feature", + "type": "bytes32" + }, + { + "internalType": "bool", + "name": "_enabled", + "type": "bool" + } + ], + "name": "setFeature", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [ { @@ -753,6 +803,19 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [ + { + "internalType": "uint64", + "name": "_minBaseFee", + "type": "uint64" + } + ], + "name": "setMinBaseFee", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [ { @@ -836,24 +899,6 @@ "stateMutability": "view", "type": "function" }, - { - "inputs": [ - { - "internalType": "uint256", - "name": "_l2ChainId", - "type": "uint256" - }, - { - "internalType": "contract ISuperchainConfig", - "name": "_superchainConfig", - 
"type": "address" - } - ], - "name": "upgrade", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - }, { "inputs": [], "name": "version", @@ -892,6 +937,25 @@ "name": "ConfigUpdate", "type": "event" }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "bytes32", + "name": "feature", + "type": "bytes32" + }, + { + "indexed": true, + "internalType": "bool", + "name": "enabled", + "type": "bool" + } + ], + "name": "FeatureSet", + "type": "event" + }, { "anonymous": false, "inputs": [ @@ -958,5 +1022,10 @@ "inputs": [], "name": "ReinitializableBase_ZeroInitVersion", "type": "error" + }, + { + "inputs": [], + "name": "SystemConfig_InvalidFeatureState", + "type": "error" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/abi/TimelockGuard.json b/packages/contracts-bedrock/snapshots/abi/TimelockGuard.json new file mode 100644 index 0000000000000..15145753d43b0 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/abi/TimelockGuard.json @@ -0,0 +1,623 @@ +[ + { + "inputs": [ + { + "internalType": "contract GnosisSafe", + "name": "_safe", + "type": "address" + }, + { + "internalType": "bytes32", + "name": "_txHash", + "type": "bytes32" + }, + { + "internalType": "uint256", + "name": "_nonce", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "_signatures", + "type": "bytes" + } + ], + "name": "cancelTransaction", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "contract GnosisSafe", + "name": "_safe", + "type": "address" + } + ], + "name": "cancellationThreshold", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "_txHash", + "type": "bytes32" + }, + { + "internalType": "bool", + "name": "_success", + "type": "bool" + } + ], + "name": 
"checkAfterExecution", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "_value", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "_data", + "type": "bytes" + }, + { + "internalType": "enum Enum.Operation", + "name": "_operation", + "type": "uint8" + }, + { + "internalType": "uint256", + "name": "_safeTxGas", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_baseGas", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "_gasPrice", + "type": "uint256" + }, + { + "internalType": "address", + "name": "_gasToken", + "type": "address" + }, + { + "internalType": "address payable", + "name": "_refundReceiver", + "type": "address" + }, + { + "internalType": "bytes", + "name": "", + "type": "bytes" + }, + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "checkTransaction", + "outputs": [], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "_timelockDelay", + "type": "uint256" + } + ], + "name": "configureTimelockGuard", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "contract GnosisSafe", + "name": "_safe", + "type": "address" + } + ], + "name": "maxCancellationThreshold", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "contract GnosisSafe", + "name": "_safe", + "type": "address" + } + ], + "name": "pendingTransactions", + "outputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "executionTime", + "type": "uint256" + }, + { + "internalType": "enum TimelockGuard.TransactionState", + "name": "state", + "type": "uint8" + }, + { + 
"components": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "data", + "type": "bytes" + }, + { + "internalType": "enum Enum.Operation", + "name": "operation", + "type": "uint8" + }, + { + "internalType": "uint256", + "name": "safeTxGas", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "baseGas", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "gasPrice", + "type": "uint256" + }, + { + "internalType": "address", + "name": "gasToken", + "type": "address" + }, + { + "internalType": "address payable", + "name": "refundReceiver", + "type": "address" + } + ], + "internalType": "struct TimelockGuard.ExecTransactionParams", + "name": "params", + "type": "tuple" + } + ], + "internalType": "struct TimelockGuard.ScheduledTransaction[]", + "name": "", + "type": "tuple[]" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "contract GnosisSafe", + "name": "_safe", + "type": "address" + }, + { + "internalType": "uint256", + "name": "_nonce", + "type": "uint256" + }, + { + "components": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "data", + "type": "bytes" + }, + { + "internalType": "enum Enum.Operation", + "name": "operation", + "type": "uint8" + }, + { + "internalType": "uint256", + "name": "safeTxGas", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "baseGas", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "gasPrice", + "type": "uint256" + }, + { + "internalType": "address", + "name": "gasToken", + "type": "address" + }, + { + "internalType": "address payable", + "name": "refundReceiver", + "type": "address" + } + ], + "internalType": "struct 
TimelockGuard.ExecTransactionParams", + "name": "_params", + "type": "tuple" + }, + { + "internalType": "bytes", + "name": "_signatures", + "type": "bytes" + } + ], + "name": "scheduleTransaction", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "contract GnosisSafe", + "name": "_safe", + "type": "address" + }, + { + "internalType": "bytes32", + "name": "_txHash", + "type": "bytes32" + } + ], + "name": "scheduledTransaction", + "outputs": [ + { + "components": [ + { + "internalType": "uint256", + "name": "executionTime", + "type": "uint256" + }, + { + "internalType": "enum TimelockGuard.TransactionState", + "name": "state", + "type": "uint8" + }, + { + "components": [ + { + "internalType": "address", + "name": "to", + "type": "address" + }, + { + "internalType": "uint256", + "name": "value", + "type": "uint256" + }, + { + "internalType": "bytes", + "name": "data", + "type": "bytes" + }, + { + "internalType": "enum Enum.Operation", + "name": "operation", + "type": "uint8" + }, + { + "internalType": "uint256", + "name": "safeTxGas", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "baseGas", + "type": "uint256" + }, + { + "internalType": "uint256", + "name": "gasPrice", + "type": "uint256" + }, + { + "internalType": "address", + "name": "gasToken", + "type": "address" + }, + { + "internalType": "address payable", + "name": "refundReceiver", + "type": "address" + } + ], + "internalType": "struct TimelockGuard.ExecTransactionParams", + "name": "params", + "type": "tuple" + } + ], + "internalType": "struct TimelockGuard.ScheduledTransaction", + "name": "", + "type": "tuple" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "name": "signCancellation", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": 
"contract GnosisSafe", + "name": "_safe", + "type": "address" + } + ], + "name": "timelockConfiguration", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "version", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "contract GnosisSafe", + "name": "safe", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "oldThreshold", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "newThreshold", + "type": "uint256" + } + ], + "name": "CancellationThresholdUpdated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "contract GnosisSafe", + "name": "safe", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "timelockDelay", + "type": "uint256" + } + ], + "name": "GuardConfigured", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "string", + "name": "message", + "type": "string" + } + ], + "name": "Message", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "contract GnosisSafe", + "name": "safe", + "type": "address" + }, + { + "indexed": true, + "internalType": "bytes32", + "name": "txHash", + "type": "bytes32" + } + ], + "name": "TransactionCancelled", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "contract GnosisSafe", + "name": "safe", + "type": "address" + }, + { + "indexed": false, + "internalType": "bytes32", + "name": "txHash", + "type": "bytes32" + } + ], + "name": "TransactionExecuted", + "type": "event" + }, + { + "anonymous": false, + 
"inputs": [ + { + "indexed": true, + "internalType": "contract GnosisSafe", + "name": "safe", + "type": "address" + }, + { + "indexed": true, + "internalType": "bytes32", + "name": "txHash", + "type": "bytes32" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "executionTime", + "type": "uint256" + } + ], + "name": "TransactionScheduled", + "type": "event" + }, + { + "inputs": [], + "name": "SemverComp_InvalidSemverParts", + "type": "error" + }, + { + "inputs": [], + "name": "TimelockGuard_GuardNotConfigured", + "type": "error" + }, + { + "inputs": [], + "name": "TimelockGuard_GuardNotEnabled", + "type": "error" + }, + { + "inputs": [], + "name": "TimelockGuard_InvalidTimelockDelay", + "type": "error" + }, + { + "inputs": [], + "name": "TimelockGuard_InvalidVersion", + "type": "error" + }, + { + "inputs": [], + "name": "TimelockGuard_TransactionAlreadyCancelled", + "type": "error" + }, + { + "inputs": [], + "name": "TimelockGuard_TransactionAlreadyExecuted", + "type": "error" + }, + { + "inputs": [], + "name": "TimelockGuard_TransactionAlreadyScheduled", + "type": "error" + }, + { + "inputs": [], + "name": "TimelockGuard_TransactionNotReady", + "type": "error" + }, + { + "inputs": [], + "name": "TimelockGuard_TransactionNotScheduled", + "type": "error" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index e5d428f03f23a..dd7d4fd380188 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -8,40 +8,44 @@ "sourceCodeHash": "0x6c9d3e2dee44c234d59ab93b6564536dfd807f1c4a02a82d5393bc53cb15b8b7" }, "src/L1/L1CrossDomainMessenger.sol:L1CrossDomainMessenger": { - "initCodeHash": "0x117e4126f2accbcd0c4de00b8d19f522e76396dd39145b4c2e2b4f9dfa1b03ef", - "sourceCodeHash": "0x66b6e4d41c40efcc50b644d22d736408e28a73a6b55b18fcbb89a83bd3230d53" + "initCodeHash": 
"0x3dc659aafb03bd357f92abfc6794af89ee0ddd5212364551637422bf8d0b00f9", + "sourceCodeHash": "0xef3d366cd22eac2dfd22a658e003700c679bd9c38758d9c21befa7335bbd82ad" }, "src/L1/L1ERC721Bridge.sol:L1ERC721Bridge": { - "initCodeHash": "0xf1eaecec5e9c9c3d143bc9980d15e4671e97cb840f044bc2189a9d42ea7a1ef7", - "sourceCodeHash": "0x24e870fc3620d07ef9e336bd56e0df0604df69a2909c1aaf709f2c253ad16c78" + "initCodeHash": "0x6f586bf82f6e89b75c2cc707e16a71ac921a911acf00f1594659f82e5c819fcc", + "sourceCodeHash": "0x4d48a9cf80dd288d1c54c9576a1a8c12c1c5b9f1694246d0ebba60996f786b69" }, "src/L1/L1StandardBridge.sol:L1StandardBridge": { - "initCodeHash": "0x11e28569436e16691f03820e0fd5252492706f5855b350439f695c7e4cd331c3", - "sourceCodeHash": "0x11b35ee81f797b30ee834e2ffad52686d2100d7ee139db4299b7d854dba25550" + "initCodeHash": "0xadd7863f0d14360be0f0c575d07aa304457b190b64a91a8976770fb7c34b28a3", + "sourceCodeHash": "0xfca613b5d055ffc4c3cbccb0773ddb9030abedc1aa6508c9e2e7727cc0cd617b" }, "src/L1/OPContractsManager.sol:OPContractsManager": { - "initCodeHash": "0x4db53075da877f54a7098a78bc1dd9c0048e21d35e205e22e84d5642332186a6", - "sourceCodeHash": "0x534b46026c3b77242ee7ab5728515deffdf8143c1b3b819fefa661f8b0b1793b" + "initCodeHash": "0x9f9a3738b05cae6597ea9a5c5747f7dbd3a5328b05a319955054fbd8b1aaa791", + "sourceCodeHash": "0x154c764083f353e2a56337c0dd5cbcd6f2e12c21966cd0580c7a0f96c4e147dd" }, "src/L1/OPContractsManagerStandardValidator.sol:OPContractsManagerStandardValidator": { - "initCodeHash": "0x4c9b9f7888ce14a672dae0f24af9cf20627b1629b5075a364ad17f4db0d06a70", - "sourceCodeHash": "0xb65ed0b9cc62c13a053f1b416792802269be37409df917c31e1140f064cf1073" + "initCodeHash": "0x57d6a6729d887ead009d518e8f17fa0d26bfc97b8efe1494ab4ef8dbb000d109", + "sourceCodeHash": "0x1d58891954cf782d2fe4f112b0c7fd25be991c2b8873f10d8545c653b517cac9" }, "src/L1/OptimismPortal2.sol:OptimismPortal2": { - "initCodeHash": "0x86755dd68913bc19b8b32b7fc9b9b16a732451b04bd6ca90f8f0375988a207f1", - "sourceCodeHash": 
"0x7162577df2261f8f65c8da16cba9f41320508ee145c7fa87dc0401a72a1e78b2" + "initCodeHash": "0xe0b9780a472f5706c92f45bd6846e4e2a843221d8151458aa03d6e98e1448c9c", + "sourceCodeHash": "0x4b66477a38b5508c03be2d859d72ff025a63cdea4907826689125c7ba464a5b8" + }, + "src/L1/OptimismPortalInterop.sol:OptimismPortalInterop": { + "initCodeHash": "0x087281cd2a48e882648c09fa90bfcca7487d222e16300f9372deba6b2b8ccfad", + "sourceCodeHash": "0x1cc641a4272aea85e13cbf42d9032d1b91ef858eafe3be6b5649cc8504c9cf69" }, "src/L1/ProtocolVersions.sol:ProtocolVersions": { - "initCodeHash": "0x5a76c8530cb24cf23d3baacc6eefaac226382af13f1e2a35535d2ec2b0573b29", - "sourceCodeHash": "0xb3e32b18c95d4940980333e1e99b4dcf42d8a8bfce78139db4dc3fb06e9349d0" + "initCodeHash": "0xcb59ad9a5ec2a0831b7f4daa74bdacba82ffa03035dafb499a732c641e017f4e", + "sourceCodeHash": "0x3b7b7a1023e6e87ce4680eee3cc4eebefc15b5ec80db3d39e824fbdd521762db" }, "src/L1/SuperchainConfig.sol:SuperchainConfig": { - "initCodeHash": "0x0ea921059d71fd19ac9c6e29c05b9724ad584eb27f74231de6df9551e9b13084", - "sourceCodeHash": "0xad12c20a00dc20683bd3f68e6ee254f968da6cc2d98930be6534107ee5cb11d9" + "initCodeHash": "0xfb8c98028f1a0e70bb1afbbc532035ea71b0724883554eeaae62e1910a6c1cd9", + "sourceCodeHash": "0xbf344c4369b8cb00ec7a3108f72795747f3bc59ab5b37ac18cf21e72e2979dbf" }, "src/L1/SystemConfig.sol:SystemConfig": { - "initCodeHash": "0x07b7039de5b8a4dc57642ee9696e949d70516b7f6dce41dde4920efb17105ef2", - "sourceCodeHash": "0x997212ceadabb306c2abd31918b09bccbba0b21662c1d8930a3599831c374b13" + "initCodeHash": "0x6e1e3cf76f08916bf6a3aed2b68772bd5ade935db4f0f876e682dc7a586334fb", + "sourceCodeHash": "0x006e3560f8b2e17133eb10a116916798ddc4345a7b006f8504dab69e810adb1c" }, "src/L2/BaseFeeVault.sol:BaseFeeVault": { "initCodeHash": "0x9b664e3d84ad510091337b4aacaa494b142512e2f6f7fbcdb6210ed62ca9b885", @@ -136,8 +140,8 @@ "sourceCodeHash": "0x734a6b2aa6406bc145d848ad6071d3af1d40852aeb8f4b2f6f51beaad476e2d3" }, "src/cannon/MIPS64.sol:MIPS64": { - "initCodeHash": 
"0xbc7c3c50e8c3679576f87d79c2dae05dd1174e64bdaa4c1e0857314618e415a3", - "sourceCodeHash": "0xf6e87bf46edca31c2b30c83fdf7b57a7851404743e16dd4f783be3a34c481d76" + "initCodeHash": "0x6a649986370d18e5fddcd89df73e520063fb373f7dba2f731a2b7e79a1c132a5", + "sourceCodeHash": "0x657afae82e6e3627389153736e568bf99498a272ec6d9ecc22ecfd645c56c453" }, "src/cannon/PreimageOracle.sol:PreimageOracle": { "initCodeHash": "0x6af5b0e83b455aab8d0946c160a4dc049a4e03be69f8a2a9e87b574f27b25a66", @@ -152,60 +156,76 @@ "sourceCodeHash": "0xdebf2ab3af4d5549c40e9dd9db6b2458af286f323b6891f3b0c4e89f3c8928db" }, "src/dispute/DisputeGameFactory.sol:DisputeGameFactory": { - "initCodeHash": "0xa3e6a7466e16e6b7a8ce7a257ec543c1bf675e24f53565080d826404654b9262", - "sourceCodeHash": "0x1871aaeba0658f17270190cc95ffff172d92dca795d698401ec34a7462bf5242" + "initCodeHash": "0x41ea0025ffbbb7dabc45da9b8afe4bce6b8ec1f132b424f351cf8c7d3fe15579", + "sourceCodeHash": "0x81ffb8f29b29774847e8b699d8719aaf6c633070841b8e2c3a651105822ce9ea" }, "src/dispute/FaultDisputeGame.sol:FaultDisputeGame": { - "initCodeHash": "0x9748700f873b6fe0599f9674a4c2dfbc9e35bbc918ebd2f7c54f709b1480df36", - "sourceCodeHash": "0xe6d4bdbfb05491164f203f1c5542a7ba961a20727a5b706b393f4f886ba5f901" + "initCodeHash": "0xe7d3c982532946d196d7efadb9e2576c76b8f9e0d1f885ac36977d6f3fb72a65", + "sourceCodeHash": "0x63222e6926c8dd050d1adc0e65039c42382f269c3b0e113751d79e7a5167b7ac" }, "src/dispute/PermissionedDisputeGame.sol:PermissionedDisputeGame": { - "initCodeHash": "0x1018dcbe7714a80a33dd8ad09bcc533dc6cbe1e97d2a17d3780887d406fc46a8", - "sourceCodeHash": "0x09455fe79619e63a08244647dca734fa58e96352fe21aeb289cc467437389125" + "initCodeHash": "0xefa478f976e55eb53fcccf653b202bc2532781230f20013450ce0845b77d815c", + "sourceCodeHash": "0x335a503a4cc02dd30d88d163393680f3fd89168e0faa4fa4b0ae5da399656f91" }, "src/dispute/SuperFaultDisputeGame.sol:SuperFaultDisputeGame": { - "initCodeHash": "0x687bde7b8632b47dc16530cc523946e4109e023f0d32c9bf0281b51f412f0f0d", - 
"sourceCodeHash": "0x7dd3852f6b744ddfb08699bf2d201eba92314ef70c9c62c06d84b0baac5f0299" + "initCodeHash": "0xe7591ef9c806c236d78ed4b83e81701732e0fe2237d3d455d26f054aefcc54b6", + "sourceCodeHash": "0x089f457ecaa85379bcdb4b843a2b2db9616d87f957f7964de23f80e7655d3f53" }, "src/dispute/SuperPermissionedDisputeGame.sol:SuperPermissionedDisputeGame": { - "initCodeHash": "0x9c954076097eb80f70333a387f12ba190eb9374aebb923ce30ecfe1d17030cc0", - "sourceCodeHash": "0x9baa0f9e744cc0ecc61d0fade8bffc18321b228833ea0904dc645f3975be9ed1" + "initCodeHash": "0x615baee73b605785025893fad655f8b7d8d546d77fbeca1f799000513ded3309", + "sourceCodeHash": "0x8fdd69d4bcd33a3d8b49a73ff5b6855f9ad5f7e2b7393e67cd755973b127b1e8" + }, + "src/dispute/v2/FaultDisputeGameV2.sol:FaultDisputeGameV2": { + "initCodeHash": "0xb5a7bfcbfcb445dc57fc88c07d7305191dc32cc0cf5580d50b4897229e4033c1", + "sourceCodeHash": "0x38fd8878a22564cd63e2bdc6af51edead373e2c4a90e631d2774078bbaa54ea6" + }, + "src/dispute/v2/PermissionedDisputeGameV2.sol:PermissionedDisputeGameV2": { + "initCodeHash": "0xcff1406639ff8a83a4c948f412329956104bc7ee30eb8da169a2ba5ef6d8848f", + "sourceCodeHash": "0x53fdae5faf97beed5f23d4f285bed06766161ab15c88e3a388f84808471a73c3" }, "src/legacy/DeployerWhitelist.sol:DeployerWhitelist": { - "initCodeHash": "0x53099379ed48b87f027d55712dbdd1da7d7099925426eb0531da9c0012e02c29", - "sourceCodeHash": "0xf22c94ed20c32a8ed2705a22d12c6969c3c3bad409c4efe2f95b0db74f210e10" + "initCodeHash": "0x2e0ef4c341367eb59cc6c25190c64eff441d3fe130189da91d4d126f6bdbc9b5", + "sourceCodeHash": "0x99fb495ee1339f399d9e14cc56e4b3b128c67778ad9ca7bad1efbb49eda2ec4c" }, "src/legacy/L1BlockNumber.sol:L1BlockNumber": { - "initCodeHash": "0x60dded11d35e42fe15ef5dd94d28aae6b8ff3e67c6fbbc667a6729fcb3ca7a9a", - "sourceCodeHash": "0x53ef11021a52e9c87024a870566ec5dba1d1a12752396e654904384efdd8203e" + "initCodeHash": "0x7549dcb63799c5f30b405c6f0c1264f55659e812ccab68bf1b36e8707f4ee198", + "sourceCodeHash": 
"0xf4b4cae7cc81a93d192ce8c54a7b543327458d53f3aaababacea843825bf3e1c" }, "src/legacy/LegacyMessagePasser.sol:LegacyMessagePasser": { - "initCodeHash": "0x3ca911b0578be7f8c91e7d01442a5609f04e5866768f99c8e31627c9ba79c9f0", - "sourceCodeHash": "0x62c9a6182d82692fb9c173ddb0d7978bcff2d1d4dc8cd2f10625e1e65bda6888" + "initCodeHash": "0x3a82e248129d19764bb975bb79b48a982f077f33bb508480bf8d2ec1c0c9810d", + "sourceCodeHash": "0x955bd0c9b47e43219865e4e92abf28d916c96de20cbdf2f94c8ab14d02083759" }, "src/safe/DeputyPauseModule.sol:DeputyPauseModule": { "initCodeHash": "0x4685af7d7c54b3bc5614afb735f34ae311d1d86d5112b9d28d931bc372b94ea8", "sourceCodeHash": "0x2dc7c513be25e1350ae1caa71adad91a7cde91125540699ce83489dd772330ad" }, "src/safe/LivenessGuard.sol:LivenessGuard": { - "initCodeHash": "0xc8e29e8b12f423c8cd229a38bc731240dd815d96f1b0ab96c71494dde63f6a81", - "sourceCodeHash": "0x72b8d8d855e7af8beee29330f6cb9b9069acb32e23ce940002ec9a41aa012a16" + "initCodeHash": "0x406db1c5a127f76970791b8a7f6ff62b81481ab25cf32615bfed551cdd5cd844", + "sourceCodeHash": "0xca3712277637e9d1b63ed16e35ef968032c12be9187c36146c171ac3e9f0cd73" }, "src/safe/LivenessModule.sol:LivenessModule": { - "initCodeHash": "0xde3b3273aa37604048b5fa228b90f3b05997db613dfcda45061545a669b2476a", - "sourceCodeHash": "0x918965e52bbd358ac827ebe35998f5d8fa5ca77d8eb9ab8986b44181b9aaa48a" + "initCodeHash": "0xa4a06e8778dbb6883ece8f56538ba15bc01b3031bba9a12ad9d187e7c8aaa942", + "sourceCodeHash": "0x950725f8b9ad9bb3b6b5e836f67e18db824a7864bac547ee0eeba88ada3de0e9" + }, + "src/safe/LivenessModule2.sol:LivenessModule2": { + "initCodeHash": "0x4679b41e5648a955a883efd0271453c8b13ff4846f853d372527ebb1e0905ab5", + "sourceCodeHash": "0xd3084fb5446782cb6d0adb4278ef0a12c418dd538b4b14b90407b971b44cc35b" + }, + "src/safe/TimelockGuard.sol:TimelockGuard": { + "initCodeHash": "0x1f8188872de93ce59e8f0bd415d4fbf30209bc668c09623f61d6fe592eee895a", + "sourceCodeHash": "0x0dada93f051d29dabbb6de3e1c1ece14b95cd20dc854454926d19ea1ebcae436" }, 
"src/universal/OptimismMintableERC20.sol:OptimismMintableERC20": { - "initCodeHash": "0xc3289416829b252c830ad7d389a430986a7404df4fe0be37cb19e1c40907f047", - "sourceCodeHash": "0xf5e29dd5c750ea935c7281ec916ba5277f5610a0a9e984e53ae5d5245b3cf2f4" + "initCodeHash": "0x3c85eed0d017dca8eda6396aa842ddc12492587b061e8c756a8d32c4610a9658", + "sourceCodeHash": "0x7023665d461f173417d932b55010b8f6c34f2bbaf56cfe4e1b15862c08cbcaac" }, "src/universal/OptimismMintableERC20Factory.sol:OptimismMintableERC20Factory": { - "initCodeHash": "0xdb4d93a65cf9d3e3af77d3d62249f06580e80a0431542350f953f0a4041566b4", - "sourceCodeHash": "0xd1bad4408c26eb9c7b0ddcb088f0d4e3be73a43d899263ec8610f4d41a178ec7" + "initCodeHash": "0x747baf403205b900e1144038f2b807c84059229aedda8c91936798e1403eda39", + "sourceCodeHash": "0xf71e16aaad1ec2459040ab8c93b7188b2c04c671c21b4d43fba75cab80ed1b21" }, "src/universal/StorageSetter.sol:StorageSetter": { - "initCodeHash": "0x8831c079f7b7a52679e8a15e0ea14e30ea7bb4f93feed0fcd369942fe8c1f1ec", - "sourceCodeHash": "0x42151e2547ec5270353977fd66e78fa1fde18f362d7021cf7ddce16d5201b3ec" + "initCodeHash": "0x1fd4b84add5c5ed80205cea0bbca9115e98d0efb416d9cedc12ce0cff9919bda", + "sourceCodeHash": "0xcfbaae5729ca367328ea546bbbe96194341586b2f4bfbd0cfa84acc09324d59b" }, "src/vendor/asterisc/RISCV.sol:RISCV": { "initCodeHash": "0x4cd639f7da4eaf86a98eb3227fe285c0e8380ff5c79c4745aefed804cef52162", diff --git a/packages/contracts-bedrock/snapshots/storageLayout/DisputeGameFactory.json b/packages/contracts-bedrock/snapshots/storageLayout/DisputeGameFactory.json index f53a86716ca10..e8edf11aae6ea 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/DisputeGameFactory.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/DisputeGameFactory.json @@ -61,5 +61,12 @@ "offset": 0, "slot": "104", "type": "GameId[]" + }, + { + "bytes": "32", + "label": "gameArgs", + "offset": 0, + "slot": "105", + "type": "mapping(GameType => bytes)" } ] \ No newline at end of file diff --git 
a/packages/contracts-bedrock/snapshots/storageLayout/FaultDisputeGameV2.json b/packages/contracts-bedrock/snapshots/storageLayout/FaultDisputeGameV2.json new file mode 100644 index 0000000000000..efae9aab937c1 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/storageLayout/FaultDisputeGameV2.json @@ -0,0 +1,121 @@ +[ + { + "bytes": "8", + "label": "createdAt", + "offset": 0, + "slot": "0", + "type": "Timestamp" + }, + { + "bytes": "8", + "label": "resolvedAt", + "offset": 8, + "slot": "0", + "type": "Timestamp" + }, + { + "bytes": "1", + "label": "status", + "offset": 16, + "slot": "0", + "type": "enum GameStatus" + }, + { + "bytes": "1", + "label": "initialized", + "offset": 17, + "slot": "0", + "type": "bool" + }, + { + "bytes": "1", + "label": "l2BlockNumberChallenged", + "offset": 18, + "slot": "0", + "type": "bool" + }, + { + "bytes": "20", + "label": "l2BlockNumberChallenger", + "offset": 0, + "slot": "1", + "type": "address" + }, + { + "bytes": "32", + "label": "claimData", + "offset": 0, + "slot": "2", + "type": "struct FaultDisputeGameV2.ClaimData[]" + }, + { + "bytes": "32", + "label": "normalModeCredit", + "offset": 0, + "slot": "3", + "type": "mapping(address => uint256)" + }, + { + "bytes": "32", + "label": "claims", + "offset": 0, + "slot": "4", + "type": "mapping(Hash => bool)" + }, + { + "bytes": "32", + "label": "subgames", + "offset": 0, + "slot": "5", + "type": "mapping(uint256 => uint256[])" + }, + { + "bytes": "32", + "label": "resolvedSubgames", + "offset": 0, + "slot": "6", + "type": "mapping(uint256 => bool)" + }, + { + "bytes": "32", + "label": "resolutionCheckpoints", + "offset": 0, + "slot": "7", + "type": "mapping(uint256 => struct FaultDisputeGameV2.ResolutionCheckpoint)" + }, + { + "bytes": "64", + "label": "startingOutputRoot", + "offset": 0, + "slot": "8", + "type": "struct Proposal" + }, + { + "bytes": "1", + "label": "wasRespectedGameTypeWhenCreated", + "offset": 0, + "slot": "10", + "type": "bool" + }, + { + "bytes": "32", 
+ "label": "refundModeCredit", + "offset": 0, + "slot": "11", + "type": "mapping(address => uint256)" + }, + { + "bytes": "32", + "label": "hasUnlockedCredit", + "offset": 0, + "slot": "12", + "type": "mapping(address => bool)" + }, + { + "bytes": "1", + "label": "bondDistributionMode", + "offset": 0, + "slot": "13", + "type": "enum BondDistributionMode" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/GameHelper.json b/packages/contracts-bedrock/snapshots/storageLayout/GameHelper.json new file mode 100644 index 0000000000000..0637a088a01e8 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/storageLayout/GameHelper.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/LivenessModule2.json b/packages/contracts-bedrock/snapshots/storageLayout/LivenessModule2.json new file mode 100644 index 0000000000000..478b0b25136c3 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/storageLayout/LivenessModule2.json @@ -0,0 +1,16 @@ +[ + { + "bytes": "32", + "label": "livenessSafeConfiguration", + "offset": 0, + "slot": "0", + "type": "mapping(address => struct LivenessModule2.ModuleConfig)" + }, + { + "bytes": "32", + "label": "challengeStartTime", + "offset": 0, + "slot": "1", + "type": "mapping(address => uint256)" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json index a22b6b8e38333..0637a088a01e8 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManager.json @@ -1,16 +1 @@ -[ - { - "bytes": "32", - "label": "L1_CONTRACTS_RELEASE", - "offset": 0, - "slot": "0", - "type": "string" - }, - { - "bytes": "1", - "label": "isRC", - "offset": 0, - "slot": "1", - "type": "bool" - } -] \ No newline at end of file 
+[] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerContractsContainer.json b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerContractsContainer.json index a0a6fd6d439c5..d87deb94bc76b 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerContractsContainer.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerContractsContainer.json @@ -7,7 +7,7 @@ "type": "struct OPContractsManager.Blueprints" }, { - "bytes": "416", + "bytes": "448", "label": "implementation", "offset": 0, "slot": "13", diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerStandardValidator.json b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerStandardValidator.json index 6ac032e39f1b9..4b248cfef65d2 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerStandardValidator.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OPContractsManagerStandardValidator.json @@ -43,65 +43,79 @@ }, { "bytes": "20", - "label": "ethLockboxImpl", + "label": "optimismPortalInteropImpl", "offset": 0, "slot": "6", "type": "address" }, { "bytes": "20", - "label": "systemConfigImpl", + "label": "ethLockboxImpl", "offset": 0, "slot": "7", "type": "address" }, { "bytes": "20", - "label": "optimismMintableERC20FactoryImpl", + "label": "systemConfigImpl", "offset": 0, "slot": "8", "type": "address" }, { "bytes": "20", - "label": "l1CrossDomainMessengerImpl", + "label": "optimismMintableERC20FactoryImpl", "offset": 0, "slot": "9", "type": "address" }, { "bytes": "20", - "label": "l1StandardBridgeImpl", + "label": "l1CrossDomainMessengerImpl", "offset": 0, "slot": "10", "type": "address" }, { "bytes": "20", - "label": "disputeGameFactoryImpl", + "label": "l1StandardBridgeImpl", "offset": 0, "slot": "11", "type": "address" }, { "bytes": "20", - "label": "anchorStateRegistryImpl", + "label": 
"disputeGameFactoryImpl", "offset": 0, "slot": "12", "type": "address" }, { "bytes": "20", - "label": "delayedWETHImpl", + "label": "anchorStateRegistryImpl", "offset": 0, "slot": "13", "type": "address" }, { "bytes": "20", - "label": "mipsImpl", + "label": "delayedWETHImpl", "offset": 0, "slot": "14", "type": "address" + }, + { + "bytes": "20", + "label": "mipsImpl", + "offset": 0, + "slot": "15", + "type": "address" + }, + { + "bytes": "32", + "label": "devFeatureBitmap", + "offset": 0, + "slot": "16", + "type": "bytes32" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OptimismPortal2.json b/packages/contracts-bedrock/snapshots/storageLayout/OptimismPortal2.json index 8dc6639f30366..649ad99cba265 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OptimismPortal2.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OptimismPortal2.json @@ -141,7 +141,7 @@ }, { "bytes": "1", - "label": "superRootsActive", + "label": "spacer_63_20_1", "offset": 20, "slot": "63", "type": "bool" diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OptimismPortalInterop.json b/packages/contracts-bedrock/snapshots/storageLayout/OptimismPortalInterop.json new file mode 100644 index 0000000000000..c0bcf34cc422d --- /dev/null +++ b/packages/contracts-bedrock/snapshots/storageLayout/OptimismPortalInterop.json @@ -0,0 +1,149 @@ +[ + { + "bytes": "1", + "label": "_initialized", + "offset": 0, + "slot": "0", + "type": "uint8" + }, + { + "bytes": "1", + "label": "_initializing", + "offset": 1, + "slot": "0", + "type": "bool" + }, + { + "bytes": "32", + "label": "params", + "offset": 0, + "slot": "1", + "type": "struct ResourceMetering.ResourceParams" + }, + { + "bytes": "1536", + "label": "__gap", + "offset": 0, + "slot": "2", + "type": "uint256[48]" + }, + { + "bytes": "20", + "label": "l2Sender", + "offset": 0, + "slot": "50", + "type": "address" + }, + { + "bytes": "32", + "label": "finalizedWithdrawals", + 
"offset": 0, + "slot": "51", + "type": "mapping(bytes32 => bool)" + }, + { + "bytes": "32", + "label": "spacer_52_0_32", + "offset": 0, + "slot": "52", + "type": "bytes32" + }, + { + "bytes": "1", + "label": "spacer_53_0_1", + "offset": 0, + "slot": "53", + "type": "bool" + }, + { + "bytes": "20", + "label": "spacer_53_1_20", + "offset": 1, + "slot": "53", + "type": "address" + }, + { + "bytes": "20", + "label": "spacer_54_0_20", + "offset": 0, + "slot": "54", + "type": "address" + }, + { + "bytes": "20", + "label": "systemConfig", + "offset": 0, + "slot": "55", + "type": "contract ISystemConfig" + }, + { + "bytes": "20", + "label": "spacer_56_0_20", + "offset": 0, + "slot": "56", + "type": "address" + }, + { + "bytes": "32", + "label": "provenWithdrawals", + "offset": 0, + "slot": "57", + "type": "mapping(bytes32 => mapping(address => struct OptimismPortalInterop.ProvenWithdrawal))" + }, + { + "bytes": "32", + "label": "spacer_58_0_32", + "offset": 0, + "slot": "58", + "type": "bytes32" + }, + { + "bytes": "4", + "label": "spacer_59_0_4", + "offset": 0, + "slot": "59", + "type": "GameType" + }, + { + "bytes": "8", + "label": "spacer_59_4_8", + "offset": 4, + "slot": "59", + "type": "uint64" + }, + { + "bytes": "32", + "label": "proofSubmitters", + "offset": 0, + "slot": "60", + "type": "mapping(bytes32 => address[])" + }, + { + "bytes": "32", + "label": "spacer_61_0_32", + "offset": 0, + "slot": "61", + "type": "uint256" + }, + { + "bytes": "20", + "label": "anchorStateRegistry", + "offset": 0, + "slot": "62", + "type": "contract IAnchorStateRegistry" + }, + { + "bytes": "20", + "label": "ethLockbox", + "offset": 0, + "slot": "63", + "type": "contract IETHLockbox" + }, + { + "bytes": "1", + "label": "superRootsActive", + "offset": 20, + "slot": "63", + "type": "bool" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/PermissionedDisputeGameV2.json 
b/packages/contracts-bedrock/snapshots/storageLayout/PermissionedDisputeGameV2.json new file mode 100644 index 0000000000000..efae9aab937c1 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/storageLayout/PermissionedDisputeGameV2.json @@ -0,0 +1,121 @@ +[ + { + "bytes": "8", + "label": "createdAt", + "offset": 0, + "slot": "0", + "type": "Timestamp" + }, + { + "bytes": "8", + "label": "resolvedAt", + "offset": 8, + "slot": "0", + "type": "Timestamp" + }, + { + "bytes": "1", + "label": "status", + "offset": 16, + "slot": "0", + "type": "enum GameStatus" + }, + { + "bytes": "1", + "label": "initialized", + "offset": 17, + "slot": "0", + "type": "bool" + }, + { + "bytes": "1", + "label": "l2BlockNumberChallenged", + "offset": 18, + "slot": "0", + "type": "bool" + }, + { + "bytes": "20", + "label": "l2BlockNumberChallenger", + "offset": 0, + "slot": "1", + "type": "address" + }, + { + "bytes": "32", + "label": "claimData", + "offset": 0, + "slot": "2", + "type": "struct FaultDisputeGameV2.ClaimData[]" + }, + { + "bytes": "32", + "label": "normalModeCredit", + "offset": 0, + "slot": "3", + "type": "mapping(address => uint256)" + }, + { + "bytes": "32", + "label": "claims", + "offset": 0, + "slot": "4", + "type": "mapping(Hash => bool)" + }, + { + "bytes": "32", + "label": "subgames", + "offset": 0, + "slot": "5", + "type": "mapping(uint256 => uint256[])" + }, + { + "bytes": "32", + "label": "resolvedSubgames", + "offset": 0, + "slot": "6", + "type": "mapping(uint256 => bool)" + }, + { + "bytes": "32", + "label": "resolutionCheckpoints", + "offset": 0, + "slot": "7", + "type": "mapping(uint256 => struct FaultDisputeGameV2.ResolutionCheckpoint)" + }, + { + "bytes": "64", + "label": "startingOutputRoot", + "offset": 0, + "slot": "8", + "type": "struct Proposal" + }, + { + "bytes": "1", + "label": "wasRespectedGameTypeWhenCreated", + "offset": 0, + "slot": "10", + "type": "bool" + }, + { + "bytes": "32", + "label": "refundModeCredit", + "offset": 0, + "slot": "11", + 
"type": "mapping(address => uint256)" + }, + { + "bytes": "32", + "label": "hasUnlockedCredit", + "offset": 0, + "slot": "12", + "type": "mapping(address => bool)" + }, + { + "bytes": "1", + "label": "bondDistributionMode", + "offset": 0, + "slot": "13", + "type": "enum BondDistributionMode" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/SystemConfig.json b/packages/contracts-bedrock/snapshots/storageLayout/SystemConfig.json index be5a739fa699e..3a80e68ed8b92 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/SystemConfig.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/SystemConfig.json @@ -124,5 +124,19 @@ "offset": 0, "slot": "108", "type": "contract ISuperchainConfig" + }, + { + "bytes": "8", + "label": "minBaseFee", + "offset": 20, + "slot": "108", + "type": "uint64" + }, + { + "bytes": "32", + "label": "isFeatureEnabled", + "offset": 0, + "slot": "109", + "type": "mapping(bytes32 => bool)" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/TimelockGuard.json b/packages/contracts-bedrock/snapshots/storageLayout/TimelockGuard.json new file mode 100644 index 0000000000000..97c754bfc8c5a --- /dev/null +++ b/packages/contracts-bedrock/snapshots/storageLayout/TimelockGuard.json @@ -0,0 +1,9 @@ +[ + { + "bytes": "32", + "label": "_safeState", + "offset": 0, + "slot": "0", + "type": "mapping(contract GnosisSafe => struct TimelockGuard.SafeState)" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/src/L1/L1CrossDomainMessenger.sol b/packages/contracts-bedrock/src/L1/L1CrossDomainMessenger.sol index cee1d3765eb8e..b6208c8018091 100644 --- a/packages/contracts-bedrock/src/L1/L1CrossDomainMessenger.sol +++ b/packages/contracts-bedrock/src/L1/L1CrossDomainMessenger.sol @@ -36,14 +36,14 @@ contract L1CrossDomainMessenger is CrossDomainMessenger, ProxyAdminOwnedBase, Re address private spacer_253_0_20; /// @notice Semantic 
version. - /// @custom:semver 2.9.0 - string public constant version = "2.9.0"; + /// @custom:semver 2.11.0 + string public constant version = "2.11.0"; /// @notice Contract of the SystemConfig. ISystemConfig public systemConfig; /// @notice Constructs the L1CrossDomainMessenger contract. - constructor() ReinitializableBase(2) { + constructor() ReinitializableBase(3) { _disableInitializers(); } @@ -60,16 +60,6 @@ contract L1CrossDomainMessenger is CrossDomainMessenger, ProxyAdminOwnedBase, Re __CrossDomainMessenger_init({ _otherMessenger: CrossDomainMessenger(Predeploys.L2_CROSS_DOMAIN_MESSENGER) }); } - /// @notice Upgrades the contract to have a reference to the SystemConfig. - /// @param _systemConfig The new SystemConfig contract. - function upgrade(ISystemConfig _systemConfig) external reinitializer(initVersion()) { - // Upgrade transactions must come from the ProxyAdmin or its owner. - _assertOnlyProxyAdminOrProxyAdminOwner(); - - // Now perform upgrade logic. - systemConfig = _systemConfig; - } - /// @inheritdoc CrossDomainMessenger function paused() public view override returns (bool) { return systemConfig.paused(); diff --git a/packages/contracts-bedrock/src/L1/L1ERC721Bridge.sol b/packages/contracts-bedrock/src/L1/L1ERC721Bridge.sol index 9fe0ce893331e..98c557a603c37 100644 --- a/packages/contracts-bedrock/src/L1/L1ERC721Bridge.sol +++ b/packages/contracts-bedrock/src/L1/L1ERC721Bridge.sol @@ -33,14 +33,14 @@ contract L1ERC721Bridge is ERC721Bridge, ProxyAdminOwnedBase, ReinitializableBas address private spacer_50_0_20; /// @notice Semantic version. - /// @custom:semver 2.7.0 - string public constant version = "2.7.0"; + /// @custom:semver 2.9.0 + string public constant version = "2.9.0"; /// @notice Address of the SystemConfig contract. ISystemConfig public systemConfig; /// @notice Constructs the L1ERC721Bridge contract. 
- constructor() ERC721Bridge() ReinitializableBase(2) { + constructor() ERC721Bridge() ReinitializableBase(3) { _disableInitializers(); } @@ -62,16 +62,6 @@ contract L1ERC721Bridge is ERC721Bridge, ProxyAdminOwnedBase, ReinitializableBas __ERC721Bridge_init({ _messenger: _messenger, _otherBridge: ERC721Bridge(payable(Predeploys.L2_ERC721_BRIDGE)) }); } - /// @notice Upgrades the contract to have a reference to the SystemConfig. - /// @param _systemConfig SystemConfig contract. - function upgrade(ISystemConfig _systemConfig) external reinitializer(initVersion()) { - // Upgrade transactions must come from the ProxyAdmin or its owner. - _assertOnlyProxyAdminOrProxyAdminOwner(); - - // Now perform upgrade logic. - systemConfig = _systemConfig; - } - /// @inheritdoc ERC721Bridge function paused() public view override returns (bool) { return systemConfig.paused(); diff --git a/packages/contracts-bedrock/src/L1/L1StandardBridge.sol b/packages/contracts-bedrock/src/L1/L1StandardBridge.sol index 504ba854fb5ba..465ec7f7ed5eb 100644 --- a/packages/contracts-bedrock/src/L1/L1StandardBridge.sol +++ b/packages/contracts-bedrock/src/L1/L1StandardBridge.sol @@ -77,8 +77,8 @@ contract L1StandardBridge is StandardBridge, ProxyAdminOwnedBase, Reinitializabl ); /// @notice Semantic version. - /// @custom:semver 2.6.0 - string public constant version = "2.6.0"; + /// @custom:semver 2.8.0 + string public constant version = "2.8.0"; /// @custom:legacy /// @custom:spacer superchainConfig @@ -94,7 +94,7 @@ contract L1StandardBridge is StandardBridge, ProxyAdminOwnedBase, Reinitializabl ISystemConfig public systemConfig; /// @notice Constructs the L1StandardBridge contract. 
- constructor() StandardBridge() ReinitializableBase(2) { + constructor() StandardBridge() ReinitializableBase(3) { _disableInitializers(); } @@ -119,16 +119,6 @@ contract L1StandardBridge is StandardBridge, ProxyAdminOwnedBase, Reinitializabl }); } - /// @notice Upgrades the contract to have a reference to the SystemConfig. - /// @param _systemConfig SystemConfig contract. - function upgrade(ISystemConfig _systemConfig) external reinitializer(initVersion()) { - // Upgrade transactions must come from the ProxyAdmin or its owner. - _assertOnlyProxyAdminOrProxyAdminOwner(); - - // Now perform upgrade logic. - systemConfig = _systemConfig; - } - /// @inheritdoc StandardBridge function paused() public view override returns (bool) { return systemConfig.paused(); diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index ee4ca4922df34..142fa67ea358f 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -1,12 +1,18 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; +// Contracts +import { OPContractsManagerStandardValidator } from "src/L1/OPContractsManagerStandardValidator.sol"; + // Libraries import { Blueprint } from "src/libraries/Blueprint.sol"; import { Constants } from "src/libraries/Constants.sol"; import { Bytes } from "src/libraries/Bytes.sol"; -import { Claim, Duration, GameType, Hash, GameTypes, Proposal } from "src/dispute/lib/Types.sol"; +import { Claim, Duration, GameType, GameTypes, Proposal } from "src/dispute/lib/Types.sol"; import { Strings } from "@openzeppelin/contracts/utils/Strings.sol"; +import { SemverComp } from "src/libraries/SemverComp.sol"; +import { Features } from "src/libraries/Features.sol"; +import { DevFeatures } from "src/libraries/DevFeatures.sol"; // Interfaces import { ISemver } from "interfaces/universal/ISemver.sol"; @@ -25,14 +31,13 @@ import { 
ISuperPermissionedDisputeGame } from "interfaces/dispute/ISuperPermissi import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; import { IOptimismPortal2 as IOptimismPortal } from "interfaces/L1/IOptimismPortal2.sol"; +import { IOptimismPortalInterop } from "interfaces/L1/IOptimismPortalInterop.sol"; import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; import { IL1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; import { IL1StandardBridge } from "interfaces/L1/IL1StandardBridge.sol"; import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; -import { IHasSuperchainConfig } from "interfaces/L1/IHasSuperchainConfig.sol"; import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; -import { OPContractsManagerStandardValidator } from "src/L1/OPContractsManagerStandardValidator.sol"; contract OPContractsManagerContractsContainer { /// @notice Addresses of the Blueprint contracts. @@ -43,12 +48,30 @@ contract OPContractsManagerContractsContainer { /// @notice Addresses of the latest implementation contracts. OPContractsManager.Implementations internal implementation; + /// @notice Bitmap of development features that are enabled. We keep the development feature + /// bitmap here rather than in the actual OPCM because other contracts always get a + /// reference to this but not to the OPCM itself. + bytes32 public immutable devFeatureBitmap; + + /// @notice Thrown when a development feature is enabled in production. + error OPContractsManagerContractsContainer_DevFeatureInProd(); + + /// @param _blueprints The blueprint contract addresses. + /// @param _implementations The implementation contract addresses. + /// @param _devFeatureBitmap The bitmap of development features that are enabled. 
constructor( OPContractsManager.Blueprints memory _blueprints, - OPContractsManager.Implementations memory _implementations + OPContractsManager.Implementations memory _implementations, + bytes32 _devFeatureBitmap ) { blueprint = _blueprints; implementation = _implementations; + devFeatureBitmap = _devFeatureBitmap; + + // Development features MUST NOT be enabled on Mainnet. + if (block.chainid == 1 && !_isTestingEnvironment() && uint256(_devFeatureBitmap) != 0) { + revert OPContractsManagerContractsContainer_DevFeatureInProd(); + } } function blueprints() public view returns (OPContractsManager.Blueprints memory) { @@ -58,6 +81,24 @@ contract OPContractsManagerContractsContainer { function implementations() public view returns (OPContractsManager.Implementations memory) { return implementation; } + + /// @notice Returns the status of a development feature. Note that this function does not check + /// that the input feature represents a single feature and the bitwise AND operation + /// allows for multiple features to be enabled at once. Users should generally check + /// for only a single feature at a time. + /// @param _feature The feature to check. + /// @return True if the feature is enabled, false otherwise. + function isDevFeatureEnabled(bytes32 _feature) public view returns (bool) { + return DevFeatures.isDevFeatureEnabled(devFeatureBitmap, _feature); + } + + /// @notice Returns true if the contract is running in a testing environment. Checks that the + /// code for the address 0xbeefcafe is not zero, which is an address that should never + /// have any code in production environments but can be made to have code in tests. + /// @return True if the contract is running in a testing environment, false otherwise. 
+ function _isTestingEnvironment() public view returns (bool) { + return address(0xbeefcafe).code.length > 0; + } } abstract contract OPContractsManagerBase { @@ -97,6 +138,21 @@ abstract contract OPContractsManagerBase { return contractsContainer.blueprints(); } + /// @notice Retrieves the development feature bitmap stored in this OPCM contract + function devFeatureBitmap() public view returns (bytes32) { + return contractsContainer.devFeatureBitmap(); + } + + /// @notice Retrieves the status of a development feature. Note that this function does not check + /// that the input feature represents a single feature and the bitwise AND operation + /// allows for multiple features to be enabled at once. Users should generally check + /// for only a single feature at a time. + /// @param _feature The feature to check. + /// @return True if the feature is enabled, false otherwise. + function isDevFeatureEnabled(bytes32 _feature) public view returns (bool) { + return contractsContainer.isDevFeatureEnabled(_feature); + } + /// @notice Maps an L2 chain ID to an L1 batch inbox address as defined by the standard /// configuration's convention. This convention is `versionByte || keccak256(bytes32(chainId))[:19]`, /// where || denotes concatenation`, versionByte is 0x00, and chainId is a uint256. @@ -128,12 +184,9 @@ abstract contract OPContractsManagerBase { /// This method should be used as the salt mixer when deploying contracts when there is no user /// provided salt mixer. This protects against a situation where multiple chains with the same /// L2 chain ID exist, which would otherwise result in address collisions. - function reusableSaltMixer(OPContractsManager.OpChainConfig memory _opChainConfig) - internal - pure - returns (string memory) - { - return string(bytes.concat(bytes32(uint256(uint160(address(_opChainConfig.systemConfigProxy)))))); + /// @param _systemConfigProxy The SystemConfig contract found in the OpChainConfig of the chain being deployed to. 
+ function reusableSaltMixer(ISystemConfig _systemConfigProxy) internal pure returns (string memory) { + return string(bytes.concat(bytes32(uint256(uint160(address(_systemConfigProxy)))))); } /// @notice Deterministically deploys a new proxy contract owned by the provided ProxyAdmin. @@ -279,6 +332,7 @@ abstract contract OPContractsManagerBase { if ( gameType.raw() == GameTypes.SUPER_CANNON.raw() || gameType.raw() == GameTypes.SUPER_PERMISSIONED_CANNON.raw() + || gameType.raw() == GameTypes.SUPER_CANNON_KONA.raw() ) { l2ChainId = 0; } else { @@ -405,7 +459,13 @@ contract OPContractsManagerGameTypeAdder is OPContractsManagerBase { OPContractsManager.Blueprints memory bps = getBlueprints(); // Determine the contract name and blueprints for the game type. - if (gameConfig.disputeGameType.raw() == GameTypes.CANNON.raw()) { + if ( + gameConfig.disputeGameType.raw() == GameTypes.CANNON.raw() + || ( + isDevFeatureEnabled(DevFeatures.CANNON_KONA) + && gameConfig.disputeGameType.raw() == GameTypes.CANNON_KONA.raw() + ) + ) { gameContractName = "FaultDisputeGame"; blueprint1 = bps.permissionlessDisputeGame1; blueprint2 = bps.permissionlessDisputeGame2; @@ -415,7 +475,13 @@ contract OPContractsManagerGameTypeAdder is OPContractsManagerBase { blueprint1 = bps.permissionedDisputeGame1; blueprint2 = bps.permissionedDisputeGame2; gameL2ChainId = l2ChainId; - } else if (gameConfig.disputeGameType.raw() == GameTypes.SUPER_CANNON.raw()) { + } else if ( + gameConfig.disputeGameType.raw() == GameTypes.SUPER_CANNON.raw() + || ( + isDevFeatureEnabled(DevFeatures.CANNON_KONA) + && gameConfig.disputeGameType.raw() == GameTypes.SUPER_CANNON_KONA.raw() + ) + ) { gameContractName = "SuperFaultDisputeGame"; blueprint1 = bps.superPermissionlessDisputeGame1; blueprint2 = bps.superPermissionlessDisputeGame2; @@ -497,28 +563,28 @@ contract OPContractsManagerGameTypeAdder is OPContractsManagerBase { return outputs; } - /// @notice Updates the prestate hash for a given game type while keeping all 
other game + /// @notice Updates the prestate hash for all deployed dispute games while keeping all other game /// parameters exactly the same. Currently requires deploying a new implementation /// as there is no way to update the prestate on an existing implementation. /// @param _prestateUpdateInputs The new prestate hash to use. - function updatePrestate(OPContractsManager.OpChainConfig[] memory _prestateUpdateInputs) public { + function updatePrestate(OPContractsManager.UpdatePrestateInput[] memory _prestateUpdateInputs) public { // Loop through each chain and prestate hash for (uint256 i = 0; i < _prestateUpdateInputs.length; i++) { - // Ensure that the prestate is not the zero hash. - if (Claim.unwrap(_prestateUpdateInputs[i].absolutePrestate) == bytes32(0)) { - revert OPContractsManager.PrestateRequired(); - } - // Grab the DisputeGameFactory. IDisputeGameFactory dgf = IDisputeGameFactory(_prestateUpdateInputs[i].systemConfigProxy.disputeGameFactory()); + uint256 numGameTypes = isDevFeatureEnabled(DevFeatures.CANNON_KONA) ? 6 : 4; // Create an array of all of the potential game types to update. - GameType[] memory gameTypes = new GameType[](4); + GameType[] memory gameTypes = new GameType[](numGameTypes); gameTypes[0] = GameTypes.CANNON; gameTypes[1] = GameTypes.PERMISSIONED_CANNON; gameTypes[2] = GameTypes.SUPER_CANNON; gameTypes[3] = GameTypes.SUPER_PERMISSIONED_CANNON; + if (isDevFeatureEnabled(DevFeatures.CANNON_KONA)) { + gameTypes[4] = GameTypes.CANNON_KONA; + gameTypes[5] = GameTypes.SUPER_CANNON_KONA; + } // Track if we have a legacy game, super game, or both. We will revert if this function // is ever called with a mix of legacy and super games. 
Should never happen in @@ -542,6 +608,7 @@ contract OPContractsManagerGameTypeAdder is OPContractsManagerBase { if ( gameType.raw() == GameTypes.SUPER_CANNON.raw() || gameType.raw() == GameTypes.SUPER_PERMISSIONED_CANNON.raw() + || gameType.raw() == GameTypes.SUPER_CANNON_KONA.raw() ) { hasSuperGame = true; } else { @@ -553,15 +620,26 @@ contract OPContractsManagerGameTypeAdder is OPContractsManagerBase { revert OPContractsManagerGameTypeAdder_MixedGameTypes(); } + // Select the prestate to use + Claim prestate = gameType.raw() == GameTypes.CANNON_KONA.raw() + || gameType.raw() == GameTypes.SUPER_CANNON_KONA.raw() + ? _prestateUpdateInputs[i].cannonKonaPrestate + : _prestateUpdateInputs[i].cannonPrestate; + + // Ensure that the prestate is not the zero hash. + if (Claim.unwrap(prestate) == bytes32(0)) { + revert OPContractsManager.PrestateRequired(); + } + // Grab the existing game constructor params and init bond. IFaultDisputeGame.GameConstructorParams memory gameParams = getGameConstructorParams(existingGame); // Create a new game input with the updated prestate. OPContractsManager.AddGameInput memory input = OPContractsManager.AddGameInput({ - disputeAbsolutePrestate: _prestateUpdateInputs[i].absolutePrestate, - saltMixer: reusableSaltMixer(_prestateUpdateInputs[i]), + disputeAbsolutePrestate: prestate, + saltMixer: reusableSaltMixer(_prestateUpdateInputs[i].systemConfigProxy), systemConfig: _prestateUpdateInputs[i].systemConfigProxy, - proxyAdmin: _prestateUpdateInputs[i].proxyAdmin, + proxyAdmin: _prestateUpdateInputs[i].systemConfigProxy.proxyAdmin(), delayedWETH: IDelayedWETH(payable(address(gameParams.weth))), disputeGameType: gameParams.gameType, disputeMaxGameDepth: gameParams.maxGameDepth, @@ -592,265 +670,203 @@ contract OPContractsManagerUpgrader is OPContractsManagerBase { /// @notice Thrown when the SuperchainConfig contract does not match the unified config. 
error OPContractsManagerUpgrader_SuperchainConfigMismatch(); + /// @notice Thrown when upgrade is called with a chain whose superchainConfig is not upgraded. + error OPContractsManagerUpgrader_SuperchainConfigNeedsUpgrade(uint256 index); + + /// @notice Thrown when upgradeSuperchainConfig is called with a superchainConfig that is already up to date. + error OPContractsManagerUpgrader_SuperchainConfigAlreadyUpToDate(); + /// @param _contractsContainer The OPContractsManagerContractsContainer to use. constructor(OPContractsManagerContractsContainer _contractsContainer) OPContractsManagerBase(_contractsContainer) { } /// @notice Upgrades a set of chains to the latest implementation contracts - /// @param _superchainConfig The SuperchainConfig contract to upgrade - /// @param _superchainProxyAdmin The ProxyAdmin contract for the SuperchainConfig /// @param _opChainConfigs Array of OpChain structs, one per chain to upgrade - /// @dev This function is intended to be called via DELEGATECALL from the Upgrade Controller Safe - function upgrade( - ISuperchainConfig _superchainConfig, - IProxyAdmin _superchainProxyAdmin, - OPContractsManager.OpChainConfig[] memory _opChainConfigs - ) - external - virtual - { + /// @dev This function is intended to be DELEGATECALLed by an address that is the common owner of every chain in + /// `_opChainConfigs`'s ProxyAdmin. + /// @dev This function requires that each chain's superchainConfig is already upgraded. + function upgrade(OPContractsManager.OpChainConfig[] memory _opChainConfigs) external virtual { + // Grab the implementations. OPContractsManager.Implementations memory impls = getImplementations(); - // If the SuperchainConfig is not already upgraded, upgrade it. NOTE that this type of - // upgrade means that chains can ONLY be upgraded via this OPCM contract if they use the - // same SuperchainConfig contract. We will assert this later. 
- if (_superchainProxyAdmin.getProxyImplementation(address(_superchainConfig)) != impls.superchainConfigImpl) { - // Attempt to upgrade. If the ProxyAdmin is not the SuperchainConfig's admin, this will revert. - upgradeToAndCall( - _superchainProxyAdmin, - address(_superchainConfig), - impls.superchainConfigImpl, - abi.encodeCall(ISuperchainConfig.upgrade, ()) - ); - } - // Loop through each chain and upgrade. for (uint256 i = 0; i < _opChainConfigs.length; i++) { assertValidOpChainConfig(_opChainConfigs[i]); + uint256 l2ChainId = _opChainConfigs[i].systemConfigProxy.l2ChainId(); - // Use the SystemConfig to grab the DisputeGameFactory address. - IDisputeGameFactory dgf = IDisputeGameFactory(_opChainConfigs[i].systemConfigProxy.disputeGameFactory()); - - // Need to upgrade the DisputeGameFactory implementation, no internal upgrade call. - upgradeTo(_opChainConfigs[i].proxyAdmin, address(dgf), impls.disputeGameFactoryImpl); + // Grab the SuperchainConfig. + ISuperchainConfig superchainConfig = _opChainConfigs[i].systemConfigProxy.superchainConfig(); - // All chains have the PermissionedDisputeGame, grab that. - IPermissionedDisputeGame permissionedDisputeGame = - IPermissionedDisputeGame(address(getGameImplementation(dgf, GameTypes.PERMISSIONED_CANNON))); - - // Grab the L2 chain ID from the PermissionedDisputeGame. - uint256 l2ChainId = getL2ChainId(IFaultDisputeGame(address(permissionedDisputeGame))); - - // Pull out the OptimismPortal from the SystemConfig. - IOptimismPortal optimismPortal = - IOptimismPortal(payable(_opChainConfigs[i].systemConfigProxy.optimismPortal())); - - // Assert that SuperchainConfig matches the unified config. - if (optimismPortal.superchainConfig() != _superchainConfig) { - revert OPContractsManagerUpgrader_SuperchainConfigMismatch(); + // If the SuperchainConfig is not already upgraded, revert. 
+ if (SemverComp.lt(superchainConfig.version(), ISuperchainConfig(impls.superchainConfigImpl).version())) { + revert OPContractsManagerUpgrader_SuperchainConfigNeedsUpgrade(i); } - // Start by upgrading the SystemConfig contract to have the l2ChainId and - // SuperchainConfig. We can get the SuperchainConfig from the existing OptimismPortal, - // we need to inline the call to avoid a stack too deep error. - upgradeToAndCall( - _opChainConfigs[i].proxyAdmin, - address(_opChainConfigs[i].systemConfigProxy), - impls.systemConfigImpl, - abi.encodeCall(ISystemConfig.upgrade, (l2ChainId, _superchainConfig)) - ); + // Do the chain upgrade. + // All of your updates should be done in this internal function unless you're making a + // change to how upgrades work in general. + _doChainUpgrade(impls, _opChainConfigs[i], l2ChainId); - // Separate context to avoid stack too deep. - IAnchorStateRegistry newAnchorStateRegistryProxy; - { - // Grab the current respectedGameType from the OptimismPortal contract before the - // upgrade. - GameType respectedGameType = optimismPortal.respectedGameType(); + // Emit the upgraded event with the address of the caller. Since this will be a delegatecall, + // the caller will be the value of the ADDRESS opcode. + emit Upgraded(l2ChainId, _opChainConfigs[i].systemConfigProxy, address(this)); + } + } - // Deploy a new AnchorStateRegistry contract. - // We use the SOT suffix to avoid CREATE2 conflicts with the existing ASR. - newAnchorStateRegistryProxy = IAnchorStateRegistry( - deployProxy({ - _l2ChainId: l2ChainId, - _proxyAdmin: _opChainConfigs[i].proxyAdmin, - _saltMixer: reusableSaltMixer(_opChainConfigs[i]), - _contractName: "AnchorStateRegistry-U16" - }) - ); + /// @notice Performs an upgrade for a specific chain. + /// @param _impls The implementations of the contracts. + /// @param _opChainConfig The configuration of the chain to upgrade. + /// @param _l2ChainId The L2 chain ID of the chain to upgrade. 
+ function _doChainUpgrade( + OPContractsManager.Implementations memory _impls, + OPContractsManager.OpChainConfig memory _opChainConfig, + uint256 _l2ChainId + ) + internal + { + // Upgrade the SystemConfig first. + upgradeTo(_opChainConfig.proxyAdmin, address(_opChainConfig.systemConfigProxy), _impls.systemConfigImpl); - // Separate context to avoid stack too deep. - { - // Get the existing anchor root from the old AnchorStateRegistry contract. - // Get the AnchorStateRegistry from the PermissionedDisputeGame. - (Hash root, uint256 l2BlockNumber) = getAnchorStateRegistry( - IFaultDisputeGame(address(permissionedDisputeGame)) - ).anchors(respectedGameType); - - // Upgrade and initialize the AnchorStateRegistry contract. - // Since this is a net-new contract, we need to initialize it. - upgradeToAndCall( - _opChainConfigs[i].proxyAdmin, - address(newAnchorStateRegistryProxy), - impls.anchorStateRegistryImpl, - abi.encodeCall( - IAnchorStateRegistry.initialize, - ( - _opChainConfigs[i].systemConfigProxy, - dgf, - Proposal({ root: root, l2SequenceNumber: l2BlockNumber }), - respectedGameType - ) - ) - ); - } - } + // Grab the OptimismPortal contract. + IOptimismPortal optimismPortal = IOptimismPortal(payable(_opChainConfig.systemConfigProxy.optimismPortal())); + // Upgrade the OptimismPortal contract. + if (isDevFeatureEnabled(DevFeatures.OPTIMISM_PORTAL_INTEROP)) { + // This does NOT run in production. // Upgrade the OptimismPortal contract implementation. - upgradeTo(_opChainConfigs[i].proxyAdmin, address(optimismPortal), impls.optimismPortalImpl); + upgradeTo(_opChainConfig.proxyAdmin, address(optimismPortal), _impls.optimismPortalInteropImpl); - // Separate context to avoid stack too deep. - { + // If we don't already have an ETHLockbox, deploy and initialize it. + IETHLockbox ethLockbox = optimismPortal.ethLockbox(); + if (address(ethLockbox) == address(0)) { // Deploy the ETHLockbox proxy. 
- IETHLockbox ethLockbox = IETHLockbox( + ethLockbox = IETHLockbox( deployProxy({ - _l2ChainId: l2ChainId, - _proxyAdmin: _opChainConfigs[i].proxyAdmin, - _saltMixer: reusableSaltMixer(_opChainConfigs[i]), - _contractName: "ETHLockbox-U16" + _l2ChainId: _l2ChainId, + _proxyAdmin: _opChainConfig.proxyAdmin, + _saltMixer: reusableSaltMixer(_opChainConfig.systemConfigProxy), + _contractName: "ETHLockbox-U16a" }) ); - // Upgrade the OptimismPortal contract first so that the SystemConfig will have - // the SuperchainConfig reference required in the ETHLockbox. - optimismPortal.upgrade(newAnchorStateRegistryProxy, ethLockbox); - // Initialize the ETHLockbox setting the OptimismPortal as an authorized portal. IOptimismPortal[] memory portals = new IOptimismPortal[](1); portals[0] = optimismPortal; upgradeToAndCall( - _opChainConfigs[i].proxyAdmin, + _opChainConfig.proxyAdmin, address(ethLockbox), - impls.ethLockboxImpl, - abi.encodeCall(IETHLockbox.initialize, (_opChainConfigs[i].systemConfigProxy, portals)) + _impls.ethLockboxImpl, + abi.encodeCall(IETHLockbox.initialize, (_opChainConfig.systemConfigProxy, portals)) ); // Migrate liquidity from the OptimismPortal to the ETHLockbox. - optimismPortal.migrateLiquidity(); + IOptimismPortalInterop(payable(optimismPortal)).migrateLiquidity(); } - // Separate context to avoid stack too deep. - { - // Grab chain addresses here. We need to do this after the SystemConfig upgrade or - // the addresses will be incorrect. - ISystemConfig.Addresses memory opChainAddrs = _opChainConfigs[i].systemConfigProxy.getAddresses(); + // Use the existing AnchorStateRegistry reference. + IAnchorStateRegistry anchorStateRegistry = optimismPortal.anchorStateRegistry(); - // Upgrade the L1CrossDomainMessenger contract. 
- upgradeToAndCall( - _opChainConfigs[i].proxyAdmin, - address(IL1CrossDomainMessenger(opChainAddrs.l1CrossDomainMessenger)), - impls.l1CrossDomainMessengerImpl, - abi.encodeCall(IL1CrossDomainMessenger.upgrade, (_opChainConfigs[i].systemConfigProxy)) - ); + // Upgrade the OptimismPortal contract first so that the SystemConfig will have + // the SuperchainConfig reference required in the ETHLockbox. + IOptimismPortalInterop(payable(optimismPortal)).upgrade(anchorStateRegistry, ethLockbox); + } else { + // This runs in production. + upgradeTo(_opChainConfig.proxyAdmin, address(optimismPortal), _impls.optimismPortalImpl); + } - // Upgrade the L1StandardBridge contract. - upgradeToAndCall( - _opChainConfigs[i].proxyAdmin, - address(IL1StandardBridge(payable(opChainAddrs.l1StandardBridge))), - impls.l1StandardBridgeImpl, - abi.encodeCall(IL1StandardBridge.upgrade, (_opChainConfigs[i].systemConfigProxy)) - ); + // Upgrade the OptimismMintableERC20Factory contract. + upgradeTo( + _opChainConfig.proxyAdmin, + _opChainConfig.systemConfigProxy.optimismMintableERC20Factory(), + _impls.optimismMintableERC20FactoryImpl + ); - // Upgrade the L1ERC721Bridge contract. - upgradeToAndCall( - _opChainConfigs[i].proxyAdmin, - address(IL1ERC721Bridge(opChainAddrs.l1ERC721Bridge)), - impls.l1ERC721BridgeImpl, - abi.encodeCall(IL1ERC721Bridge.upgrade, (_opChainConfigs[i].systemConfigProxy)) - ); - } + // Use the SystemConfig to grab the DisputeGameFactory address. + IDisputeGameFactory dgf = IDisputeGameFactory(_opChainConfig.systemConfigProxy.disputeGameFactory()); - // We also need to redeploy the dispute games because the AnchorStateRegistry is new. - // Separate context to avoid stack too deep. - { - // Create a new DelayedWETH for the permissioned game. 
- IDelayedWETH permissionedDelayedWeth = IDelayedWETH( - payable( - deployProxy({ - _l2ChainId: l2ChainId, - _proxyAdmin: _opChainConfigs[i].proxyAdmin, - _saltMixer: reusableSaltMixer(_opChainConfigs[i]), - _contractName: "PermissionedDelayedWETH-U16" - }) - ) - ); + // Need to upgrade the DisputeGameFactory implementation, no internal upgrade call. + upgradeTo(_opChainConfig.proxyAdmin, address(dgf), _impls.disputeGameFactoryImpl); - // Initialize the DelayedWETH. - upgradeToAndCall( - _opChainConfigs[i].proxyAdmin, - address(permissionedDelayedWeth), - impls.delayedWETHImpl, - abi.encodeCall(IDelayedWETH.initialize, (_opChainConfigs[i].systemConfigProxy)) - ); + // Separate context to avoid stack too deep. + { + // Grab chain addresses here. We need to do this after the SystemConfig upgrade or + // the addresses will be incorrect. + ISystemConfig.Addresses memory opChainAddrs = _opChainConfig.systemConfigProxy.getAddresses(); + + // Upgrade the L1CrossDomainMessenger contract. + upgradeTo( + _opChainConfig.proxyAdmin, + address(IL1CrossDomainMessenger(opChainAddrs.l1CrossDomainMessenger)), + _impls.l1CrossDomainMessengerImpl + ); - // Deploy and set a new permissioned game to update its prestate. - deployAndSetNewGameImpl({ - _l2ChainId: l2ChainId, - _disputeGame: IDisputeGame(address(permissionedDisputeGame)), - _newDelayedWeth: permissionedDelayedWeth, - _newAnchorStateRegistryProxy: newAnchorStateRegistryProxy, - _gameType: GameTypes.PERMISSIONED_CANNON, - _opChainConfig: _opChainConfigs[i] - }); - } + // Upgrade the L1StandardBridge contract. + upgradeTo( + _opChainConfig.proxyAdmin, + address(IL1StandardBridge(payable(opChainAddrs.l1StandardBridge))), + _impls.l1StandardBridgeImpl + ); - // Separate context to avoid stack too deep. - { - // Now retrieve the permissionless game. - IFaultDisputeGame permissionlessDisputeGame = - IFaultDisputeGame(address(getGameImplementation(dgf, GameTypes.CANNON))); - - // If it exists, replace its implementation. 
- if (address(permissionlessDisputeGame) != address(0)) { - // Create a new DelayedWETH for the permissionless game. - IDelayedWETH permissionlessDelayedWeth = IDelayedWETH( - payable( - deployProxy({ - _l2ChainId: l2ChainId, - _proxyAdmin: _opChainConfigs[i].proxyAdmin, - _saltMixer: reusableSaltMixer(_opChainConfigs[i]), - _contractName: "PermissionlessDelayedWETH-U16" - }) - ) - ); - - // Initialize the DelayedWETH. - upgradeToAndCall( - _opChainConfigs[i].proxyAdmin, - address(permissionlessDelayedWeth), - impls.delayedWETHImpl, - abi.encodeCall(IDelayedWETH.initialize, (_opChainConfigs[i].systemConfigProxy)) - ); - - // Deploy and set a new permissionless game to update its prestate - deployAndSetNewGameImpl({ - _l2ChainId: l2ChainId, - _disputeGame: IDisputeGame(address(permissionlessDisputeGame)), - _newDelayedWeth: permissionlessDelayedWeth, - _newAnchorStateRegistryProxy: newAnchorStateRegistryProxy, - _gameType: GameTypes.CANNON, - _opChainConfig: _opChainConfigs[i] - }); - } - } + // Upgrade the L1ERC721Bridge contract. + upgradeTo( + _opChainConfig.proxyAdmin, + address(IL1ERC721Bridge(opChainAddrs.l1ERC721Bridge)), + _impls.l1ERC721BridgeImpl + ); + } - // Emit the upgraded event with the address of the caller. Since this will be a delegatecall, - // the caller will be the value of the ADDRESS opcode. - emit Upgraded(l2ChainId, _opChainConfigs[i].systemConfigProxy, address(this)); + // All chains have the PermissionedDisputeGame, grab that. + IPermissionedDisputeGame permissionedDisputeGame = + IPermissionedDisputeGame(address(getGameImplementation(dgf, GameTypes.PERMISSIONED_CANNON))); + + // Update the PermissionedDisputeGame. + // We're reusing the same DelayedWETH and ASR contracts. 
+ deployAndSetNewGameImpl({ + _l2ChainId: _l2ChainId, + _disputeGame: IDisputeGame(address(permissionedDisputeGame)), + _newDelayedWeth: permissionedDisputeGame.weth(), + _newAnchorStateRegistryProxy: permissionedDisputeGame.anchorStateRegistry(), + _gameType: GameTypes.PERMISSIONED_CANNON, + _opChainConfig: _opChainConfig + }); + + // Now retrieve the permissionless game. + IFaultDisputeGame permissionlessDisputeGame = + IFaultDisputeGame(address(getGameImplementation(dgf, GameTypes.CANNON))); + + // If it exists, replace its implementation. + // We're reusing the same DelayedWETH and ASR contracts. + if (address(permissionlessDisputeGame) != address(0)) { + deployAndSetNewGameImpl({ + _l2ChainId: _l2ChainId, + _disputeGame: IDisputeGame(address(permissionlessDisputeGame)), + _newDelayedWeth: permissionlessDisputeGame.weth(), + _newAnchorStateRegistryProxy: permissionlessDisputeGame.anchorStateRegistry(), + _gameType: GameTypes.CANNON, + _opChainConfig: _opChainConfig + }); } } - /// @notice Retrieves the Superchain Config for a bridge contract - function getSuperchainConfig(address _hasSuperchainConfig) internal view returns (ISuperchainConfig) { - return IHasSuperchainConfig(_hasSuperchainConfig).superchainConfig(); + /// @notice Upgrades the SuperchainConfig contract. + /// @param _superchainConfig The SuperchainConfig contract to upgrade. + /// @param _superchainProxyAdmin The ProxyAdmin contract to use for the upgrade. + /// @dev This function is intended to be DELEGATECALLed by the superchainConfig's ProxyAdminOwner. + /// @dev This function will revert if the SuperchainConfig is already at or above the target version. + function upgradeSuperchainConfig(ISuperchainConfig _superchainConfig, IProxyAdmin _superchainProxyAdmin) external { + // Only upgrade the superchainConfig if the current version is less than the target version. 
+ if ( + SemverComp.gte( + _superchainConfig.version(), ISuperchainConfig(getImplementations().superchainConfigImpl).version() + ) + ) { + revert OPContractsManagerUpgrader_SuperchainConfigAlreadyUpToDate(); + } + + // Grab the implementations. + OPContractsManager.Implementations memory impls = getImplementations(); + + // Attempt to upgrade. If the ProxyAdmin is not the SuperchainConfig's admin, this will revert. + upgradeTo(_superchainProxyAdmin, address(_superchainConfig), impls.superchainConfigImpl); } /// @notice Updates the implementation of a proxy without calling the initializer. @@ -915,7 +931,9 @@ contract OPContractsManagerUpgrader is OPContractsManagerBase { Blueprint.deployFrom( bps.permissionedDisputeGame1, bps.permissionedDisputeGame2, - computeSalt(_l2ChainId, reusableSaltMixer(_opChainConfig), "PermissionedDisputeGame"), + computeSalt( + _l2ChainId, reusableSaltMixer(_opChainConfig.systemConfigProxy), "PermissionedDisputeGame" + ), encodePermissionedFDGConstructor(params, proposer, challenger) ) ); @@ -924,7 +942,9 @@ contract OPContractsManagerUpgrader is OPContractsManagerBase { Blueprint.deployFrom( bps.permissionlessDisputeGame1, bps.permissionlessDisputeGame2, - computeSalt(_l2ChainId, reusableSaltMixer(_opChainConfig), "PermissionlessDisputeGame"), + computeSalt( + _l2ChainId, reusableSaltMixer(_opChainConfig.systemConfigProxy), "PermissionlessDisputeGame" + ), encodePermissionlessFDGConstructor(params) ) ); @@ -1077,18 +1097,38 @@ contract OPContractsManagerDeployer is OPContractsManagerBase { output.opChainProxyAdmin, address(output.l1ERC721BridgeProxy), implementation.l1ERC721BridgeImpl, data ); - data = encodeOptimismPortalInitializer(output); - upgradeToAndCall( - output.opChainProxyAdmin, address(output.optimismPortalProxy), implementation.optimismPortalImpl, data - ); - // Initialize the SystemConfig before the ETHLockbox, required because the ETHLockbox will - // try to get the SuperchainConfig from the SystemConfig inside of its 
initializer. + // try to get the SuperchainConfig from the SystemConfig inside of its initializer. Also + // need to initialize before OptimismPortal because OptimismPortal does some sanity checks + // based on the ETHLockbox feature flag. data = encodeSystemConfigInitializer(_input, output, _superchainConfig); upgradeToAndCall( output.opChainProxyAdmin, address(output.systemConfigProxy), implementation.systemConfigImpl, data ); + // If the interop feature was requested, enable the ETHLockbox feature in the SystemConfig + // contract. Only other way to get the ETHLockbox feature as of u16a is to have already had + // the ETHLockbox in U16 and then upgrade to U16a. + if (isDevFeatureEnabled(DevFeatures.OPTIMISM_PORTAL_INTEROP)) { + output.systemConfigProxy.setFeature(Features.ETH_LOCKBOX, true); + } + + // Initialize the OptimismPortal. + if (isDevFeatureEnabled(DevFeatures.OPTIMISM_PORTAL_INTEROP)) { + data = encodeOptimismPortalInteropInitializer(output); + upgradeToAndCall( + output.opChainProxyAdmin, + address(output.optimismPortalProxy), + implementation.optimismPortalInteropImpl, + data + ); + } else { + data = encodeOptimismPortalInitializer(output); + upgradeToAndCall( + output.opChainProxyAdmin, address(output.optimismPortalProxy), implementation.optimismPortalImpl, data + ); + } + // Initialize the ETHLockbox. IOptimismPortal[] memory portals = new IOptimismPortal[](1); portals[0] = output.optimismPortalProxy; @@ -1234,9 +1274,19 @@ contract OPContractsManagerDeployer is OPContractsManagerBase { view virtual returns (bytes memory) + { + return abi.encodeCall(IOptimismPortal.initialize, (_output.systemConfigProxy, _output.anchorStateRegistryProxy)); + } + + /// @notice Helper method for encoding the OptimismPortalInterop initializer data. 
+ function encodeOptimismPortalInteropInitializer(OPContractsManager.DeployOutput memory _output) + internal + view + virtual + returns (bytes memory) { return abi.encodeCall( - IOptimismPortal.initialize, + IOptimismPortalInterop.initialize, (_output.systemConfigProxy, _output.anchorStateRegistryProxy, _output.ethLockboxProxy) ); } @@ -1415,9 +1465,9 @@ contract OPContractsManagerInteropMigrator is OPContractsManagerBase { } // Grab an array of portals from the configs. - IOptimismPortal[] memory portals = new IOptimismPortal[](_input.opChainConfigs.length); + IOptimismPortalInterop[] memory portals = new IOptimismPortalInterop[](_input.opChainConfigs.length); for (uint256 i = 0; i < _input.opChainConfigs.length; i++) { - portals[i] = IOptimismPortal(payable(_input.opChainConfigs[i].systemConfigProxy.optimismPortal())); + portals[i] = IOptimismPortalInterop(payable(_input.opChainConfigs[i].systemConfigProxy.optimismPortal())); } // Check that the portals have the same SuperchainConfig. @@ -1443,26 +1493,35 @@ contract OPContractsManagerInteropMigrator is OPContractsManagerBase { deployProxy({ _l2ChainId: block.timestamp, _proxyAdmin: _input.opChainConfigs[0].proxyAdmin, - _saltMixer: reusableSaltMixer(_input.opChainConfigs[0]), + _saltMixer: reusableSaltMixer(_input.opChainConfigs[0].systemConfigProxy), _contractName: "ETHLockbox-Interop" }) ); - // Initialize the new ETHLockbox. - // Note that this authorizes the portals to use the ETHLockbox. - upgradeToAndCall( - _input.opChainConfigs[0].proxyAdmin, - address(newEthLockbox), - getImplementations().ethLockboxImpl, - abi.encodeCall(IETHLockbox.initialize, (portals[0].systemConfig(), portals)) - ); + // Separate context to avoid stack too deep. + { + // Lockbox requires standard portal interfaces, need to cast to IOptimismPortal. + IOptimismPortal[] memory castedPortals; + assembly ("memory-safe") { + castedPortals := portals + } + + // Initialize the new ETHLockbox. 
+ // Note that this authorizes the portals to use the ETHLockbox. + upgradeToAndCall( + _input.opChainConfigs[0].proxyAdmin, + address(newEthLockbox), + getImplementations().ethLockboxImpl, + abi.encodeCall(IETHLockbox.initialize, (portals[0].systemConfig(), castedPortals)) + ); + } // Deploy the new DisputeGameFactory. IDisputeGameFactory newDisputeGameFactory = IDisputeGameFactory( deployProxy({ _l2ChainId: block.timestamp, _proxyAdmin: _input.opChainConfigs[0].proxyAdmin, - _saltMixer: reusableSaltMixer(_input.opChainConfigs[0]), + _saltMixer: reusableSaltMixer(_input.opChainConfigs[0].systemConfigProxy), _contractName: "DisputeGameFactory-Interop" }) ); @@ -1480,7 +1539,7 @@ contract OPContractsManagerInteropMigrator is OPContractsManagerBase { deployProxy({ _l2ChainId: block.timestamp, _proxyAdmin: _input.opChainConfigs[0].proxyAdmin, - _saltMixer: reusableSaltMixer(_input.opChainConfigs[0]), + _saltMixer: reusableSaltMixer(_input.opChainConfigs[0].systemConfigProxy), _contractName: "AnchorStateRegistry-Interop" }) ); @@ -1522,6 +1581,10 @@ contract OPContractsManagerInteropMigrator is OPContractsManagerBase { oldDisputeGameFactory.setImplementation(GameTypes.SUPER_CANNON, IDisputeGame(address(0))); oldDisputeGameFactory.setImplementation(GameTypes.PERMISSIONED_CANNON, IDisputeGame(address(0))); oldDisputeGameFactory.setImplementation(GameTypes.SUPER_PERMISSIONED_CANNON, IDisputeGame(address(0))); + if (isDevFeatureEnabled(DevFeatures.CANNON_KONA)) { + oldDisputeGameFactory.setImplementation(GameTypes.CANNON_KONA, IDisputeGame(address(0))); + oldDisputeGameFactory.setImplementation(GameTypes.SUPER_CANNON_KONA, IDisputeGame(address(0))); + } // Migrate the portal to the new ETHLockbox and AnchorStateRegistry. 
portals[i].migrateToSuperRoots(newEthLockbox, newAnchorStateRegistry); @@ -1535,7 +1598,7 @@ contract OPContractsManagerInteropMigrator is OPContractsManagerBase { deployProxy({ _l2ChainId: block.timestamp, _proxyAdmin: _input.opChainConfigs[0].proxyAdmin, - _saltMixer: reusableSaltMixer(_input.opChainConfigs[0]), + _saltMixer: reusableSaltMixer(_input.opChainConfigs[0].systemConfigProxy), _contractName: "DelayedWETH-Interop-Permissioned" }) ) @@ -1559,7 +1622,9 @@ contract OPContractsManagerInteropMigrator is OPContractsManagerBase { blueprints().superPermissionedDisputeGame1, blueprints().superPermissionedDisputeGame2, computeSalt( - block.timestamp, reusableSaltMixer(_input.opChainConfigs[0]), "SuperPermissionedDisputeGame" + block.timestamp, + reusableSaltMixer(_input.opChainConfigs[0].systemConfigProxy), + "SuperPermissionedDisputeGame" ), encodePermissionedSuperFDGConstructor( ISuperFaultDisputeGame.GameConstructorParams({ @@ -1595,7 +1660,7 @@ contract OPContractsManagerInteropMigrator is OPContractsManagerBase { deployProxy({ _l2ChainId: block.timestamp, _proxyAdmin: _input.opChainConfigs[0].proxyAdmin, - _saltMixer: reusableSaltMixer(_input.opChainConfigs[0]), + _saltMixer: reusableSaltMixer(_input.opChainConfigs[0].systemConfigProxy), _contractName: "DelayedWETH-Interop-Permissionless" }) ) @@ -1614,7 +1679,11 @@ contract OPContractsManagerInteropMigrator is OPContractsManagerBase { Blueprint.deployFrom( blueprints().superPermissionlessDisputeGame1, blueprints().superPermissionlessDisputeGame2, - computeSalt(block.timestamp, reusableSaltMixer(_input.opChainConfigs[0]), "SuperFaultDisputeGame"), + computeSalt( + block.timestamp, + reusableSaltMixer(_input.opChainConfigs[0].systemConfigProxy), + "SuperFaultDisputeGame" + ), encodePermissionlessSuperFDGConstructor( ISuperFaultDisputeGame.GameConstructorParams({ gameType: GameTypes.SUPER_CANNON, @@ -1640,11 +1709,6 @@ contract OPContractsManagerInteropMigrator is OPContractsManagerBase { } contract 
OPContractsManager is ISemver { - // -------- Events -------- - - /// @notice Emitted when the OPCM setRC function is called. - event Released(bool _isRC); - // -------- Structs -------- /// @notice Represents the roles that can be set when deploying a standard OP Stack chain. @@ -1724,6 +1788,7 @@ contract OPContractsManager is ISemver { address protocolVersionsImpl; address l1ERC721BridgeImpl; address optimismPortalImpl; + address optimismPortalInteropImpl; address ethLockboxImpl; address systemConfigImpl; address optimismMintableERC20FactoryImpl; @@ -1742,6 +1807,13 @@ contract OPContractsManager is ISemver { Claim absolutePrestate; } + /// @notice The input required to identify a chain for updating prestates + struct UpdatePrestateInput { + ISystemConfig systemConfigProxy; + Claim cannonPrestate; + Claim cannonKonaPrestate; + } + struct AddGameInput { string saltMixer; ISystemConfig systemConfig; @@ -1765,9 +1837,9 @@ contract OPContractsManager is ISemver { // -------- Constants and Variables -------- - /// @custom:semver 2.6.0 + /// @custom:semver 4.0.0 function version() public pure virtual returns (string memory) { - return "2.6.0"; + return "4.0.0"; } OPContractsManagerGameTypeAdder public immutable opcmGameTypeAdder; @@ -1789,30 +1861,12 @@ contract OPContractsManager is ISemver { /// @notice Address of the SuperchainProxyAdmin contract shared by all chains. IProxyAdmin public immutable superchainProxyAdmin; - /// @notice L1 smart contracts release deployed by this version of OPCM. This is used in opcm to signal which - /// version of the L1 smart contracts is deployed. It takes the format of `op-contracts/vX.Y.Z`. - string internal L1_CONTRACTS_RELEASE; - /// @notice The OPContractsManager contract that is currently being used. This is needed in the upgrade function /// which is intended to be DELEGATECALLed. OPContractsManager internal immutable thisOPCM; - /// @notice The address of the upgrade controller. 
- address public immutable upgradeController; - - /// @notice Whether this is a release candidate. - bool public isRC = true; - - /// @notice Returns the release string. Appends "-rc" if this is a release candidate. - function l1ContractsRelease() external view virtual returns (string memory) { - return isRC ? string.concat(L1_CONTRACTS_RELEASE, "-rc") : L1_CONTRACTS_RELEASE; - } - // -------- Errors -------- - /// @notice Thrown when an address other than the upgrade controller calls the setRC function. - error OnlyUpgradeController(); - /// @notice Thrown when an address is the zero address. error AddressNotFound(address who); @@ -1862,9 +1916,7 @@ contract OPContractsManager is ISemver { OPContractsManagerStandardValidator _opcmStandardValidator, ISuperchainConfig _superchainConfig, IProtocolVersions _protocolVersions, - IProxyAdmin _superchainProxyAdmin, - string memory _l1ContractsRelease, - address _upgradeController + IProxyAdmin _superchainProxyAdmin ) { _opcmDeployer.assertValidContractAddress(address(_superchainConfig)); _opcmDeployer.assertValidContractAddress(address(_protocolVersions)); @@ -1881,9 +1933,7 @@ contract OPContractsManager is ISemver { superchainConfig = _superchainConfig; protocolVersions = _protocolVersions; superchainProxyAdmin = _superchainProxyAdmin; - L1_CONTRACTS_RELEASE = _l1ContractsRelease; thisOPCM = this; - upgradeController = _upgradeController; } /// @notice Validates the configuration of the L1 contracts. @@ -1921,19 +1971,26 @@ contract OPContractsManager is ISemver { /// @notice Upgrades a set of chains to the latest implementation contracts /// @param _opChainConfigs Array of OpChain structs, one per chain to upgrade - /// @dev This function is intended to be called via DELEGATECALL from the Upgrade Controller Safe + /// @dev This function is intended to be DELEGATECALLed by an address that is the common owner of every chain in + /// `_opChainConfigs`'s ProxyAdmin. 
+ /// @dev This function requires that each chain's superchainConfig is already upgraded. function upgrade(OpChainConfig[] memory _opChainConfigs) external virtual { if (address(this) == address(thisOPCM)) revert OnlyDelegatecall(); - // If this is delegatecalled by the upgrade controller, set isRC to false first, else, continue execution. - if (address(this) == upgradeController) { - // Set isRC to false. - // This function asserts that the caller is the upgrade controller. - thisOPCM.setRC(false); - } + bytes memory data = abi.encodeCall(OPContractsManagerUpgrader.upgrade, (_opChainConfigs)); + _performDelegateCall(address(opcmUpgrader), data); + } + + /// @notice Upgrades the SuperchainConfig contract. + /// @param _superchainConfig The SuperchainConfig contract to upgrade. + /// @param _superchainProxyAdmin The ProxyAdmin contract to use for the upgrade. + /// @dev This function is intended to be DELEGATECALLed by the superchainConfig's ProxyAdminOwner. + /// @dev This function will revert if the SuperchainConfig is already at or above the target version. 
+ function upgradeSuperchainConfig(ISuperchainConfig _superchainConfig, IProxyAdmin _superchainProxyAdmin) external { + if (address(this) == address(thisOPCM)) revert OnlyDelegatecall(); bytes memory data = abi.encodeCall( - OPContractsManagerUpgrader.upgrade, (superchainConfig, superchainProxyAdmin, _opChainConfigs) + OPContractsManagerUpgrader.upgradeSuperchainConfig, (_superchainConfig, _superchainProxyAdmin) ); _performDelegateCall(address(opcmUpgrader), data); } @@ -1949,9 +2006,9 @@ contract OPContractsManager is ISemver { return abi.decode(returnData, (AddGameOutput[])); } - /// @notice Updates the prestate hash for a new game type while keeping all other parameters the same - /// @param _prestateUpdateInputs The new prestate hash to use - function updatePrestate(OpChainConfig[] memory _prestateUpdateInputs) public { + /// @notice Updates the prestate hash for dispute games while keeping all other parameters the same + /// @param _prestateUpdateInputs The new prestate hashes to use + function updatePrestate(UpdatePrestateInput[] memory _prestateUpdateInputs) public { if (address(this) == address(thisOPCM)) revert OnlyDelegatecall(); bytes memory data = abi.encodeCall(OPContractsManagerGameTypeAdder.updatePrestate, (_prestateUpdateInputs)); @@ -1986,12 +2043,20 @@ contract OPContractsManager is ISemver { return opcmDeployer.implementations(); } - /// @notice Sets the RC flag. - function setRC(bool _isRC) external { - if (msg.sender != upgradeController) revert OnlyUpgradeController(); - isRC = _isRC; + /// @notice Retrieves the development feature bitmap stored in this OPCM contract + /// @return The development feature bitmap. + function devFeatureBitmap() public view returns (bytes32) { + return opcmDeployer.devFeatureBitmap(); + } - emit Released(_isRC); + /// @notice Returns the status of a development feature. 
Note that this function does not check + /// that the input feature represents a single feature and the bitwise AND operation + /// allows for multiple features to be enabled at once. Users should generally check + /// for only a single feature at a time. + /// @param _feature The feature to check. + /// @return True if the feature is enabled, false otherwise. + function isDevFeatureEnabled(bytes32 _feature) public view returns (bool) { + return opcmDeployer.isDevFeatureEnabled(_feature); } /// @notice Helper function to perform a delegatecall to a target contract diff --git a/packages/contracts-bedrock/src/L1/OPContractsManagerStandardValidator.sol b/packages/contracts-bedrock/src/L1/OPContractsManagerStandardValidator.sol index b9d23ee8506ad..f82b21b248c57 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManagerStandardValidator.sol +++ b/packages/contracts-bedrock/src/L1/OPContractsManagerStandardValidator.sol @@ -8,6 +8,8 @@ import { Duration } from "src/dispute/lib/LibUDT.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { Constants } from "src/libraries/Constants.sol"; import { Hash } from "src/dispute/lib/Types.sol"; +import { Features } from "src/libraries/Features.sol"; +import { DevFeatures } from "src/libraries/DevFeatures.sol"; // Interfaces import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; @@ -36,8 +38,8 @@ import { IProxyAdminOwnedBase } from "interfaces/L1/IProxyAdminOwnedBase.sol"; /// before and after an upgrade. contract OPContractsManagerStandardValidator is ISemver { /// @notice The semantic version of the OPContractsManagerStandardValidator contract. - /// @custom:semver 1.5.0 - string public constant version = "1.5.0"; + /// @custom:semver 1.18.0 + string public constant version = "1.18.0"; /// @notice The SuperchainConfig contract. 
ISuperchainConfig public superchainConfig; @@ -59,6 +61,9 @@ contract OPContractsManagerStandardValidator is ISemver { /// @notice The OptimismPortal implementation address. address public optimismPortalImpl; + /// @notice The OptimismPortalInterop implementation address. + address public optimismPortalInteropImpl; + /// @notice The ETHLockbox implementation address. address public ethLockboxImpl; @@ -86,10 +91,14 @@ contract OPContractsManagerStandardValidator is ISemver { /// @notice The MIPS implementation address. address public mipsImpl; + /// @notice Bitmap of development features, verification may depend on these features. + bytes32 public devFeatureBitmap; + /// @notice Struct containing the implementation addresses of the L1 contracts. struct Implementations { address l1ERC721BridgeImpl; address optimismPortalImpl; + address optimismPortalInteropImpl; address ethLockboxImpl; address systemConfigImpl; address optimismMintableERC20FactoryImpl; @@ -121,16 +130,19 @@ contract OPContractsManagerStandardValidator is ISemver { ISuperchainConfig _superchainConfig, address _l1PAOMultisig, address _challenger, - uint256 _withdrawalDelaySeconds + uint256 _withdrawalDelaySeconds, + bytes32 _devFeatureBitmap ) { superchainConfig = _superchainConfig; l1PAOMultisig = _l1PAOMultisig; challenger = _challenger; withdrawalDelaySeconds = _withdrawalDelaySeconds; + devFeatureBitmap = _devFeatureBitmap; // Set implementation addresses from struct l1ERC721BridgeImpl = _implementations.l1ERC721BridgeImpl; optimismPortalImpl = _implementations.optimismPortalImpl; + optimismPortalInteropImpl = _implementations.optimismPortalInteropImpl; ethLockboxImpl = _implementations.ethLockboxImpl; systemConfigImpl = _implementations.systemConfigImpl; optimismMintableERC20FactoryImpl = _implementations.optimismMintableERC20FactoryImpl; @@ -174,59 +186,9 @@ contract OPContractsManagerStandardValidator is ISemver { return challenger; } - /// @notice Returns the expected SystemConfig version. 
- function systemConfigVersion() public pure returns (string memory) { - return "3.4.0"; - } - - /// @notice Returns the expected OptimismPortal version. - function optimismPortalVersion() public pure returns (string memory) { - return "4.6.0"; - } - - /// @notice Returns the expected L1CrossDomainMessenger version. - function l1CrossDomainMessengerVersion() public pure returns (string memory) { - return "2.9.0"; - } - - /// @notice Returns the expected L1ERC721Bridge version. - function l1ERC721BridgeVersion() public pure returns (string memory) { - return "2.7.0"; - } - - /// @notice Returns the expected L1StandardBridge version. - function l1StandardBridgeVersion() public pure returns (string memory) { - return "2.6.0"; - } - - /// @notice Returns the expected MIPS version. - function mipsVersion() public pure returns (string memory) { - return "1.8.0"; - } - - /// @notice Returns the expected OptimismMintableERC20Factory version. - function optimismMintableERC20FactoryVersion() public pure returns (string memory) { - return "1.10.1"; - } - - /// @notice Returns the expected DisputeGameFactory version. - function disputeGameFactoryVersion() public pure returns (string memory) { - return "1.2.0"; - } - - /// @notice Returns the expected AnchorStateRegistry version. - function anchorStateRegistryVersion() public pure returns (string memory) { - return "3.5.0"; - } - - /// @notice Returns the expected DelayedWETH version. - function delayedWETHVersion() public pure returns (string memory) { - return "1.5.0"; - } - /// @notice Returns the expected PermissionedDisputeGame version. function permissionedDisputeGameVersion() public pure returns (string memory) { - return "1.7.0"; + return "1.8.0"; } /// @notice Returns the expected PreimageOracle version. @@ -234,11 +196,6 @@ contract OPContractsManagerStandardValidator is ISemver { return "1.1.4"; } - /// @notice Returns the expected ETHLockbox version. 
- function ethLockboxVersion() public pure returns (string memory) { - return "1.2.0"; - } - /// @notice Internal function to get version from any contract implementing ISemver. function getVersion(address _contract) private view returns (string memory) { return ISemver(_contract).version(); @@ -286,8 +243,9 @@ contract OPContractsManagerStandardValidator is ISemver { virtual returns (string memory) { - _errors = - internalRequire(LibString.eq(getVersion(address(_sysCfg)), systemConfigVersion()), "SYSCON-10", _errors); + _errors = internalRequire( + LibString.eq(getVersion(address(_sysCfg)), getVersion(systemConfigImpl)), "SYSCON-10", _errors + ); _errors = internalRequire(_sysCfg.gasLimit() <= uint64(500_000_000), "SYSCON-20", _errors); _errors = internalRequire(_sysCfg.scalar() != 0, "SYSCON-30", _errors); _errors = @@ -319,7 +277,7 @@ contract OPContractsManagerStandardValidator is ISemver { { IL1CrossDomainMessenger _messenger = IL1CrossDomainMessenger(_sysCfg.l1CrossDomainMessenger()); _errors = internalRequire( - LibString.eq(getVersion(address(_messenger)), l1CrossDomainMessengerVersion()), "L1xDM-10", _errors + LibString.eq(getVersion(address(_messenger)), getVersion(l1CrossDomainMessengerImpl)), "L1xDM-10", _errors ); _errors = internalRequire( getProxyImplementation(_admin, address(_messenger)) == l1CrossDomainMessengerImpl, "L1xDM-20", _errors @@ -351,8 +309,9 @@ contract OPContractsManagerStandardValidator is ISemver { returns (string memory) { IL1StandardBridge _bridge = IL1StandardBridge(payable(_sysCfg.l1StandardBridge())); - _errors = - internalRequire(LibString.eq(getVersion(address(_bridge)), l1StandardBridgeVersion()), "L1SB-10", _errors); + _errors = internalRequire( + LibString.eq(getVersion(address(_bridge)), getVersion(l1StandardBridgeImpl)), "L1SB-10", _errors + ); _errors = internalRequire( getProxyImplementation(_admin, address(_bridge)) == l1StandardBridgeImpl, "L1SB-20", _errors ); @@ -380,7 +339,9 @@ contract 
OPContractsManagerStandardValidator is ISemver { { IOptimismMintableERC20Factory _factory = IOptimismMintableERC20Factory(_sysCfg.optimismMintableERC20Factory()); _errors = internalRequire( - LibString.eq(getVersion(address(_factory)), optimismMintableERC20FactoryVersion()), "MERC20F-10", _errors + LibString.eq(getVersion(address(_factory)), getVersion(optimismMintableERC20FactoryImpl)), + "MERC20F-10", + _errors ); _errors = internalRequire( getProxyImplementation(_admin, address(_factory)) == optimismMintableERC20FactoryImpl, "MERC20F-20", _errors @@ -403,8 +364,9 @@ contract OPContractsManagerStandardValidator is ISemver { returns (string memory) { IL1ERC721Bridge _bridge = IL1ERC721Bridge(_sysCfg.l1ERC721Bridge()); - _errors = - internalRequire(LibString.eq(getVersion(address(_bridge)), l1ERC721BridgeVersion()), "L721B-10", _errors); + _errors = internalRequire( + LibString.eq(getVersion(address(_bridge)), getVersion(l1ERC721BridgeImpl)), "L721B-10", _errors + ); _errors = internalRequire(getProxyImplementation(_admin, address(_bridge)) == l1ERC721BridgeImpl, "L721B-20", _errors); @@ -429,11 +391,24 @@ contract OPContractsManagerStandardValidator is ISemver { returns (string memory) { IOptimismPortal2 _portal = IOptimismPortal2(payable(_sysCfg.optimismPortal())); - _errors = - internalRequire(LibString.eq(getVersion(address(_portal)), optimismPortalVersion()), "PORTAL-10", _errors); - _errors = internalRequire( - getProxyImplementation(_admin, address(_portal)) == optimismPortalImpl, "PORTAL-20", _errors - ); + + if (DevFeatures.isDevFeatureEnabled(devFeatureBitmap, DevFeatures.OPTIMISM_PORTAL_INTEROP)) { + _errors = internalRequire( + LibString.eq(getVersion(address(_portal)), string.concat(getVersion(optimismPortalInteropImpl))), + "PORTAL-10", + _errors + ); + _errors = internalRequire( + getProxyImplementation(_admin, address(_portal)) == optimismPortalInteropImpl, "PORTAL-20", _errors + ); + } else { + _errors = internalRequire( + 
LibString.eq(getVersion(address(_portal)), getVersion(optimismPortalImpl)), "PORTAL-10", _errors + ); + _errors = internalRequire( + getProxyImplementation(_admin, address(_portal)) == optimismPortalImpl, "PORTAL-20", _errors + ); + } IDisputeGameFactory _dgf = IDisputeGameFactory(_sysCfg.disputeGameFactory()); _errors = internalRequire(address(_portal.disputeGameFactory()) == address(_dgf), "PORTAL-30", _errors); @@ -456,8 +431,14 @@ contract OPContractsManagerStandardValidator is ISemver { IOptimismPortal2 _portal = IOptimismPortal2(payable(_sysCfg.optimismPortal())); IETHLockbox _lockbox = IETHLockbox(_portal.ethLockbox()); - _errors = - internalRequire(LibString.eq(getVersion(address(_lockbox)), ethLockboxVersion()), "LOCKBOX-10", _errors); + // If this chain isn't using the ETHLockbox, skip the validation. + if (!_sysCfg.isFeatureEnabled(Features.ETH_LOCKBOX)) { + return _errors; + } + + _errors = internalRequire( + LibString.eq(getVersion(address(_lockbox)), getVersion(ethLockboxImpl)), "LOCKBOX-10", _errors + ); _errors = internalRequire(getProxyImplementation(_admin, address(_lockbox)) == ethLockboxImpl, "LOCKBOX-20", _errors); _errors = internalRequire(getProxyAdmin(address(_lockbox)) == _admin, "LOCKBOX-30", _errors); @@ -479,8 +460,9 @@ contract OPContractsManagerStandardValidator is ISemver { { address _l1PAOMultisig = expectedL1PAOMultisig(_overrides); IDisputeGameFactory _factory = IDisputeGameFactory(_sysCfg.disputeGameFactory()); - _errors = - internalRequire(LibString.eq(getVersion(address(_factory)), disputeGameFactoryVersion()), "DF-10", _errors); + _errors = internalRequire( + LibString.eq(getVersion(address(_factory)), getVersion(disputeGameFactoryImpl)), "DF-10", _errors + ); _errors = internalRequire( getProxyImplementation(_admin, address(_factory)) == disputeGameFactoryImpl, "DF-20", _errors ); @@ -644,7 +626,9 @@ contract OPContractsManagerStandardValidator is ISemver { { _errorPrefix = string.concat(_errorPrefix, "-DWETH"); _errors = 
internalRequire( - LibString.eq(getVersion(address(_weth)), delayedWETHVersion()), string.concat(_errorPrefix, "-10"), _errors + LibString.eq(getVersion(address(_weth)), getVersion(delayedWETHImpl)), + string.concat(_errorPrefix, "-10"), + _errors ); _errors = internalRequire( getProxyImplementation(_admin, address(_weth)) == delayedWETHImpl, @@ -676,7 +660,7 @@ contract OPContractsManagerStandardValidator is ISemver { { _errorPrefix = string.concat(_errorPrefix, "-ANCHORP"); _errors = internalRequire( - LibString.eq(getVersion(address(_asr)), anchorStateRegistryVersion()), + LibString.eq(getVersion(address(_asr)), getVersion(anchorStateRegistryImpl)), string.concat(_errorPrefix, "-10"), _errors ); @@ -707,9 +691,9 @@ contract OPContractsManagerStandardValidator is ISemver { _errorPrefix = string.concat(_errorPrefix, "-VM"); _errors = internalRequire(address(_mips) == mipsImpl, string.concat(_errorPrefix, "-10"), _errors); _errors = internalRequire( - LibString.eq(getVersion(address(_mips)), mipsVersion()), string.concat(_errorPrefix, "-20"), _errors + LibString.eq(getVersion(address(_mips)), getVersion(mipsImpl)), string.concat(_errorPrefix, "-20"), _errors ); - _errors = internalRequire(_mips.stateVersion() == 7, string.concat(_errorPrefix, "-30"), _errors); + _errors = internalRequire(_mips.stateVersion() == 8, string.concat(_errorPrefix, "-30"), _errors); return _errors; } diff --git a/packages/contracts-bedrock/src/L1/OptimismPortal2.sol b/packages/contracts-bedrock/src/L1/OptimismPortal2.sol index d726fc94b4121..50b1d35e407da 100644 --- a/packages/contracts-bedrock/src/L1/OptimismPortal2.sol +++ b/packages/contracts-bedrock/src/L1/OptimismPortal2.sol @@ -17,6 +17,7 @@ import { Hashing } from "src/libraries/Hashing.sol"; import { SecureMerkleTrie } from "src/libraries/trie/SecureMerkleTrie.sol"; import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; import { GameStatus, GameType } from "src/dispute/lib/Types.sol"; +import { Features } from 
"src/libraries/Features.sol"; // Interfaces import { ISemver } from "interfaces/universal/ISemver.sol"; @@ -124,11 +125,15 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ReinitializableBase /// @notice Address of the AnchorStateRegistry contract. IAnchorStateRegistry public anchorStateRegistry; - /// @notice Address of the ETHLockbox contract. + /// @notice Address of the ETHLockbox contract. NOTE that as of v4.1.0 it is not possible to + /// set this value in storage and it is only possible for this value to be set if the + /// chain was first upgraded to v4.0.0. Chains that skip v4.0.0 will not have any + /// ETHLockbox set here. IETHLockbox public ethLockbox; - /// @notice Whether the OptimismPortal is using Super Roots or Output Roots. - bool public superRootsActive; + /// @custom:legacy + /// @custom:spacer superRootsActive + bool private spacer_63_20_1; // keccak256(abi.encode(uint256(keccak256("openzeppelin.storage.OptimismPortal2.QKCConfigStorage")) - 1)) & // ~bytes32(uint256(0xff)) @@ -174,24 +179,12 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ReinitializableBase /// @param success Whether the withdrawal transaction was successful. event WithdrawalFinalized(bytes32 indexed withdrawalHash, bool success); - /// @notice Emitted when the total ETH balance is migrated to the ETHLockbox. - /// @param lockbox The address of the ETHLockbox contract. - /// @param ethBalance Amount of ETH migrated. - event ETHMigrated(address indexed lockbox, uint256 ethBalance); - - /// @notice Emitted when the ETHLockbox contract is updated. - /// @param oldLockbox The address of the old ETHLockbox contract. - /// @param newLockbox The address of the new ETHLockbox contract. - /// @param oldAnchorStateRegistry The address of the old AnchorStateRegistry contract. - /// @param newAnchorStateRegistry The address of the new AnchorStateRegistry contract. 
- event PortalMigrated( - IETHLockbox oldLockbox, - IETHLockbox newLockbox, - IAnchorStateRegistry oldAnchorStateRegistry, - IAnchorStateRegistry newAnchorStateRegistry - ); + /// QKC changes: + /// @notice added back by QKC + error OptimismPortal_Unauthorized(); /// @notice Emitted when a minter is set. + event MinterSet(address indexed minter); /// @notice Emitted when native deposit is disabled. event NativeDepositDisabled(); @@ -247,32 +240,17 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ReinitializableBase /// @notice Thrown when a withdrawal has not been proven. error OptimismPortal_Unproven(); - /// @notice Thrown when the caller is not authorized to call the function. - error OptimismPortal_Unauthorized(); - - /// @notice Thrown when the wrong proof method is used. - error OptimismPortal_WrongProofMethod(); - - /// @notice Thrown when a super root proof is invalid. - error OptimismPortal_InvalidSuperRootProof(); - - /// @notice Thrown when an output root index is invalid. - error OptimismPortal_InvalidOutputRootIndex(); - - /// @notice Thrown when an output root chain id is invalid. - error OptimismPortal_InvalidOutputRootChainId(); - - /// @notice Thrown when trying to migrate to the same AnchorStateRegistry. - error OptimismPortal_MigratingToSameRegistry(); + /// @notice Thrown when ETHLockbox is set/unset incorrectly depending on the feature flag. + error OptimismPortal_InvalidLockboxState(); /// @notice Semantic version. - /// @custom:semver 4.6.0 + /// @custom:semver 5.1.1 function version() public pure virtual returns (string memory) { - return "4.6.0"; + return "5.1.1"; } /// @param _proofMaturityDelaySeconds The proof maturity delay in seconds. 
- constructor(uint256 _proofMaturityDelaySeconds) ReinitializableBase(2) { + constructor(uint256 _proofMaturityDelaySeconds) ReinitializableBase(3) { PROOF_MATURITY_DELAY_SECONDS = _proofMaturityDelaySeconds; _disableInitializers(); } @@ -280,11 +258,9 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ReinitializableBase /// @notice Initializer. /// @param _systemConfig Address of the SystemConfig. /// @param _anchorStateRegistry Address of the AnchorStateRegistry. - /// @param _ethLockbox Contract of the ETHLockbox. function initialize( ISystemConfig _systemConfig, - IAnchorStateRegistry _anchorStateRegistry, - IETHLockbox _ethLockbox + IAnchorStateRegistry _anchorStateRegistry ) external reinitializer(initVersion()) @@ -295,7 +271,9 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ReinitializableBase // Now perform initialization logic. systemConfig = _systemConfig; anchorStateRegistry = _anchorStateRegistry; - ethLockbox = _ethLockbox; + + // Assert that the lockbox state is valid. + _assertValidLockboxState(); // Set the l2Sender slot, only if it is currently empty. This signals the first // initialization of the contract. @@ -303,27 +281,10 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ReinitializableBase l2Sender = Constants.DEFAULT_L2_SENDER; } + // Initialize the ResourceMetering contract. __ResourceMetering_init(); } - /// @notice Upgrades the OptimismPortal contract to have a reference to the AnchorStateRegistry and SystemConfig - /// @param _anchorStateRegistry AnchorStateRegistry contract. - /// @param _ethLockbox ETHLockbox contract. - function upgrade( - IAnchorStateRegistry _anchorStateRegistry, - IETHLockbox _ethLockbox - ) - external - reinitializer(initVersion()) - { - // Upgrade transactions must come from the ProxyAdmin or its owner. - _assertOnlyProxyAdminOrProxyAdminOwner(); - - // Now perform upgrade logic. 
- anchorStateRegistry = _anchorStateRegistry; - ethLockbox = _ethLockbox; - } - /// @notice Getter for the current paused status. function paused() public view returns (bool) { return systemConfig.paused(); @@ -404,98 +365,7 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ReinitializableBase // Intentionally empty. } - /// @notice Migrates the total ETH balance to the ETHLockbox. - function migrateLiquidity() public { - // Liquidity migration can only be triggered by the ProxyAdmin owner. - _assertOnlyProxyAdminOwner(); - - // Migrate the liquidity. - uint256 ethBalance = address(this).balance; - ethLockbox.lockETH{ value: ethBalance }(); - emit ETHMigrated(address(ethLockbox), ethBalance); - } - - /// @notice Allows the owner of the ProxyAdmin to migrate the OptimismPortal to use a new - /// lockbox, point at a new AnchorStateRegistry, and start to use the Super Roots proof - /// method. Primarily used for OptimismPortal instances to join the interop set, but - /// can also be used to swap the proof method from Output Roots to Super Roots if the - /// provided lockbox is the same as the current one. - /// @dev It is possible to change lockboxes without migrating liquidity. This can cause one - /// of the OptimismPortal instances connected to the new lockbox to not be able to - /// unlock sufficient ETH to finalize withdrawals which would trigger reverts. To avoid - /// this issue, guarantee that this function is called atomically alongside the - /// ETHLockbox.migrateLiquidity() function within the same transaction. - /// @param _newLockbox The address of the new ETHLockbox contract. - /// @param _newAnchorStateRegistry The address of the new AnchorStateRegistry contract. 
- function migrateToSuperRoots(IETHLockbox _newLockbox, IAnchorStateRegistry _newAnchorStateRegistry) external { - // Migration can only be triggered when the system is not paused because the migration can - // potentially unpause the system as a result of the modified ETHLockbox address. - _assertNotPaused(); - - // Migration can only be triggered by the ProxyAdmin owner. - _assertOnlyProxyAdminOwner(); - - // Chains can use this method to swap the proof method from Output Roots to Super Roots - // without joining the interop set. In this case, the old and new lockboxes will be the - // same. However, whether or not a chain is joining the interop set, all chains will need a - // new AnchorStateRegistry when migrating to Super Roots. We therefore check that the new - // AnchorStateRegistry is different than the old one to prevent this function from being - // accidentally misused. - if (anchorStateRegistry == _newAnchorStateRegistry) { - revert OptimismPortal_MigratingToSameRegistry(); - } - - // Update the ETHLockbox. - IETHLockbox oldLockbox = ethLockbox; - ethLockbox = _newLockbox; - - // Update the AnchorStateRegistry. - IAnchorStateRegistry oldAnchorStateRegistry = anchorStateRegistry; - anchorStateRegistry = _newAnchorStateRegistry; - - // Set the proof method to Super Roots. We expect that migration will happen more than once - // for some chains (switching to single-chain Super Roots and then later joining the - // interop set) so we don't need to check that this is false. - superRootsActive = true; - - // Emit a PortalMigrated event. - emit PortalMigrated(oldLockbox, _newLockbox, oldAnchorStateRegistry, _newAnchorStateRegistry); - } - - /// @notice Proves a withdrawal transaction using a Super Root proof. Only callable when the - /// OptimismPortal is using Super Roots (superRootsActive flag is true). - /// @param _tx Withdrawal transaction to finalize. - /// @param _disputeGameProxy Address of the dispute game to prove the withdrawal against. 
- /// @param _outputRootIndex Index of the target Output Root within the Super Root. - /// @param _superRootProof Inclusion proof of the Output Root within the Super Root. - /// @param _outputRootProof Inclusion proof of the L2ToL1MessagePasser storage root. - /// @param _withdrawalProof Inclusion proof of the withdrawal within the L2ToL1MessagePasser. - function proveWithdrawalTransaction( - Types.WithdrawalTransaction memory _tx, - IDisputeGame _disputeGameProxy, - uint256 _outputRootIndex, - Types.SuperRootProof calldata _superRootProof, - Types.OutputRootProof calldata _outputRootProof, - bytes[] calldata _withdrawalProof - ) - external - { - // Cannot prove withdrawal transactions while the system is paused. - _assertNotPaused(); - - // Make sure that the OptimismPortal is using Super Roots. - if (!superRootsActive) { - revert OptimismPortal_WrongProofMethod(); - } - - // Prove the transaction. - _proveWithdrawalTransaction( - _tx, _disputeGameProxy, _outputRootIndex, _superRootProof, _outputRootProof, _withdrawalProof - ); - } - - /// @notice Proves a withdrawal transaction using an Output Root proof. Only callable when the - /// OptimismPortal is using Output Roots (superRootsActive flag is false). + /// @notice Proves a withdrawal transaction using an Output Root proof. /// @param _tx Withdrawal transaction to finalize. /// @param _disputeGameIndex Index of the dispute game to prove the withdrawal against. /// @param _outputRootProof Inclusion proof of the L2ToL1MessagePasser storage root. @@ -511,59 +381,26 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ReinitializableBase // Cannot prove withdrawal transactions while the system is paused. _assertNotPaused(); - // Make sure that the OptimismPortal is using Output Roots. - if (superRootsActive) { - revert OptimismPortal_WrongProofMethod(); - } - // Fetch the dispute game proxy from the `DisputeGameFactory` contract. 
(,, IDisputeGame disputeGameProxy) = disputeGameFactory().gameAtIndex(_disputeGameIndex); - // Create a dummy super root proof to pass into the internal function. Note that this is - // not a valid Super Root proof but it isn't used anywhere in the internal function when - // using Output Roots. - Types.SuperRootProof memory superRootProof; - - // Prove the transaction. - _proveWithdrawalTransaction(_tx, disputeGameProxy, 0, superRootProof, _outputRootProof, _withdrawalProof); - } - - /// @notice Internal function for proving a withdrawal transaction, used by both the Super Root - /// and Output Root proof functions. Will eventually be replaced with a single function - /// when the Output Root proof method is deprecated. - /// @param _tx Withdrawal transaction to prove. - /// @param _disputeGameProxy Address of the dispute game to prove the withdrawal against. - /// @param _outputRootIndex Index of the target Output Root within the Super Root. - /// @param _superRootProof Inclusion proof of the Output Root within the Super Root. - /// @param _outputRootProof Inclusion proof of the L2ToL1MessagePasser storage root. - /// @param _withdrawalProof Inclusion proof of the withdrawal within the L2ToL1MessagePasser. - function _proveWithdrawalTransaction( - Types.WithdrawalTransaction memory _tx, - IDisputeGame _disputeGameProxy, - uint256 _outputRootIndex, - Types.SuperRootProof memory _superRootProof, - Types.OutputRootProof memory _outputRootProof, - bytes[] memory _withdrawalProof - ) - internal - { // Make sure that the target address is safe. if (_isUnsafeTarget(_tx.target)) { revert OptimismPortal_BadTarget(); } // Game must be a Proper Game. - if (!anchorStateRegistry.isGameProper(_disputeGameProxy)) { + if (!anchorStateRegistry.isGameProper(disputeGameProxy)) { revert OptimismPortal_ImproperDisputeGame(); } // Game must have been respected game type when created. 
- if (!anchorStateRegistry.isGameRespected(_disputeGameProxy)) { + if (!anchorStateRegistry.isGameRespected(disputeGameProxy)) { revert OptimismPortal_InvalidDisputeGame(); } // Game must not have resolved in favor of the Challenger (invalid root claim). - if (_disputeGameProxy.status() == GameStatus.CHALLENGER_WINS) { + if (disputeGameProxy.status() == GameStatus.CHALLENGER_WINS) { revert OptimismPortal_InvalidDisputeGame(); } @@ -571,37 +408,13 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ReinitializableBase // the dispute game's creation timestamp. Not strictly necessary but extra layer of // safety against weird bugs. Note that this blocks withdrawals from being proven in the // same block that a dispute game is created. - if (block.timestamp <= _disputeGameProxy.createdAt().raw()) { + if (block.timestamp <= disputeGameProxy.createdAt().raw()) { revert OptimismPortal_InvalidProofTimestamp(); } - // Validate the provided Output Root and/or Super Root proof depending on proof method. - if (superRootsActive) { - // Verify that the super root can be generated with the elements in the proof. - if (_disputeGameProxy.rootClaim().raw() != Hashing.hashSuperRootProof(_superRootProof)) { - revert OptimismPortal_InvalidSuperRootProof(); - } - - // Check that the index exists in the super root proof. - if (_outputRootIndex >= _superRootProof.outputRoots.length) { - revert OptimismPortal_InvalidOutputRootIndex(); - } - - // Check that the output root has the correct chain id. - Types.OutputRootWithChainId memory outputRoot = _superRootProof.outputRoots[_outputRootIndex]; - if (outputRoot.chainId != systemConfig.l2ChainId()) { - revert OptimismPortal_InvalidOutputRootChainId(); - } - - // Verify that the output root can be generated with the elements in the proof. 
- if (outputRoot.root != Hashing.hashOutputRootProof(_outputRootProof)) { - revert OptimismPortal_InvalidOutputRootProof(); - } - } else { - // Verify that the output root can be generated with the elements in the proof. - if (_disputeGameProxy.rootClaim().raw() != Hashing.hashOutputRootProof(_outputRootProof)) { - revert OptimismPortal_InvalidOutputRootProof(); - } + // Verify that the output root can be generated with the elements in the proof. + if (disputeGameProxy.rootClaim().raw() != Hashing.hashOutputRootProof(_outputRootProof)) { + revert OptimismPortal_InvalidOutputRootProof(); } // Load the ProvenWithdrawal into memory, using the withdrawal hash as a unique identifier. @@ -636,7 +449,7 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ReinitializableBase // the provenWithdrawals mapping. A given user may re-prove a withdrawalHash multiple // times, but each proof will reset the proof timer. provenWithdrawals[withdrawalHash][msg.sender] = - ProvenWithdrawal({ disputeGameProxy: _disputeGameProxy, timestamp: uint64(block.timestamp) }); + ProvenWithdrawal({ disputeGameProxy: disputeGameProxy, timestamp: uint64(block.timestamp) }); // Add the proof submitter to the list of proof submitters for this withdrawal hash. proofSubmitters[withdrawalHash].push(msg.sender); @@ -685,8 +498,10 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ReinitializableBase // Mark the withdrawal as finalized so it can't be replayed. finalizedWithdrawals[withdrawalHash] = true; - // Unlock the ETH from the ETHLockbox. - if (_tx.value > 0) ethLockbox.unlockETH(_tx.value); + // If using ETHLockbox, unlock the ETH from the ETHLockbox. + if (_isUsingLockbox()) { + if (_tx.value > 0) ethLockbox.unlockETH(_tx.value); + } // Set the l2Sender so contracts know who triggered this withdrawal on L2. 
l2Sender = _tx.sender; @@ -707,10 +522,12 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ReinitializableBase // be achieved through contracts built on top of this contract emit WithdrawalFinalized(withdrawalHash, success); - // Send ETH back to the Lockbox in the case of a failed transaction or it'll get stuck here - // and would need to be moved back via the migrateLiquidity function. - if (!success && _tx.value > 0) { - ethLockbox.lockETH{ value: _tx.value }(); + // If using ETHLockbox, send ETH back to the Lockbox in the case of a failed transaction or + // it'll get stuck here and would need to be moved back via admin action. + if (_isUsingLockbox()) { + if (!success && _tx.value > 0) { + ethLockbox.lockETH{ value: _tx.value }(); + } } // Reverting here is useful for determining the exact gas cost to successfully execute the @@ -848,8 +665,10 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ReinitializableBase revert OptimismPortal_NativeDepositForbidden(); } } - // Lock the ETH in the ETHLockbox. - if (msg.value > 0) ethLockbox.lockETH{ value: msg.value }(); + // If using ETHLockbox, lock the ETH in the ETHLockbox. + if (_isUsingLockbox()) { + if (msg.value > 0) ethLockbox.lockETH{ value: msg.value }(); + } // Just to be safe, make sure that people specify address(0) as the target when doing // contract creations. @@ -894,6 +713,12 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ReinitializableBase return proofSubmitters[_withdrawalHash].length; } + /// @notice Checks if the ETHLockbox feature is enabled. + /// @return bool True if the ETHLockbox feature is enabled. + function _isUsingLockbox() internal view returns (bool) { + return systemConfig.isFeatureEnabled(Features.ETH_LOCKBOX) && address(ethLockbox) != address(0); + } + /// @notice Asserts that the contract is not paused. 
function _assertNotPaused() internal view { if (paused()) { @@ -901,6 +726,16 @@ contract OptimismPortal2 is Initializable, ResourceMetering, ReinitializableBase } } + /// @notice Asserts that the ETHLockbox is set/unset correctly depending on the feature flag. + function _assertValidLockboxState() internal view { + if ( + systemConfig.isFeatureEnabled(Features.ETH_LOCKBOX) && address(ethLockbox) == address(0) + || !systemConfig.isFeatureEnabled(Features.ETH_LOCKBOX) && address(ethLockbox) != address(0) + ) { + revert OptimismPortal_InvalidLockboxState(); + } + } + /// @notice Checks if a target address is unsafe. function _isUnsafeTarget(address _target) internal view virtual returns (bool) { // Prevent users from targeting an unsafe target address on a withdrawal transaction. diff --git a/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol b/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol new file mode 100644 index 0000000000000..54db98d889f8f --- /dev/null +++ b/packages/contracts-bedrock/src/L1/OptimismPortalInterop.sol @@ -0,0 +1,816 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Contracts +import { ProxyAdminOwnedBase } from "src/L1/ProxyAdminOwnedBase.sol"; +import { Initializable } from "@openzeppelin/contracts/proxy/utils/Initializable.sol"; +import { ResourceMetering } from "src/L1/ResourceMetering.sol"; +import { ReinitializableBase } from "src/universal/ReinitializableBase.sol"; + +// Libraries +import { EOA } from "src/libraries/EOA.sol"; +import { SafeCall } from "src/libraries/SafeCall.sol"; +import { Constants } from "src/libraries/Constants.sol"; +import { Types } from "src/libraries/Types.sol"; +import { Hashing } from "src/libraries/Hashing.sol"; +import { SecureMerkleTrie } from "src/libraries/trie/SecureMerkleTrie.sol"; +import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; +import { GameStatus, GameType } from "src/dispute/lib/Types.sol"; + +// Interfaces +import { ISemver } from 
"interfaces/universal/ISemver.sol"; +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; +import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; +import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; + +/// @custom:proxied true +/// @title OptimismPortalInterop +/// @notice The OptimismPortal is a low-level contract responsible for passing messages between L1 +/// and L2. Messages sent directly to the OptimismPortal have no form of replayability. +/// Users are encouraged to use the L1CrossDomainMessenger for a higher-level interface. +contract OptimismPortalInterop is Initializable, ResourceMetering, ReinitializableBase, ProxyAdminOwnedBase, ISemver { + /// @notice Represents a proven withdrawal. + /// @custom:field disputeGameProxy Game that the withdrawal was proven against. + /// @custom:field timestamp Timestamp at which the withdrawal was proven. + struct ProvenWithdrawal { + IDisputeGame disputeGameProxy; + uint64 timestamp; + } + + /// @notice The delay between when a withdrawal is proven and when it may be finalized. + uint256 internal immutable PROOF_MATURITY_DELAY_SECONDS; + + /// @notice Version of the deposit event. + uint256 internal constant DEPOSIT_VERSION = 0; + + /// @notice The L2 gas limit set when eth is deposited using the receive() function. + uint64 internal constant RECEIVE_DEFAULT_GAS_LIMIT = 100_000; + + /// @notice Address of the L2 account which initiated a withdrawal in this transaction. + /// If the value of this variable is the default L2 sender address, then we are NOT + /// inside of a call to finalizeWithdrawalTransaction. 
+ address public l2Sender; + + /// @notice A list of withdrawal hashes which have been successfully finalized. + mapping(bytes32 => bool) public finalizedWithdrawals; + + /// @custom:legacy + /// @custom:spacer provenWithdrawals + /// @notice Spacer taking up the legacy `provenWithdrawals` mapping slot. + bytes32 private spacer_52_0_32; + + /// @custom:legacy + /// @custom:spacer paused + /// @notice Spacer for backwards compatibility. + bool private spacer_53_0_1; + + /// @custom:legacy + /// @custom:spacer superchainConfig + /// @notice Spacer for backwards compatibility. + address private spacer_53_1_20; + + /// @custom:legacy + /// @custom:spacer l2Oracle + /// @notice Spacer taking up the legacy `l2Oracle` address slot. + address private spacer_54_0_20; + + /// @notice Address of the SystemConfig contract. + /// @custom:network-specific + ISystemConfig public systemConfig; + + /// @custom:network-specific + /// @custom:legacy + /// @custom:spacer disputeGameFactory + /// @notice Spacer taking up the legacy `disputeGameFactory` address slot. + address private spacer_56_0_20; + + /// @notice A mapping of withdrawal hashes to proof submitters to ProvenWithdrawal data. + mapping(bytes32 => mapping(address => ProvenWithdrawal)) public provenWithdrawals; + + /// @custom:legacy + /// @custom:spacer disputeGameBlacklist + bytes32 private spacer_58_0_32; + + /// @custom:legacy + /// @custom:spacer respectedGameType + GameType private spacer_59_0_4; + + /// @custom:legacy + /// @custom:spacer respectedGameTypeUpdatedAt + uint64 private spacer_59_4_8; + + /// @notice Mapping of withdrawal hashes to addresses that have submitted a proof for the + /// withdrawal. Original OptimismPortal contract only allowed one proof to be submitted + /// for any given withdrawal hash. 
Fault Proofs version of this contract must allow + /// multiple proofs for the same withdrawal hash to prevent a malicious user from + /// blocking other withdrawals by proving them against invalid proposals. Submitters + /// are tracked in an array to simplify the off-chain process of determining which + /// proof submission should be used when finalizing a withdrawal. + mapping(bytes32 => address[]) public proofSubmitters; + + /// @custom:legacy + /// @custom:spacer _balance + uint256 private spacer_61_0_32; + + /// @notice Address of the AnchorStateRegistry contract. + IAnchorStateRegistry public anchorStateRegistry; + + /// @notice Address of the ETHLockbox contract. + IETHLockbox public ethLockbox; + + /// @notice Whether the OptimismPortal is using Super Roots or Output Roots. + bool public superRootsActive; + + /// @notice Emitted when a transaction is deposited from L1 to L2. The parameters of this event + /// are read by the rollup node and used to derive deposit transactions on L2. + /// @param from Address that triggered the deposit transaction. + /// @param to Address that the deposit transaction is directed to. + /// @param version Version of this deposit transaction event. + /// @param opaqueData ABI encoded deposit data to be parsed off-chain. + event TransactionDeposited(address indexed from, address indexed to, uint256 indexed version, bytes opaqueData); + + /// @notice Emitted when a withdrawal transaction is proven. + /// @param withdrawalHash Hash of the withdrawal transaction. + /// @param from Address that triggered the withdrawal transaction. + /// @param to Address that the withdrawal transaction is directed to. + event WithdrawalProven(bytes32 indexed withdrawalHash, address indexed from, address indexed to); + + /// @notice Emitted when a withdrawal transaction is proven. Exists as a separate event to + /// allow for backwards compatibility for tooling that observes the WithdrawalProven + /// event. 
+ /// @param withdrawalHash Hash of the withdrawal transaction. + /// @param proofSubmitter Address of the proof submitter. + event WithdrawalProvenExtension1(bytes32 indexed withdrawalHash, address indexed proofSubmitter); + + /// @notice Emitted when a withdrawal transaction is finalized. + /// @param withdrawalHash Hash of the withdrawal transaction. + /// @param success Whether the withdrawal transaction was successful. + event WithdrawalFinalized(bytes32 indexed withdrawalHash, bool success); + + /// @notice Emitted when the total ETH balance is migrated to the ETHLockbox. + /// @param lockbox The address of the ETHLockbox contract. + /// @param ethBalance Amount of ETH migrated. + event ETHMigrated(address indexed lockbox, uint256 ethBalance); + + /// @notice Emitted when the ETHLockbox contract is updated. + /// @param oldLockbox The address of the old ETHLockbox contract. + /// @param newLockbox The address of the new ETHLockbox contract. + /// @param oldAnchorStateRegistry The address of the old AnchorStateRegistry contract. + /// @param newAnchorStateRegistry The address of the new AnchorStateRegistry contract. + event PortalMigrated( + IETHLockbox oldLockbox, + IETHLockbox newLockbox, + IAnchorStateRegistry oldAnchorStateRegistry, + IAnchorStateRegistry newAnchorStateRegistry + ); + + /// @notice Thrown when a withdrawal has already been finalized. + error OptimismPortal_AlreadyFinalized(); + + /// @notice Thrown when the target of a withdrawal is unsafe. + error OptimismPortal_BadTarget(); + + /// @notice Thrown when the calldata for a deposit is too large. + error OptimismPortal_CalldataTooLarge(); + + /// @notice Thrown when the portal is paused. + error OptimismPortal_CallPaused(); + + /// @notice Thrown when a gas estimation transaction is being executed. + error OptimismPortal_GasEstimation(); + + /// @notice Thrown when the gas limit for a deposit is too low. 
+ error OptimismPortal_GasLimitTooLow(); + + /// @notice Thrown when the target of a withdrawal is not a proper dispute game. + error OptimismPortal_ImproperDisputeGame(); + + /// @notice Thrown when a withdrawal has not been proven against a valid dispute game. + error OptimismPortal_InvalidDisputeGame(); + + /// @notice Thrown when a withdrawal has not been proven against a valid merkle proof. + error OptimismPortal_InvalidMerkleProof(); + + /// @notice Thrown when a withdrawal has not been proven against a valid output root proof. + error OptimismPortal_InvalidOutputRootProof(); + + /// @notice Thrown when a withdrawal's timestamp is not greater than the dispute game's creation timestamp. + error OptimismPortal_InvalidProofTimestamp(); + + /// @notice Thrown when the root claim of a dispute game is invalid. + error OptimismPortal_InvalidRootClaim(); + + /// @notice Thrown when a withdrawal is being finalized by a reentrant call. + error OptimismPortal_NoReentrancy(); + + /// @notice Thrown when a withdrawal has not been proven for long enough. + error OptimismPortal_ProofNotOldEnough(); + + /// @notice Thrown when a withdrawal has not been proven. + error OptimismPortal_Unproven(); + + /// @notice Thrown when the wrong proof method is used. + error OptimismPortal_WrongProofMethod(); + + /// @notice Thrown when a super root proof is invalid. + error OptimismPortal_InvalidSuperRootProof(); + + /// @notice Thrown when an output root index is invalid. + error OptimismPortal_InvalidOutputRootIndex(); + + /// @notice Thrown when an output root chain id is invalid. + error OptimismPortal_InvalidOutputRootChainId(); + + /// @notice Thrown when trying to migrate to the same AnchorStateRegistry. + error OptimismPortal_MigratingToSameRegistry(); + + /// @notice Semantic version. 
+ /// @custom:semver 5.1.0+interop + function version() public pure virtual returns (string memory) { + return "5.1.0+interop"; + } + + /// @param _proofMaturityDelaySeconds The proof maturity delay in seconds. + constructor(uint256 _proofMaturityDelaySeconds) ReinitializableBase(4) { + PROOF_MATURITY_DELAY_SECONDS = _proofMaturityDelaySeconds; + _disableInitializers(); + } + + /// @notice Initializer. + /// @param _systemConfig Address of the SystemConfig. + /// @param _anchorStateRegistry Address of the AnchorStateRegistry. + /// @param _ethLockbox Contract of the ETHLockbox. + function initialize( + ISystemConfig _systemConfig, + IAnchorStateRegistry _anchorStateRegistry, + IETHLockbox _ethLockbox + ) + external + reinitializer(initVersion()) + { + // Initialization transactions must come from the ProxyAdmin or its owner. + _assertOnlyProxyAdminOrProxyAdminOwner(); + + // Now perform initialization logic. + systemConfig = _systemConfig; + anchorStateRegistry = _anchorStateRegistry; + ethLockbox = _ethLockbox; + + // Set the l2Sender slot, only if it is currently empty. This signals the first + // initialization of the contract. + if (l2Sender == address(0)) { + l2Sender = Constants.DEFAULT_L2_SENDER; + } + + __ResourceMetering_init(); + } + + /// @notice Upgrades the OptimismPortal contract to have a reference to the AnchorStateRegistry and SystemConfig + /// @param _anchorStateRegistry AnchorStateRegistry contract. + /// @param _ethLockbox ETHLockbox contract. + function upgrade( + IAnchorStateRegistry _anchorStateRegistry, + IETHLockbox _ethLockbox + ) + external + reinitializer(initVersion()) + { + // Upgrade transactions must come from the ProxyAdmin or its owner. + _assertOnlyProxyAdminOrProxyAdminOwner(); + + // Now perform upgrade logic. + anchorStateRegistry = _anchorStateRegistry; + ethLockbox = _ethLockbox; + } + + /// @notice Getter for the current paused status. 
+ function paused() public view returns (bool) { + return systemConfig.paused(); + } + + /// @notice Getter for the proof maturity delay. + function proofMaturityDelaySeconds() public view returns (uint256) { + return PROOF_MATURITY_DELAY_SECONDS; + } + + /// @notice Getter for the address of the DisputeGameFactory contract. + function disputeGameFactory() public view returns (IDisputeGameFactory) { + return anchorStateRegistry.disputeGameFactory(); + } + + /// @notice Returns the SuperchainConfig contract. + /// @return ISuperchainConfig The SuperchainConfig contract. + function superchainConfig() external view returns (ISuperchainConfig) { + return systemConfig.superchainConfig(); + } + + /// @custom:legacy + /// @notice Getter function for the address of the guardian. + function guardian() external view returns (address) { + return systemConfig.guardian(); + } + + /// @custom:legacy + /// @notice Getter for the dispute game finality delay. + function disputeGameFinalityDelaySeconds() external view returns (uint256) { + return anchorStateRegistry.disputeGameFinalityDelaySeconds(); + } + + /// @custom:legacy + /// @notice Getter for the respected game type. + function respectedGameType() external view returns (GameType) { + return anchorStateRegistry.respectedGameType(); + } + + /// @custom:legacy + /// @notice Getter for the retirement timestamp. Note that this value NO LONGER reflects the + /// timestamp at which the respected game type was updated. Game retirement and + /// respected game type value have been decoupled, this function now only returns the + /// retirement timestamp. + function respectedGameTypeUpdatedAt() external view returns (uint64) { + return anchorStateRegistry.retirementTimestamp(); + } + + /// @custom:legacy + /// @notice Getter for the dispute game blacklist. + /// @param _disputeGame The dispute game to check. + /// @return Whether the dispute game is blacklisted. 
+ function disputeGameBlacklist(IDisputeGame _disputeGame) public view returns (bool) { + return anchorStateRegistry.disputeGameBlacklist(_disputeGame); + } + + /// @notice Computes the minimum gas limit for a deposit. + /// The minimum gas limit linearly increases based on the size of the calldata. + /// This is to prevent users from creating L2 resource usage without paying for it. + /// This function can be used when interacting with the portal to ensure forwards + /// compatibility. + /// @param _byteCount Number of bytes in the calldata. + /// @return The minimum gas limit for a deposit. + function minimumGasLimit(uint64 _byteCount) public pure returns (uint64) { + return _byteCount * 40 + 21000; + } + + /// @notice Accepts value so that users can send ETH directly to this contract and have the + /// funds be deposited to their address on L2. This is intended as a convenience + /// function for EOAs. Contracts should call the depositTransaction() function directly + /// otherwise any deposited funds will be lost due to address aliasing. + receive() external payable { + depositTransaction(msg.sender, msg.value, RECEIVE_DEFAULT_GAS_LIMIT, false, bytes("")); + } + + /// @notice Accepts ETH value without triggering a deposit to L2. + function donateETH() external payable { + // Intentionally empty. + } + + /// @notice Migrates the total ETH balance to the ETHLockbox. + function migrateLiquidity() public { + // Liquidity migration can only be triggered by the ProxyAdmin owner. + _assertOnlyProxyAdminOwner(); + + // Migrate the liquidity. + uint256 ethBalance = address(this).balance; + ethLockbox.lockETH{ value: ethBalance }(); + emit ETHMigrated(address(ethLockbox), ethBalance); + } + + /// @notice Allows the owner of the ProxyAdmin to migrate the OptimismPortal to use a new + /// lockbox, point at a new AnchorStateRegistry, and start to use the Super Roots proof + /// method. 
Primarily used for OptimismPortal instances to join the interop set, but + /// can also be used to swap the proof method from Output Roots to Super Roots if the + /// provided lockbox is the same as the current one. + /// @dev It is possible to change lockboxes without migrating liquidity. This can cause one + /// of the OptimismPortal instances connected to the new lockbox to not be able to + /// unlock sufficient ETH to finalize withdrawals which would trigger reverts. To avoid + /// this issue, guarantee that this function is called atomically alongside the + /// ETHLockbox.migrateLiquidity() function within the same transaction. + /// @param _newLockbox The address of the new ETHLockbox contract. + /// @param _newAnchorStateRegistry The address of the new AnchorStateRegistry contract. + function migrateToSuperRoots(IETHLockbox _newLockbox, IAnchorStateRegistry _newAnchorStateRegistry) external { + // Migration can only be triggered when the system is not paused because the migration can + // potentially unpause the system as a result of the modified ETHLockbox address. + _assertNotPaused(); + + // Migration can only be triggered by the ProxyAdmin owner. + _assertOnlyProxyAdminOwner(); + + // Chains can use this method to swap the proof method from Output Roots to Super Roots + // without joining the interop set. In this case, the old and new lockboxes will be the + // same. However, whether or not a chain is joining the interop set, all chains will need a + // new AnchorStateRegistry when migrating to Super Roots. We therefore check that the new + // AnchorStateRegistry is different than the old one to prevent this function from being + // accidentally misused. + if (anchorStateRegistry == _newAnchorStateRegistry) { + revert OptimismPortal_MigratingToSameRegistry(); + } + + // Update the ETHLockbox. + IETHLockbox oldLockbox = ethLockbox; + ethLockbox = _newLockbox; + + // Update the AnchorStateRegistry. 
+ IAnchorStateRegistry oldAnchorStateRegistry = anchorStateRegistry; + anchorStateRegistry = _newAnchorStateRegistry; + + // Set the proof method to Super Roots. We expect that migration will happen more than once + // for some chains (switching to single-chain Super Roots and then later joining the + // interop set) so we don't need to check that this is false. + superRootsActive = true; + + // Emit a PortalMigrated event. + emit PortalMigrated(oldLockbox, _newLockbox, oldAnchorStateRegistry, _newAnchorStateRegistry); + } + + /// @notice Proves a withdrawal transaction using a Super Root proof. Only callable when the + /// OptimismPortal is using Super Roots (superRootsActive flag is true). + /// @param _tx Withdrawal transaction to finalize. + /// @param _disputeGameProxy Address of the dispute game to prove the withdrawal against. + /// @param _outputRootIndex Index of the target Output Root within the Super Root. + /// @param _superRootProof Inclusion proof of the Output Root within the Super Root. + /// @param _outputRootProof Inclusion proof of the L2ToL1MessagePasser storage root. + /// @param _withdrawalProof Inclusion proof of the withdrawal within the L2ToL1MessagePasser. + function proveWithdrawalTransaction( + Types.WithdrawalTransaction memory _tx, + IDisputeGame _disputeGameProxy, + uint256 _outputRootIndex, + Types.SuperRootProof calldata _superRootProof, + Types.OutputRootProof calldata _outputRootProof, + bytes[] calldata _withdrawalProof + ) + external + { + // Cannot prove withdrawal transactions while the system is paused. + _assertNotPaused(); + + // Make sure that the OptimismPortal is using Super Roots. + if (!superRootsActive) { + revert OptimismPortal_WrongProofMethod(); + } + + // Prove the transaction. + _proveWithdrawalTransaction( + _tx, _disputeGameProxy, _outputRootIndex, _superRootProof, _outputRootProof, _withdrawalProof + ); + } + + /// @notice Proves a withdrawal transaction using an Output Root proof. 
Only callable when the + /// OptimismPortal is using Output Roots (superRootsActive flag is false). + /// @param _tx Withdrawal transaction to finalize. + /// @param _disputeGameIndex Index of the dispute game to prove the withdrawal against. + /// @param _outputRootProof Inclusion proof of the L2ToL1MessagePasser storage root. + /// @param _withdrawalProof Inclusion proof of the withdrawal within the L2ToL1MessagePasser. + function proveWithdrawalTransaction( + Types.WithdrawalTransaction memory _tx, + uint256 _disputeGameIndex, + Types.OutputRootProof calldata _outputRootProof, + bytes[] calldata _withdrawalProof + ) + external + { + // Cannot prove withdrawal transactions while the system is paused. + _assertNotPaused(); + + // Make sure that the OptimismPortal is using Output Roots. + if (superRootsActive) { + revert OptimismPortal_WrongProofMethod(); + } + + // Fetch the dispute game proxy from the `DisputeGameFactory` contract. + (,, IDisputeGame disputeGameProxy) = disputeGameFactory().gameAtIndex(_disputeGameIndex); + + // Create a dummy super root proof to pass into the internal function. Note that this is + // not a valid Super Root proof but it isn't used anywhere in the internal function when + // using Output Roots. + Types.SuperRootProof memory superRootProof; + + // Prove the transaction. + _proveWithdrawalTransaction(_tx, disputeGameProxy, 0, superRootProof, _outputRootProof, _withdrawalProof); + } + + /// @notice Internal function for proving a withdrawal transaction, used by both the Super Root + /// and Output Root proof functions. Will eventually be replaced with a single function + /// when the Output Root proof method is deprecated. + /// @param _tx Withdrawal transaction to prove. + /// @param _disputeGameProxy Address of the dispute game to prove the withdrawal against. + /// @param _outputRootIndex Index of the target Output Root within the Super Root. + /// @param _superRootProof Inclusion proof of the Output Root within the Super Root. 
+ /// @param _outputRootProof Inclusion proof of the L2ToL1MessagePasser storage root. + /// @param _withdrawalProof Inclusion proof of the withdrawal within the L2ToL1MessagePasser. + function _proveWithdrawalTransaction( + Types.WithdrawalTransaction memory _tx, + IDisputeGame _disputeGameProxy, + uint256 _outputRootIndex, + Types.SuperRootProof memory _superRootProof, + Types.OutputRootProof memory _outputRootProof, + bytes[] memory _withdrawalProof + ) + internal + { + // Make sure that the target address is safe. + if (_isUnsafeTarget(_tx.target)) { + revert OptimismPortal_BadTarget(); + } + + // Game must be a Proper Game. + if (!anchorStateRegistry.isGameProper(_disputeGameProxy)) { + revert OptimismPortal_ImproperDisputeGame(); + } + + // Game must have been respected game type when created. + if (!anchorStateRegistry.isGameRespected(_disputeGameProxy)) { + revert OptimismPortal_InvalidDisputeGame(); + } + + // Game must not have resolved in favor of the Challenger (invalid root claim). + if (_disputeGameProxy.status() == GameStatus.CHALLENGER_WINS) { + revert OptimismPortal_InvalidDisputeGame(); + } + + // As a sanity check, we make sure that the current timestamp is not less than or equal to + // the dispute game's creation timestamp. Not strictly necessary but extra layer of + // safety against weird bugs. Note that this blocks withdrawals from being proven in the + // same block that a dispute game is created. + if (block.timestamp <= _disputeGameProxy.createdAt().raw()) { + revert OptimismPortal_InvalidProofTimestamp(); + } + + // Validate the provided Output Root and/or Super Root proof depending on proof method. + if (superRootsActive) { + // Verify that the super root can be generated with the elements in the proof. + if (_disputeGameProxy.rootClaim().raw() != Hashing.hashSuperRootProof(_superRootProof)) { + revert OptimismPortal_InvalidSuperRootProof(); + } + + // Check that the index exists in the super root proof. 
+ if (_outputRootIndex >= _superRootProof.outputRoots.length) { + revert OptimismPortal_InvalidOutputRootIndex(); + } + + // Check that the output root has the correct chain id. + Types.OutputRootWithChainId memory outputRoot = _superRootProof.outputRoots[_outputRootIndex]; + if (outputRoot.chainId != systemConfig.l2ChainId()) { + revert OptimismPortal_InvalidOutputRootChainId(); + } + + // Verify that the output root can be generated with the elements in the proof. + if (outputRoot.root != Hashing.hashOutputRootProof(_outputRootProof)) { + revert OptimismPortal_InvalidOutputRootProof(); + } + } else { + // Verify that the output root can be generated with the elements in the proof. + if (_disputeGameProxy.rootClaim().raw() != Hashing.hashOutputRootProof(_outputRootProof)) { + revert OptimismPortal_InvalidOutputRootProof(); + } + } + + // Load the ProvenWithdrawal into memory, using the withdrawal hash as a unique identifier. + bytes32 withdrawalHash = Hashing.hashWithdrawal(_tx); + + // Compute the storage slot of the withdrawal hash in the L2ToL1MessagePasser contract. + // Refer to the Solidity documentation for more information on how storage layouts are + // computed for mappings. + bytes32 storageKey = keccak256( + abi.encode( + withdrawalHash, + uint256(0) // The withdrawals mapping is at the first slot in the layout. + ) + ); + + // Verify that the hash of this withdrawal was stored in the L2toL1MessagePasser contract + // on L2. If this is true, under the assumption that the SecureMerkleTrie does not have + // bugs, then we know that this withdrawal was actually triggered on L2 and can therefore + // be relayed on L1. 
+ if ( + SecureMerkleTrie.verifyInclusionProof({ + _key: abi.encode(storageKey), + _value: hex"01", + _proof: _withdrawalProof, + _root: _outputRootProof.messagePasserStorageRoot + }) == false + ) { + revert OptimismPortal_InvalidMerkleProof(); + } + + // Designate the withdrawalHash as proven by storing the disputeGameProxy and timestamp in + // the provenWithdrawals mapping. A given user may re-prove a withdrawalHash multiple + // times, but each proof will reset the proof timer. + provenWithdrawals[withdrawalHash][msg.sender] = + ProvenWithdrawal({ disputeGameProxy: _disputeGameProxy, timestamp: uint64(block.timestamp) }); + + // Add the proof submitter to the list of proof submitters for this withdrawal hash. + proofSubmitters[withdrawalHash].push(msg.sender); + + // Emit a WithdrawalProven events. + emit WithdrawalProven(withdrawalHash, _tx.sender, _tx.target); + emit WithdrawalProvenExtension1(withdrawalHash, msg.sender); + } + + /// @notice Finalizes a withdrawal transaction. + /// @param _tx Withdrawal transaction to finalize. + function finalizeWithdrawalTransaction(Types.WithdrawalTransaction memory _tx) external { + finalizeWithdrawalTransactionExternalProof(_tx, msg.sender); + } + + /// @notice Finalizes a withdrawal transaction, using an external proof submitter. + /// @param _tx Withdrawal transaction to finalize. + /// @param _proofSubmitter Address of the proof submitter. + function finalizeWithdrawalTransactionExternalProof( + Types.WithdrawalTransaction memory _tx, + address _proofSubmitter + ) + public + { + // Cannot finalize withdrawal transactions while the system is paused. + _assertNotPaused(); + + // Make sure that the l2Sender has not yet been set. The l2Sender is set to a value other + // than the default value when a withdrawal transaction is being finalized. This check is + // a defacto reentrancy guard. 
+ if (l2Sender != Constants.DEFAULT_L2_SENDER) { + revert OptimismPortal_NoReentrancy(); + } + + // Make sure that the target address is safe. + if (_isUnsafeTarget(_tx.target)) { + revert OptimismPortal_BadTarget(); + } + + // Grab the withdrawal. + bytes32 withdrawalHash = Hashing.hashWithdrawal(_tx); + + // Check that the withdrawal can be finalized. + checkWithdrawal(withdrawalHash, _proofSubmitter); + + // Mark the withdrawal as finalized so it can't be replayed. + finalizedWithdrawals[withdrawalHash] = true; + + // Unlock the ETH from the ETHLockbox. + if (_tx.value > 0) ethLockbox.unlockETH(_tx.value); + + // Set the l2Sender so contracts know who triggered this withdrawal on L2. + l2Sender = _tx.sender; + + // Trigger the call to the target contract. We use a custom low level method + // SafeCall.callWithMinGas to ensure two key properties + // 1. Target contracts cannot force this call to run out of gas by returning a very large + // amount of data (and this is OK because we don't care about the returndata here). + // 2. The amount of gas provided to the execution context of the target is at least the + // gas limit specified by the user. If there is not enough gas in the current context + // to accomplish this, `callWithMinGas` will revert. + bool success = SafeCall.callWithMinGas(_tx.target, _tx.gasLimit, _tx.value, _tx.data); + + // Reset the l2Sender back to the default value. + l2Sender = Constants.DEFAULT_L2_SENDER; + + // All withdrawals are immediately finalized. Replayability can + // be achieved through contracts built on top of this contract + emit WithdrawalFinalized(withdrawalHash, success); + + // Send ETH back to the Lockbox in the case of a failed transaction or it'll get stuck here + // and would need to be moved back via the migrateLiquidity function. 
+ if (!success && _tx.value > 0) { + ethLockbox.lockETH{ value: _tx.value }(); + } + + // Reverting here is useful for determining the exact gas cost to successfully execute the + // sub call to the target contract if the minimum gas limit specified by the user would not + // be sufficient to execute the sub call. + if (!success && tx.origin == Constants.ESTIMATION_ADDRESS) { + revert OptimismPortal_GasEstimation(); + } + } + + /// @notice Checks that a withdrawal has been proven and is ready to be finalized. + /// @param _withdrawalHash Hash of the withdrawal. + /// @param _proofSubmitter Address of the proof submitter. + function checkWithdrawal(bytes32 _withdrawalHash, address _proofSubmitter) public view { + // Grab the withdrawal and dispute game proxy. + ProvenWithdrawal memory provenWithdrawal = provenWithdrawals[_withdrawalHash][_proofSubmitter]; + IDisputeGame disputeGameProxy = provenWithdrawal.disputeGameProxy; + + // Check that this withdrawal has not already been finalized, this is replay protection. + if (finalizedWithdrawals[_withdrawalHash]) { + revert OptimismPortal_AlreadyFinalized(); + } + + // A withdrawal can only be finalized if it has been proven. We know that a withdrawal has + // been proven at least once when its timestamp is non-zero. Unproven withdrawals will have + // a timestamp of zero. + if (provenWithdrawal.timestamp == 0) { + revert OptimismPortal_Unproven(); + } + + // As a sanity check, we make sure that the proven withdrawal's timestamp is greater than + // starting timestamp inside the Dispute Game. Not strictly necessary but extra layer of + // safety against weird bugs in the proving step. Note that this blocks withdrawals that + // are proven in the same block that a dispute game is created. + if (provenWithdrawal.timestamp <= disputeGameProxy.createdAt().raw()) { + revert OptimismPortal_InvalidProofTimestamp(); + } + + // A proven withdrawal must wait at least `PROOF_MATURITY_DELAY_SECONDS` before finalizing. 
+ if (block.timestamp - provenWithdrawal.timestamp <= PROOF_MATURITY_DELAY_SECONDS) { + revert OptimismPortal_ProofNotOldEnough(); + } + + // Check that the root claim is valid. + if (!anchorStateRegistry.isGameClaimValid(disputeGameProxy)) { + revert OptimismPortal_InvalidRootClaim(); + } + } + + /// @notice Accepts deposits of ETH and data, and emits a TransactionDeposited event for use in + /// deriving deposit transactions. Note that if a deposit is made by a contract, its + /// address will be aliased when retrieved using `tx.origin` or `msg.sender`. Consider + /// using the CrossDomainMessenger contracts for a simpler developer experience. + /// @dev The `msg.value` is locked on the ETHLockbox and minted as ETH when the deposit + /// arrives on L2, while `_value` specifies how much ETH to send to the target. + /// @param _to Target address on L2. + /// @param _value ETH value to send to the recipient. + /// @param _gasLimit Amount of L2 gas to purchase by burning gas on L1. + /// @param _isCreation Whether or not the transaction is a contract creation. + /// @param _data Data to trigger the recipient with. + function depositTransaction( + address _to, + uint256 _value, + uint64 _gasLimit, + bool _isCreation, + bytes memory _data + ) + public + payable + metered(_gasLimit) + { + // Lock the ETH in the ETHLockbox. + if (msg.value > 0) ethLockbox.lockETH{ value: msg.value }(); + + // Just to be safe, make sure that people specify address(0) as the target when doing + // contract creations. + if (_isCreation && _to != address(0)) { + revert OptimismPortal_BadTarget(); + } + + // Prevent depositing transactions that have too small of a gas limit. Users should pay + // more for more resource usage. + if (_gasLimit < minimumGasLimit(uint64(_data.length))) { + revert OptimismPortal_GasLimitTooLow(); + } + + // Prevent the creation of deposit transactions that have too much calldata. This gives an + // upper limit on the size of unsafe blocks over the p2p network. 
120kb is chosen to ensure + // that the transaction can fit into the p2p network policy of 128kb even though deposit + // transactions are not gossipped over the p2p network. + if (_data.length > 120_000) { + revert OptimismPortal_CalldataTooLarge(); + } + + // Transform the from-address to its alias if the caller is a contract. + address from = msg.sender; + if (!EOA.isSenderEOA()) { + from = AddressAliasHelper.applyL1ToL2Alias(msg.sender); + } + + // Compute the opaque data that will be emitted as part of the TransactionDeposited event. + // We use opaque data so that we can update the TransactionDeposited event in the future + // without breaking the current interface. + bytes memory opaqueData = abi.encodePacked(msg.value, _value, _gasLimit, _isCreation, _data); + + // Emit a TransactionDeposited event so that the rollup node can derive a deposit + // transaction for this deposit. + emit TransactionDeposited(from, _to, DEPOSIT_VERSION, opaqueData); + } + + /// @notice External getter for the number of proof submitters for a withdrawal hash. + /// @param _withdrawalHash Hash of the withdrawal. + /// @return The number of proof submitters for the withdrawal hash. + function numProofSubmitters(bytes32 _withdrawalHash) external view returns (uint256) { + return proofSubmitters[_withdrawalHash].length; + } + + /// @notice Asserts that the contract is not paused. + function _assertNotPaused() internal view { + if (paused()) { + revert OptimismPortal_CallPaused(); + } + } + + /// @notice Checks if a target address is unsafe. + function _isUnsafeTarget(address _target) internal view virtual returns (bool) { + // Prevent users from targeting an unsafe target address on a withdrawal transaction. + return _target == address(this) || _target == address(ethLockbox); + } + + /// @notice Getter for the resource config. Used internally by the ResourceMetering contract. + /// The SystemConfig is the source of truth for the resource config. 
+ /// @return config_ ResourceMetering ResourceConfig + function _resourceConfig() internal view override returns (ResourceMetering.ResourceConfig memory config_) { + IResourceMetering.ResourceConfig memory config = systemConfig.resourceConfig(); + assembly ("memory-safe") { + config_ := config + } + } +} diff --git a/packages/contracts-bedrock/src/L1/ProtocolVersions.sol b/packages/contracts-bedrock/src/L1/ProtocolVersions.sol index 5e36673580d37..ae3fad92942e2 100644 --- a/packages/contracts-bedrock/src/L1/ProtocolVersions.sol +++ b/packages/contracts-bedrock/src/L1/ProtocolVersions.sol @@ -41,8 +41,8 @@ contract ProtocolVersions is OwnableUpgradeable, ISemver { event ConfigUpdate(uint256 indexed version, UpdateType indexed updateType, bytes data); /// @notice Semantic version. - /// @custom:semver 1.1.0 - string public constant version = "1.1.0"; + /// @custom:semver 1.1.1 + string public constant version = "1.1.1"; /// @notice Constructs the ProtocolVersion contract. constructor() { diff --git a/packages/contracts-bedrock/src/L1/SuperchainConfig.sol b/packages/contracts-bedrock/src/L1/SuperchainConfig.sol index 870a35e134428..c0157fef35cfa 100644 --- a/packages/contracts-bedrock/src/L1/SuperchainConfig.sol +++ b/packages/contracts-bedrock/src/L1/SuperchainConfig.sol @@ -9,9 +9,6 @@ import { ReinitializableBase } from "src/universal/ReinitializableBase.sol"; // Interfaces import { ISemver } from "interfaces/universal/ISemver.sol"; -// Libraries -import { Storage } from "src/libraries/Storage.sol"; - /// @custom:proxied true /// @custom:audit none This contracts is not yet audited. /// @title SuperchainConfig @@ -59,8 +56,8 @@ contract SuperchainConfig is ProxyAdminOwnedBase, Initializable, Reinitializable event ConfigUpdate(UpdateType indexed updateType, bytes data); /// @notice Semantic version. 
- /// @custom:semver 2.3.0 - string public constant version = "2.3.0"; + /// @custom:semver 2.4.0 + string public constant version = "2.4.0"; /// @notice Constructs the SuperchainConfig contract. constructor() ReinitializableBase(2) { @@ -77,28 +74,6 @@ contract SuperchainConfig is ProxyAdminOwnedBase, Initializable, Reinitializable _setGuardian(_guardian); } - /// @notice Upgrades the SuperchainConfig contract. - function upgrade() external reinitializer(initVersion()) { - // Upgrade transactions must come from the ProxyAdmin or its owner. - _assertOnlyProxyAdminOrProxyAdminOwner(); - - // Now perform upgrade logic. - // Transfer the guardian into the new variable and clear the old storage slot. - // We generally do not clear out old storage slots but in the case of the SuperchainConfig - // these are the only spacer slots, they aren't cleanly represented by spacer variables, - // and we can get rid of them now and never think about them again later. - bytes32 guardianSlot = bytes32(uint256(keccak256("superchainConfig.guardian")) - 1); - _setGuardian(Storage.getAddress(guardianSlot)); - Storage.setBytes32(guardianSlot, bytes32(0)); - - // Clear the old paused slot. - // Note that if the pause was active while the upgrade was happening, the system will no - // longer be paused after the upgrade. Upgrades should generally not ever be executed while - // the system is paused, but it's worth noting that this is the case. - bytes32 pausedSlot = bytes32(uint256(keccak256("superchainConfig.paused")) - 1); - Storage.setBytes32(pausedSlot, bytes32(0)); - } - /// @notice Returns the duration after which a pause expires. /// @return The duration after which a pause expires. 
function pauseExpiry() external pure returns (uint256) { diff --git a/packages/contracts-bedrock/src/L1/SystemConfig.sol b/packages/contracts-bedrock/src/L1/SystemConfig.sol index 604a9936d3aef..68b5dba6ef81c 100644 --- a/packages/contracts-bedrock/src/L1/SystemConfig.sol +++ b/packages/contracts-bedrock/src/L1/SystemConfig.sol @@ -8,13 +8,13 @@ import { ProxyAdminOwnedBase } from "src/L1/ProxyAdminOwnedBase.sol"; // Libraries import { Storage } from "src/libraries/Storage.sol"; +import { Features } from "src/libraries/Features.sol"; // Interfaces import { ISemver } from "interfaces/universal/ISemver.sol"; import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; -import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; /// @custom:proxied true /// @title SystemConfig @@ -28,13 +28,17 @@ contract SystemConfig is ProxyAdminOwnedBase, OwnableUpgradeable, Reinitializabl /// @custom:value GAS_LIMIT Represents an update to gas limit on L2. /// @custom:value UNSAFE_BLOCK_SIGNER Represents an update to the signer key for unsafe /// block distrubution. + /// @custom:value EIP_1559_PARAMS Represents an update to EIP-1559 parameters. + /// @custom:value OPERATOR_FEE_PARAMS Represents an update to operator fee parameters. + /// @custom:value MIN_BASE_FEE Represents an update to the minimum base fee. enum UpdateType { BATCHER, FEE_SCALARS, GAS_LIMIT, UNSAFE_BLOCK_SIGNER, EIP_1559_PARAMS, - OPERATOR_FEE_PARAMS + OPERATOR_FEE_PARAMS, + MIN_BASE_FEE } /// @notice Struct representing the addresses of L1 system contracts. These should be the @@ -135,22 +139,37 @@ contract SystemConfig is ProxyAdminOwnedBase, OwnableUpgradeable, Reinitializabl /// @notice The SuperchainConfig contract that manages the pause state. ISuperchainConfig public superchainConfig; + /// @notice The minimum base fee, in wei. 
+ uint64 public minBaseFee; + + /// @notice Bytes32 feature flag name to boolean enabled value. + mapping(bytes32 => bool) public isFeatureEnabled; + /// @notice Emitted when configuration is updated. /// @param version SystemConfig version. /// @param updateType Type of update. /// @param data Encoded update data. event ConfigUpdate(uint256 indexed version, UpdateType indexed updateType, bytes data); + /// @notice Emitted when a feature is set. + /// @param feature Feature that was set. + /// @param enabled Whether the feature is enabled. + event FeatureSet(bytes32 indexed feature, bool indexed enabled); + + /// @notice Thrown when attempting to enable/disable a feature when already enabled/disabled, + /// respectively. + error SystemConfig_InvalidFeatureState(); + /// @notice Semantic version. - /// @custom:semver 3.4.0 + /// @custom:semver 3.10.0 function version() public pure virtual returns (string memory) { - return "3.4.0"; + return "3.10.0"; } /// @notice Constructs the SystemConfig contract. /// @dev START_BLOCK_SLOT is set to type(uint256).max here so that it will be a dead value /// in the singleton. - constructor() ReinitializableBase(2) { + constructor() ReinitializableBase(3) { Storage.setUint(START_BLOCK_SLOT, type(uint256).max); _disableInitializers(); } @@ -213,27 +232,6 @@ contract SystemConfig is ProxyAdminOwnedBase, OwnableUpgradeable, Reinitializabl superchainConfig = _superchainConfig; } - /// @notice Upgrades the SystemConfig by adding a reference to the SuperchainConfig. - /// @param _l2ChainId The L2 chain ID that this SystemConfig configures. - /// @param _superchainConfig The SuperchainConfig contract address. - function upgrade(uint256 _l2ChainId, ISuperchainConfig _superchainConfig) external reinitializer(initVersion()) { - // Upgrade transactions must come from the ProxyAdmin or its owner. - _assertOnlyProxyAdminOrProxyAdminOwner(); - - // Now perform upgrade logic. - // Set the L2 chain ID. 
- l2ChainId = _l2ChainId; - - // Set the SuperchainConfig contract. - superchainConfig = _superchainConfig; - - // Clear out the old dispute game factory address, it's derived now. We get rid of this - // storage slot because it doesn't use structured storage and we can't use a spacer - // variable to block it off. - bytes32 disputeGameFactorySlot = bytes32(uint256(keccak256("systemconfig.disputegamefactory")) - 1); - Storage.setBytes32(disputeGameFactorySlot, bytes32(0)); - } - /// @notice Returns the minimum L2 gas limit that can be safely set for the system to /// operate. The L2 gas limit must be larger than or equal to the amount of /// gas that is allocated for deposits per block plus the amount of gas that @@ -419,6 +417,21 @@ contract SystemConfig is ProxyAdminOwnedBase, OwnableUpgradeable, Reinitializabl emit ConfigUpdate(VERSION, UpdateType.EIP_1559_PARAMS, data); } + /// @notice Updates the minimum base fee. Can only be called by the owner. + /// Setting this value to 0 is equivalent to disabling the min base fee feature + /// @param _minBaseFee New minimum base fee. + function setMinBaseFee(uint64 _minBaseFee) external onlyOwner { + _setMinBaseFee(_minBaseFee); + } + + /// @notice Internal function for updating the minimum base fee. + function _setMinBaseFee(uint64 _minBaseFee) internal { + minBaseFee = _minBaseFee; + + bytes memory data = abi.encode(_minBaseFee); + emit ConfigUpdate(VERSION, UpdateType.MIN_BASE_FEE, data); + } + /// @notice Updates the operator fee parameters. Can only be called by the owner. /// @param _operatorFeeScalar operator fee scalar. /// @param _operatorFeeConstant operator fee constant. @@ -484,12 +497,67 @@ contract SystemConfig is ProxyAdminOwnedBase, OwnableUpgradeable, Reinitializabl _resourceConfig = _config; } - /// @notice Returns the current pause state of the system by checking if the SuperchainConfig is paused for this - /// chain's ETHLockbox. + /// @notice Sets a feature flag enabled or disabled. 
Can only be called by the ProxyAdmin or + /// its owner. + /// @param _feature Feature to set. + /// @param _enabled Whether the feature should be enabled or disabled. + function setFeature(bytes32 _feature, bool _enabled) external { + // Features can only be set by the ProxyAdmin or its owner. + _assertOnlyProxyAdminOrProxyAdminOwner(); + + // As a sanity check, prevent users from enabling the feature if already enabled or + // disabling the feature if already disabled. This helps to prevent accidental misuse. + if (_enabled == isFeatureEnabled[_feature]) { + revert SystemConfig_InvalidFeatureState(); + } + + // Handle feature-specific safety logic here. + if (_feature == Features.ETH_LOCKBOX) { + // It would probably better to check that the ETHLockbox contract is set inside the + // OptimismPortal2 contract before you're allowed to enable the feature here, but the + // portal checks that the feature is set before allowing you to set the lockbox, so + // these checks are good enough. + + // Lockbox shouldn't be unset if the ethLockbox address is still configured in the + // OptimismPortal2 contract. Doing so would cause the system to start keeping ETH in + // the portal. This check means there's no way to stop using ETHLockbox at the moment + // after it's been configured (which is expected). + if ( + isFeatureEnabled[_feature] && !_enabled + && address(IOptimismPortal2(payable(optimismPortal())).ethLockbox()) != address(0) + ) { + revert SystemConfig_InvalidFeatureState(); + } + + // Lockbox can't be set or unset if the system is currently paused because it would + // change the pause identifier which would potentially cause the system to become + // unpaused unexpectedly. + if (paused()) { + revert SystemConfig_InvalidFeatureState(); + } + } + + // Set the feature. + isFeatureEnabled[_feature] = _enabled; + + // Emit an event. + emit FeatureSet(_feature, _enabled); + } + + /// @notice Returns the current pause state for this network. 
If the network is using + /// ETHLockbox, the system is paused if either the global pause is active or the pause + /// is active where the ETHLockbox address is used as the identifier. If the network is + /// not using ETHLockbox, the system is paused if either the global pause is active or + /// the pause is active where the OptimismPortal address is used as the identifier. /// @return bool True if the system is paused, false otherwise. function paused() public view returns (bool) { - IETHLockbox lockbox = IOptimismPortal2(payable(optimismPortal())).ethLockbox(); - return superchainConfig.paused(address(lockbox)) || superchainConfig.paused(address(0)); + // Determine the appropriate chain identifier based on the feature flags. + address identifier = isFeatureEnabled[Features.ETH_LOCKBOX] + ? address(IOptimismPortal2(payable(optimismPortal())).ethLockbox()) + : address(optimismPortal()); + + // Check if either global or local pause is active. + return superchainConfig.paused(address(0)) || superchainConfig.paused(identifier); } /// @notice Returns the guardian address of the SuperchainConfig. diff --git a/packages/contracts-bedrock/src/cannon/MIPS64.sol b/packages/contracts-bedrock/src/cannon/MIPS64.sol index ef632155a8604..c00c705f4a610 100644 --- a/packages/contracts-bedrock/src/cannon/MIPS64.sol +++ b/packages/contracts-bedrock/src/cannon/MIPS64.sol @@ -66,8 +66,8 @@ contract MIPS64 is ISemver { } /// @notice The semantic version of the MIPS64 contract. - /// @custom:semver 1.8.0 - string public constant version = "1.8.0"; + /// @custom:semver 1.9.0 + string public constant version = "1.9.0"; /// @notice The preimage oracle contract. 
IPreimageOracle internal immutable ORACLE; @@ -273,8 +273,7 @@ contract MIPS64 is ISemver { memProofOffset: MIPS64Memory.memoryProofOffset(MEM_PROOF_OFFSET, 1), insn: insn, opcode: opcode, - fun: fun, - stateVersion: STATE_VERSION + fun: fun }); bool memUpdated; uint64 effMemAddr; @@ -568,9 +567,7 @@ contract MIPS64 is ISemver { } else if (syscall_no == sys.SYS_MUNMAP) { // ignored } else if (syscall_no == sys.SYS_MPROTECT) { - if (!st.featuresForVersion(STATE_VERSION).supportNoopMprotect) { - revert("MIPS64: unimplemented syscall"); - } + // ignored } else if (syscall_no == sys.SYS_GETAFFINITY) { // ignored } else if (syscall_no == sys.SYS_MADVISE) { @@ -630,10 +627,6 @@ contract MIPS64 is ISemver { } else if (syscall_no == sys.SYS_LSEEK) { // ignored } else if (syscall_no == sys.SYS_EVENTFD2) { - if (!st.featuresForVersion(STATE_VERSION).supportMinimalSysEventFd2) { - revert("MIPS64: unimplemented syscall"); - } - // a0 = initial value, a1 = flags // Validate flags if (a1 & sys.EFD_NONBLOCK == 0) { diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol index 2c0f3acfa3c4f..42c150af3b304 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPS64Instructions.sol @@ -34,8 +34,6 @@ library MIPS64Instructions { uint32 opcode; /// @param fun The function value parsed from insn. uint32 fun; - /// @param stateVersion The state version. - uint256 stateVersion; } struct ExecuteMipsInstructionParams { @@ -51,8 +49,6 @@ library MIPS64Instructions { uint64 rt; /// @param mem The value fetched from memory for the current instruction. uint64 mem; - /// @param stateVersion The state version. - uint256 stateVersion; } /// @param _pc The program counter. 
@@ -181,8 +177,7 @@ library MIPS64Instructions { fun: _args.fun, rs: rs, rt: rt, - mem: mem, - stateVersion: _args.stateVersion + mem: mem }); uint64 val = executeMipsInstruction(params) & U64_MASK; @@ -248,7 +243,6 @@ library MIPS64Instructions { uint64 rs = _args.rs; uint64 rt = _args.rt; uint64 mem = _args.mem; - uint256 stateVersion = _args.stateVersion; unchecked { if (opcode == 0 || (opcode >= 8 && opcode < 0xF) || opcode == 0x18 || opcode == 0x19) { assembly { @@ -494,7 +488,7 @@ library MIPS64Instructions { return i; } // dclz, dclo - else if (st.featuresForVersion(stateVersion).supportDclzDclo && (fun == 0x24 || fun == 0x25)) { + else if (fun == 0x24 || fun == 0x25) { if (fun == 0x24) { rs = ~rs; } diff --git a/packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol b/packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol index 9c74a730dc60f..c19d1f66a0005 100644 --- a/packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol +++ b/packages/contracts-bedrock/src/cannon/libraries/MIPS64State.sol @@ -13,9 +13,6 @@ library MIPS64State { } struct Features { - bool supportMinimalSysEventFd2; - bool supportDclzDclo; - bool supportNoopMprotect; bool supportWorkingSysGetRandom; } @@ -26,11 +23,6 @@ library MIPS64State { } function featuresForVersion(uint256 _version) internal pure returns (Features memory features_) { - if (_version >= 7) { - features_.supportMinimalSysEventFd2 = true; - features_.supportDclzDclo = true; - features_.supportNoopMprotect = true; - } if (_version >= 8) { features_.supportWorkingSysGetRandom = true; } diff --git a/packages/contracts-bedrock/src/dispute/DisputeGameFactory.sol b/packages/contracts-bedrock/src/dispute/DisputeGameFactory.sol index 3453d752c4461..f9a9e2bf68b64 100644 --- a/packages/contracts-bedrock/src/dispute/DisputeGameFactory.sol +++ b/packages/contracts-bedrock/src/dispute/DisputeGameFactory.sol @@ -36,6 +36,11 @@ contract DisputeGameFactory is ProxyAdminOwnedBase, ReinitializableBase, Ownable 
/// @param gameType The type of the DisputeGame. event ImplementationSet(address indexed impl, GameType indexed gameType); + /// @notice Emitted when a game type's implementation args are set + /// @param gameType The type of the DisputeGame. + /// @param args The constructor args for the game type. + event ImplementationArgsSet(GameType indexed gameType, bytes args); + /// @notice Emitted when a game type's initialization bond is updated /// @param gameType The type of the DisputeGame. /// @param newBond The new bond (in wei) for initializing the game type. @@ -51,8 +56,8 @@ contract DisputeGameFactory is ProxyAdminOwnedBase, ReinitializableBase, Ownable } /// @notice Semantic version. - /// @custom:semver 1.2.0 - string public constant version = "1.2.0"; + /// @custom:semver 1.3.0 + string public constant version = "1.3.0"; /// @notice `gameImpls` is a mapping that maps `GameType`s to their respective /// `IDisputeGame` implementations. @@ -69,6 +74,10 @@ contract DisputeGameFactory is ProxyAdminOwnedBase, ReinitializableBase, Ownable /// efficiently track dispute games. GameId[] internal _disputeGameList; + /// @notice Maps each Game Type to an associated configuration to use with it, but because we need to pass them + /// to a clone with immutable args so they have to be stored as arbitrary bytes unfortunately + mapping(GameType => bytes) public gameArgs; + /// @notice Constructs a new DisputeGameFactory contract. constructor() OwnableUpgradeable() ReinitializableBase(1) { _disableInitializers(); @@ -159,15 +168,18 @@ contract DisputeGameFactory is ProxyAdminOwnedBase, ReinitializableBase, Ownable // Clone the implementation contract and initialize it with the given parameters. 
// // CWIA Calldata Layout: - // ┌──────────────┬────────────────────────────────────┐ - // │ Bytes │ Description │ - // ├──────────────┼────────────────────────────────────┤ - // │ [0, 20) │ Game creator address │ - // │ [20, 52) │ Root claim │ - // │ [52, 84) │ Parent block hash at creation time │ - // │ [84, 84 + n) │ Extra data (opaque) │ - // └──────────────┴────────────────────────────────────┘ - proxy_ = IDisputeGame(address(impl).clone(abi.encodePacked(msg.sender, _rootClaim, parentHash, _extraData))); + // ┌──────────────────────┬─────────────────────────────────────┐ + // │ Bytes │ Description │ + // ├──────────────────────┼─────────────────────────────────────┤ + // │ [0, 20) │ Game creator address │ + // │ [20, 52) │ Root claim │ + // │ [52, 84) │ Parent block hash at creation time │ + // │ [84, 84 + n) │ Extra data (opaque) │ + // │ [84 + n, 84 + n + m) │ Implementation args (opaque) │ + // └──────────────────────┴─────────────────────────────────────┘ + proxy_ = IDisputeGame( + address(impl).clone(abi.encodePacked(msg.sender, _rootClaim, parentHash, _extraData, gameArgs[_gameType])) + ); proxy_.initialize{ value: msg.value }(); // Compute the unique identifier for the dispute game. @@ -268,6 +280,19 @@ contract DisputeGameFactory is ProxyAdminOwnedBase, ReinitializableBase, Ownable emit ImplementationSet(address(_impl), _gameType); } + /// @notice Sets the implementation contract for a specific `GameType`. + /// @dev May only be called by the `owner`. + /// @param _gameType The type of the DisputeGame. + /// @param _impl The implementation contract for the given `GameType`. 
+ /// @param _args The constructor args to be passed for each implementation + function setImplementation(GameType _gameType, IDisputeGame _impl, bytes calldata _args) external onlyOwner { + gameImpls[_gameType] = _impl; + gameArgs[_gameType] = _args; + + emit ImplementationSet(address(_impl), _gameType); + emit ImplementationArgsSet(_gameType, _args); + } + /// @notice Sets the bond (in wei) for initializing a game type. /// @dev May only be called by the `owner`. /// @param _gameType The type of the DisputeGame. diff --git a/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol b/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol index bedb826f97f37..8689221a03a8a 100644 --- a/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol @@ -57,7 +57,8 @@ import { InvalidBondDistributionMode, GameNotResolved, ReservedGameType, - GamePaused + GamePaused, + BadExtraData } from "src/dispute/lib/Errors.sol"; // Interfaces @@ -171,9 +172,9 @@ contract FaultDisputeGame is Clone, ISemver { uint256 internal constant HEADER_BLOCK_NUMBER_INDEX = 8; /// @notice Semantic version. - /// @custom:semver 1.7.0 + /// @custom:semver 1.8.0 function version() public pure virtual returns (string memory) { - return "1.7.0"; + return "1.8.0"; } /// @notice The starting timestamp of the game @@ -312,20 +313,14 @@ contract FaultDisputeGame is Clone, ISemver { // in the factory, but are not used by the game, which would allow for multiple dispute games for the same // output proposal to be created. 
// - // Expected length: 0x7A - // - 0x04 selector - // - 0x14 creator address - // - 0x20 root claim - // - 0x20 l1 head - // - 0x20 extraData - // - 0x02 CWIA bytes - assembly { - if iszero(eq(calldatasize(), 0x7A)) { - // Store the selector for `BadExtraData()` & revert - mstore(0x00, 0x9824bdab) - revert(0x1C, 0x04) - } - } + // Expected length: 122 bytes + // - 4 bytes selector + // - 20 bytes creator address + // - 32 bytes root claim + // - 32 bytes l1 head + // - 32 bytes extraData + // - 2 bytes CWIA length + if (msg.data.length != 122) revert BadExtraData(); // Do not allow the game to be initialized if the root claim corresponds to a block at or before the // configured starting block number. @@ -645,7 +640,7 @@ contract FaultDisputeGame is Clone, ISemver { /// @notice The l2BlockNumber of the disputed output root in the `L2OutputOracle`. function l2BlockNumber() public pure returns (uint256 l2BlockNumber_) { - l2BlockNumber_ = _getArgUint256(0x54); + l2BlockNumber_ = _getArgUint256(84); } /// @notice The l2SequenceNumber of the disputed output root in the `L2OutputOracle` (in this case - block number). @@ -860,21 +855,21 @@ contract FaultDisputeGame is Clone, ISemver { /// @dev `clones-with-immutable-args` argument #1 /// @return creator_ The creator of the dispute game. function gameCreator() public pure returns (address creator_) { - creator_ = _getArgAddress(0x00); + creator_ = _getArgAddress(0); } /// @notice Getter for the root claim. /// @dev `clones-with-immutable-args` argument #2 /// @return rootClaim_ The root claim of the DisputeGame. function rootClaim() public pure returns (Claim rootClaim_) { - rootClaim_ = Claim.wrap(_getArgBytes32(0x14)); + rootClaim_ = Claim.wrap(_getArgBytes32(20)); } /// @notice Getter for the parent hash of the L1 block when the dispute game was created. /// @dev `clones-with-immutable-args` argument #3 /// @return l1Head_ The parent hash of the L1 block when the dispute game was created. 
function l1Head() public pure returns (Hash l1Head_) { - l1Head_ = Hash.wrap(_getArgBytes32(0x34)); + l1Head_ = Hash.wrap(_getArgBytes32(52)); } /// @notice Getter for the extra data. @@ -883,7 +878,7 @@ contract FaultDisputeGame is Clone, ISemver { function extraData() public pure returns (bytes memory extraData_) { // The extra data starts at the second word within the cwia calldata and // is 32 bytes long. - extraData_ = _getArgBytes(0x54, 0x20); + extraData_ = _getArgBytes(84, 32); } /// @notice A compliant implementation of this interface should return the components of the diff --git a/packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol b/packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol index 8e5574512dd11..f356190ccdd7e 100644 --- a/packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/PermissionedDisputeGame.sol @@ -32,9 +32,9 @@ contract PermissionedDisputeGame is FaultDisputeGame { } /// @notice Semantic version. - /// @custom:semver 1.7.0 + /// @custom:semver 1.8.0 function version() public pure override returns (string memory) { - return "1.7.0"; + return "1.8.0"; } /// @param _params Parameters for creating a new FaultDisputeGame. diff --git a/packages/contracts-bedrock/src/dispute/SuperFaultDisputeGame.sol b/packages/contracts-bedrock/src/dispute/SuperFaultDisputeGame.sol index 17c8a5bbd7b97..ccb4e882395bd 100644 --- a/packages/contracts-bedrock/src/dispute/SuperFaultDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/SuperFaultDisputeGame.sol @@ -50,7 +50,8 @@ import { InvalidBondDistributionMode, GameNotResolved, ReservedGameType, - GamePaused + GamePaused, + BadExtraData } from "src/dispute/lib/Errors.sol"; // Interfaces @@ -164,9 +165,9 @@ contract SuperFaultDisputeGame is Clone, ISemver { Position internal constant ROOT_POSITION = Position.wrap(1); /// @notice Semantic version. 
- /// @custom:semver 0.4.0 + /// @custom:semver 0.5.0 function version() public pure virtual returns (string memory) { - return "0.4.0"; + return "0.5.0"; } /// @notice The starting timestamp of the game @@ -302,20 +303,14 @@ contract SuperFaultDisputeGame is Clone, ISemver { // in the factory, but are not used by the game, which would allow for multiple dispute games for the same // output proposal to be created. // - // Expected length: 0x7A - // - 0x04 selector - // - 0x14 creator address - // - 0x20 root claim - // - 0x20 l1 head - // - 0x20 extraData - // - 0x02 CWIA bytes - assembly { - if iszero(eq(calldatasize(), 0x7A)) { - // Store the selector for `BadExtraData()` & revert - mstore(0x00, 0x9824bdab) - revert(0x1C, 0x04) - } - } + // Expected length: 122 bytes + // - 4 bytes selector + // - 20 bytes creator address + // - 32 bytes root claim + // - 32 bytes l1 head + // - 32 bytes extraData + // - 2 bytes CWIA length + if (msg.data.length != 122) revert BadExtraData(); // Do not allow the game to be initialized if the root claim corresponds to a l2 sequence number (timestamp) at // or before the configured starting sequence number. @@ -618,7 +613,7 @@ contract SuperFaultDisputeGame is Clone, ISemver { /// @notice The l2SequenceNumber (timestamp) of the disputed super root in game root claim. function l2SequenceNumber() public pure returns (uint256 l2SequenceNumber_) { - l2SequenceNumber_ = _getArgUint256(0x54); + l2SequenceNumber_ = _getArgUint256(84); } /// @notice Only the starting sequence number (timestamp) of the game. @@ -772,21 +767,21 @@ contract SuperFaultDisputeGame is Clone, ISemver { /// @dev `clones-with-immutable-args` argument #1 /// @return creator_ The creator of the dispute game. function gameCreator() public pure returns (address creator_) { - creator_ = _getArgAddress(0x00); + creator_ = _getArgAddress(0); } /// @notice Getter for the root claim. 
/// @dev `clones-with-immutable-args` argument #2 /// @return rootClaim_ The root claim of the DisputeGame. function rootClaim() public pure returns (Claim rootClaim_) { - rootClaim_ = Claim.wrap(_getArgBytes32(0x14)); + rootClaim_ = Claim.wrap(_getArgBytes32(20)); } /// @notice Getter for the parent hash of the L1 block when the dispute game was created. /// @dev `clones-with-immutable-args` argument #3 /// @return l1Head_ The parent hash of the L1 block when the dispute game was created. function l1Head() public pure returns (Hash l1Head_) { - l1Head_ = Hash.wrap(_getArgBytes32(0x34)); + l1Head_ = Hash.wrap(_getArgBytes32(52)); } /// @notice Getter for the extra data. @@ -795,7 +790,7 @@ contract SuperFaultDisputeGame is Clone, ISemver { function extraData() public pure returns (bytes memory extraData_) { // The extra data starts at the second word within the cwia calldata and // is 32 bytes long. - extraData_ = _getArgBytes(0x54, 0x20); + extraData_ = _getArgBytes(84, 32); } /// @notice A compliant implementation of this interface should return the components of the diff --git a/packages/contracts-bedrock/src/dispute/SuperPermissionedDisputeGame.sol b/packages/contracts-bedrock/src/dispute/SuperPermissionedDisputeGame.sol index d41e3f0171e33..429bef26ce1ff 100644 --- a/packages/contracts-bedrock/src/dispute/SuperPermissionedDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/SuperPermissionedDisputeGame.sol @@ -33,9 +33,9 @@ contract SuperPermissionedDisputeGame is SuperFaultDisputeGame { } /// @notice Semantic version. - /// @custom:semver 0.4.0 + /// @custom:semver 0.5.0 function version() public pure override returns (string memory) { - return "0.4.0"; + return "0.5.0"; } /// @param _params Parameters for creating a new FaultDisputeGame. 
diff --git a/packages/contracts-bedrock/src/dispute/lib/Types.sol b/packages/contracts-bedrock/src/dispute/lib/Types.sol index d7ca36663df5f..78e3e99d37d48 100644 --- a/packages/contracts-bedrock/src/dispute/lib/Types.sol +++ b/packages/contracts-bedrock/src/dispute/lib/Types.sol @@ -70,6 +70,15 @@ library GameTypes { /// @notice A dispute game type that uses OP Succinct GameType internal constant OP_SUCCINCT = GameType.wrap(6); + /// @notice A dispute game type that uses the asterisc vm with Kona (Super Roots). + GameType internal constant SUPER_ASTERISC_KONA = GameType.wrap(7); + + /// @notice A dispute game type that uses the cannon vm with Kona. + GameType internal constant CANNON_KONA = GameType.wrap(8); + + /// @notice A dispute game type that uses the cannon vm with Kona (Super Roots). + GameType internal constant SUPER_CANNON_KONA = GameType.wrap(9); + /// @notice A dispute game type with short game duration for testing withdrawals. /// Not intended for production use. GameType internal constant FAST = GameType.wrap(254); diff --git a/packages/contracts-bedrock/src/dispute/v2/FaultDisputeGameV2.sol b/packages/contracts-bedrock/src/dispute/v2/FaultDisputeGameV2.sol new file mode 100644 index 0000000000000..6f50e7c612f39 --- /dev/null +++ b/packages/contracts-bedrock/src/dispute/v2/FaultDisputeGameV2.sol @@ -0,0 +1,1319 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Libraries +import { Math } from "@openzeppelin/contracts/utils/math/Math.sol"; +import { FixedPointMathLib } from "@solady/utils/FixedPointMathLib.sol"; +import { Clone } from "@solady/utils/Clone.sol"; +import { Types } from "src/libraries/Types.sol"; +import { Hashing } from "src/libraries/Hashing.sol"; +import { RLPReader } from "src/libraries/rlp/RLPReader.sol"; +import { + GameStatus, + GameType, + BondDistributionMode, + Claim, + Clock, + Duration, + Timestamp, + Hash, + Proposal, + LibClock, + LocalPreimageKey, + VMStatuses +} from "src/dispute/lib/Types.sol"; +import { 
Position, LibPosition } from "src/dispute/lib/LibPosition.sol"; +import { + InvalidParent, + ClaimAlreadyExists, + ClaimAlreadyResolved, + OutOfOrderResolution, + InvalidChallengePeriod, + InvalidSplitDepth, + InvalidClockExtension, + MaxDepthTooLarge, + AnchorRootNotFound, + AlreadyInitialized, + UnexpectedRootClaim, + GameNotInProgress, + InvalidPrestate, + ValidStep, + GameDepthExceeded, + L2BlockNumberChallenged, + InvalidDisputedClaimIndex, + ClockTimeExceeded, + DuplicateStep, + CannotDefendRootClaim, + IncorrectBondAmount, + InvalidLocalIdent, + BlockNumberMatches, + InvalidHeaderRLP, + ClockNotExpired, + BondTransferFailed, + NoCreditToClaim, + InvalidOutputRootProof, + ClaimAboveSplit, + GameNotFinalized, + InvalidBondDistributionMode, + GameNotResolved, + ReservedGameType, + GamePaused, + BadExtraData +} from "src/dispute/lib/Errors.sol"; + +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; +import { IBigStepper, IPreimageOracle } from "interfaces/dispute/IBigStepper.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; + +/// @title FaultDisputeGameV2 +/// @notice An implementation of the `IFaultDisputeGame` interface. +contract FaultDisputeGameV2 is Clone, ISemver { + //////////////////////////////////////////////////////////////// + // Structs // + //////////////////////////////////////////////////////////////// + + /// @notice The `ClaimData` struct represents the data associated with a Claim. + struct ClaimData { + uint32 parentIndex; + address counteredBy; + address claimant; + uint128 bond; + Claim claim; + Position position; + Clock clock; + } + + /// @notice The `ResolutionCheckpoint` struct represents the data associated with an in-progress claim resolution. 
+ struct ResolutionCheckpoint { + bool initialCheckpointComplete; + uint32 subgameIndex; + Position leftmostPosition; + address counteredBy; + } + + /// @notice Parameters for creating a new FaultDisputeGame. We place this into a struct to + /// avoid stack-too-deep errors when compiling without the optimizer enabled. + struct GameConstructorParams { + GameType gameType; + uint256 maxGameDepth; + uint256 splitDepth; + Duration clockExtension; + Duration maxClockDuration; + } + + //////////////////////////////////////////////////////////////// + // Events // + //////////////////////////////////////////////////////////////// + + /// @notice Emitted when the game is resolved. + /// @param status The status of the game after resolution. + event Resolved(GameStatus indexed status); + + /// @notice Emitted when a new claim is added to the DAG by `claimant` + /// @param parentIndex The index within the `claimData` array of the parent claim + /// @param claim The claim being added + /// @param claimant The address of the claimant + event Move(uint256 indexed parentIndex, Claim indexed claim, address indexed claimant); + + /// @notice Emitted when the game is closed. + event GameClosed(BondDistributionMode bondDistributionMode); + + //////////////////////////////////////////////////////////////// + // State Vars // + //////////////////////////////////////////////////////////////// + + /// @notice The max depth of the game. + uint256 internal immutable MAX_GAME_DEPTH; + + /// @notice The max depth of the output bisection portion of the position tree. Immediately beneath + /// this depth, execution trace bisection begins. + uint256 internal immutable SPLIT_DEPTH; + + /// @notice The maximum duration that may accumulate on a team's chess clock before they may no longer respond. + Duration internal immutable MAX_CLOCK_DURATION; + + /// @notice The game type ID. + GameType internal immutable GAME_TYPE; + + /// @notice The duration of the clock extension. 
Will be doubled if the grandchild is the root claim of an execution + /// trace bisection subgame. + Duration internal immutable CLOCK_EXTENSION; + + /// @notice The global root claim's position is always at gindex 1. + Position internal constant ROOT_POSITION = Position.wrap(1); + + /// @notice The index of the block number in the RLP-encoded block header. + /// @dev Consensus encoding reference: + /// https://github.com/paradigmxyz/reth/blob/5f82993c23164ce8ccdc7bf3ae5085205383a5c8/crates/primitives/src/header.rs#L368 + uint256 internal constant HEADER_BLOCK_NUMBER_INDEX = 8; + + /// @notice Semantic version. + /// @custom:semver 2.1.0 + function version() public pure virtual returns (string memory) { + return "2.1.0"; + } + + /// @notice The starting timestamp of the game + Timestamp public createdAt; + + /// @notice The timestamp of the game's global resolution. + Timestamp public resolvedAt; + + /// @notice Returns the current status of the game. + GameStatus public status; + + /// @notice Flag for the `initialize` function to prevent re-initialization. + bool internal initialized; + + /// @notice Flag for whether or not the L2 block number claim has been invalidated via `challengeRootL2Block`. + bool public l2BlockNumberChallenged; + + /// @notice The challenger of the L2 block number claim. Should always be `address(0)` if `l2BlockNumberChallenged` + /// is `false`. Should be the address of the challenger if `l2BlockNumberChallenged` is `true`. + address public l2BlockNumberChallenger; + + /// @notice An append-only array of all claims made during the dispute game. + ClaimData[] public claimData; + + /// @notice Credited balances for winning participants. + mapping(address => uint256) public normalModeCredit; + + /// @notice A mapping to allow for constant-time lookups of existing claims. + mapping(Hash => bool) public claims; + + /// @notice A mapping of subgames rooted at a claim index to other claim indices in the subgame. 
+ mapping(uint256 => uint256[]) public subgames; + + /// @notice A mapping of resolved subgames rooted at a claim index. + mapping(uint256 => bool) public resolvedSubgames; + + /// @notice A mapping of claim indices to resolution checkpoints. + mapping(uint256 => ResolutionCheckpoint) public resolutionCheckpoints; + + /// @notice The latest finalized output root, serving as the anchor for output bisection. + Proposal public startingOutputRoot; + + /// @notice A boolean for whether or not the game type was respected when the game was created. + bool public wasRespectedGameTypeWhenCreated; + + /// @notice A mapping of each claimant's refund mode credit. + mapping(address => uint256) public refundModeCredit; + + /// @notice A mapping of whether a claimant has unlocked their credit. + mapping(address => bool) public hasUnlockedCredit; + + /// @notice The bond distribution mode of the game. + BondDistributionMode public bondDistributionMode; + + /// @param _params Parameters for creating a new FaultDisputeGame. + constructor(GameConstructorParams memory _params) { + // The max game depth may not be greater than `LibPosition.MAX_POSITION_BITLEN - 1`. + if (_params.maxGameDepth > LibPosition.MAX_POSITION_BITLEN - 1) revert MaxDepthTooLarge(); + + // The split depth plus one cannot be greater than or equal to the max game depth. We add + // an additional depth to the split depth to avoid a bug in trace ancestor lookup. We know + // that the case where the split depth is the max value for uint256 is equivalent to the + // second check though we do need to check it explicitly to avoid an overflow. + if (_params.splitDepth == type(uint256).max || _params.splitDepth + 1 >= _params.maxGameDepth) { + revert InvalidSplitDepth(); + } + + // The split depth cannot be 0 or 1 to stay in bounds of clock extension arithmetic. 
+ if (_params.splitDepth < 2) revert InvalidSplitDepth(); + + // Block type(uint32).max from being used as a game type so that it can be used in the + // OptimismPortal respected game type trick. + if (_params.gameType.raw() == type(uint32).max) revert ReservedGameType(); + + // Validate clock extension bounds that don't require VM access. + // The split depth extension is always clockExtension * 2. + uint256 splitDepthExtension = uint256(_params.clockExtension.raw()) * 2; + + // The split depth extension must fit into a uint64. + if (splitDepthExtension > type(uint64).max) revert InvalidClockExtension(); + + // The split depth extension may not be greater than the maximum clock duration. + if (uint64(splitDepthExtension) > _params.maxClockDuration.raw()) revert InvalidClockExtension(); + + // Set up initial game state. + GAME_TYPE = _params.gameType; + MAX_GAME_DEPTH = _params.maxGameDepth; + SPLIT_DEPTH = _params.splitDepth; + CLOCK_EXTENSION = _params.clockExtension; + MAX_CLOCK_DURATION = _params.maxClockDuration; + } + + /// @notice Initializes the contract. + /// @dev This function may only be called once. + function initialize() public payable virtual { + // SAFETY: Any revert in this function will bubble up to the DisputeGameFactory and + // prevent the game from being created. + // + // Implicit assumptions: + // - The `gameStatus` state variable defaults to 0, which is `GameStatus.IN_PROGRESS` + // - The dispute game factory will enforce the required bond to initialize the game. + // + // Explicit checks: + // - The game must not have already been initialized. + // - An output root cannot be proposed at or before the starting block number. + + // INVARIANT: The game must not have already been initialized. + if (initialized) revert AlreadyInitialized(); + + // Revert if the calldata size is not the expected length. 
+ // + // This is to prevent adding extra or omitting bytes from to `extraData` that result in a different game UUID + // in the factory, but are not used by the game, which would allow for multiple dispute games for the same + // output proposal to be created. + if (msg.data.length != expectedInitCallDataLength()) revert BadExtraData(); + + // Grab the latest anchor root. + (Hash root, uint256 rootBlockNumber) = anchorStateRegistry().getAnchorRoot(); + + // Should only happen if this is a new game type that hasn't been set up yet. + if (root.raw() == bytes32(0)) revert AnchorRootNotFound(); + + // Set the starting proposal. + startingOutputRoot = Proposal({ l2SequenceNumber: rootBlockNumber, root: root }); + + // Do not allow the game to be initialized if the root claim corresponds to a block at or before the + // configured starting block number. + if (l2BlockNumber() <= rootBlockNumber) revert UnexpectedRootClaim(rootClaim()); + + // Validate parameters that require access to the VM. + // The PreimageOracle challenge period must fit into uint64 so we can safely use it here. + if (vm().oracle().challengePeriod() > type(uint64).max) revert InvalidChallengePeriod(); + + // Determine the maximum clock extension which is either the split depth extension or the + // maximum game depth extension depending on the configuration of these contracts. + uint256 splitDepthExtension = uint256(CLOCK_EXTENSION.raw()) * 2; + uint256 maxGameDepthExtension = uint256(CLOCK_EXTENSION.raw()) + uint64(vm().oracle().challengePeriod()); + uint256 maxClockExtension = Math.max(splitDepthExtension, maxGameDepthExtension); + + // The maximum clock extension must fit into a uint64. + if (maxClockExtension > type(uint64).max) revert InvalidClockExtension(); + + // The maximum clock extension may not be greater than the maximum clock duration. 
+ if (uint64(maxClockExtension) > MAX_CLOCK_DURATION.raw()) revert InvalidClockExtension(); + + // Set the root claim + claimData.push( + ClaimData({ + parentIndex: type(uint32).max, + counteredBy: address(0), + claimant: gameCreator(), + bond: uint128(msg.value), + claim: rootClaim(), + position: ROOT_POSITION, + clock: LibClock.wrap(Duration.wrap(0), Timestamp.wrap(uint64(block.timestamp))) + }) + ); + + // Set the game as initialized. + initialized = true; + + // Deposit the bond. + refundModeCredit[gameCreator()] += msg.value; + weth().deposit{ value: msg.value }(); + + // Set the game's starting timestamp + createdAt = Timestamp.wrap(uint64(block.timestamp)); + + // Set whether the game type was respected when the game was created. + wasRespectedGameTypeWhenCreated = + GameType.unwrap(anchorStateRegistry().respectedGameType()) == GameType.unwrap(GAME_TYPE); + } + + /// @notice Returns the expected calldata length for the initialize method + function expectedInitCallDataLength() internal pure returns (uint256) { + // Expected length: 6 bytes + immutable args byte count + // - 4 bytes: selector + // - 2 bytes: CWIA length prefix + // - n bytes: Immutable args data + return 6 + immutableArgsByteCount(); + } + + /// @notice Returns the byte count of the immutable args for this contract. + function immutableArgsByteCount() internal pure virtual returns (uint256) { + // Expected length: 240 bytes + // - 20 bytes: creator address + // - 32 bytes: root claim + // - 32 bytes: l1 head + // - 32 bytes: extraData + // - 32 bytes: absolutePrestate + // - 20 bytes: vm address + // - 20 bytes: anchorStateRegistry address + // - 20 bytes: weth address + // - 32 bytes: l2ChainId + return 240; + } + + //////////////////////////////////////////////////////////////// + // `IFaultDisputeGame` impl // + //////////////////////////////////////////////////////////////// + + /// @notice Perform an instruction step via an on-chain fault proof processor. 
+ /// @dev This function should point to a fault proof processor in order to execute + /// a step in the fault proof program on-chain. The interface of the fault proof + /// processor contract should adhere to the `IBigStepper` interface. + /// @param _claimIndex The index of the challenged claim within `claimData`. + /// @param _isAttack Whether or not the step is an attack or a defense. + /// @param _stateData The stateData of the step is the preimage of the claim at the given + /// prestate, which is at `_stateIndex` if the move is an attack and `_claimIndex` if + /// the move is a defense. If the step is an attack on the first instruction, it is + /// the absolute prestate of the fault proof VM. + /// @param _proof Proof to access memory nodes in the VM's merkle state tree. + function step( + uint256 _claimIndex, + bool _isAttack, + bytes calldata _stateData, + bytes calldata _proof + ) + public + virtual + { + // INVARIANT: Steps cannot be made unless the game is currently in progress. + if (status != GameStatus.IN_PROGRESS) revert GameNotInProgress(); + + // Get the parent. If it does not exist, the call will revert with OOB. + ClaimData storage parent = claimData[_claimIndex]; + + // Pull the parent position out of storage. + Position parentPos = parent.position; + // Determine the position of the step. + Position stepPos = parentPos.move(_isAttack); + + // INVARIANT: A step cannot be made unless the move position is 1 below the `MAX_GAME_DEPTH` + if (stepPos.depth() != MAX_GAME_DEPTH + 1) revert InvalidParent(); + + // Determine the expected pre & post states of the step. + Claim preStateClaim; + ClaimData storage postState; + if (_isAttack) { + // If the step position's index at depth is 0, the prestate is the absolute + // prestate. + // If the step is an attack at a trace index > 0, the prestate exists elsewhere in + // the game state. 
+ // NOTE: We localize the `indexAtDepth` for the current execution trace subgame by finding + // the remainder of the index at depth divided by 2 ** (MAX_GAME_DEPTH - SPLIT_DEPTH), + // which is the number of leaves in each execution trace subgame. This is so that we can + // determine whether or not the step position is represents the `ABSOLUTE_PRESTATE`. + preStateClaim = (stepPos.indexAtDepth() % (1 << (MAX_GAME_DEPTH - SPLIT_DEPTH))) == 0 + ? absolutePrestate() + : _findTraceAncestor(Position.wrap(parentPos.raw() - 1), parent.parentIndex, false).claim; + // For all attacks, the poststate is the parent claim. + postState = parent; + } else { + // If the step is a defense, the poststate exists elsewhere in the game state, + // and the parent claim is the expected pre-state. + preStateClaim = parent.claim; + postState = _findTraceAncestor(Position.wrap(parentPos.raw() + 1), parent.parentIndex, false); + } + + // INVARIANT: The prestate is always invalid if the passed `_stateData` is not the + // preimage of the prestate claim hash. + // We ignore the highest order byte of the digest because it is used to + // indicate the VM Status and is added after the digest is computed. + if (keccak256(_stateData) << 8 != preStateClaim.raw() << 8) revert InvalidPrestate(); + + // Compute the local preimage context for the step. + Hash uuid = _findLocalContext(_claimIndex); + + // INVARIANT: If a step is an attack, the poststate is valid if the step produces + // the same poststate hash as the parent claim's value. + // If a step is a defense: + // 1. If the parent claim and the found post state agree with each other + // (depth diff % 2 == 0), the step is valid if it produces the same + // state hash as the post state's claim. + // 2. If the parent claim and the found post state disagree with each other + // (depth diff % 2 != 0), the parent cannot be countered unless the step + // produces the same state hash as `postState.claim`. 
+ // SAFETY: While the `attack` path does not need an extra check for the post + // state's depth in relation to the parent, we don't need another + // branch because (n - n) % 2 == 0. + bool validStep = vm().step(_stateData, _proof, uuid.raw()) == postState.claim.raw(); + bool parentPostAgree = (parentPos.depth() - postState.position.depth()) % 2 == 0; + if (parentPostAgree == validStep) revert ValidStep(); + + // INVARIANT: A step cannot be made against a claim for a second time. + if (parent.counteredBy != address(0)) revert DuplicateStep(); + + // Set the parent claim as countered. We do not need to append a new claim to the game; + // instead, we can just set the existing parent as countered. + parent.counteredBy = msg.sender; + } + + /// @notice Generic move function, used for both `attack` and `defend` moves. + /// @param _disputed The disputed `Claim`. + /// @param _challengeIndex The index of the claim being moved against. + /// @param _claim The claim at the next logical position in the game. + /// @param _isAttack Whether or not the move is an attack or defense. + function move(Claim _disputed, uint256 _challengeIndex, Claim _claim, bool _isAttack) public payable virtual { + // INVARIANT: Moves cannot be made unless the game is currently in progress. + if (status != GameStatus.IN_PROGRESS) revert GameNotInProgress(); + + // Get the parent. If it does not exist, the call will revert with OOB. + ClaimData memory parent = claimData[_challengeIndex]; + + // INVARIANT: The claim at the _challengeIndex must be the disputed claim. + if (Claim.unwrap(parent.claim) != Claim.unwrap(_disputed)) revert InvalidDisputedClaimIndex(); + + // Compute the position that the claim commits to. Because the parent's position is already + // known, we can compute the next position by moving left or right depending on whether + // or not the move is an attack or defense. 
+ Position parentPos = parent.position;
+ Position nextPosition = parentPos.move(_isAttack);
+ uint256 nextPositionDepth = nextPosition.depth();
+
+ // INVARIANT: A defense can never be made against the root claim of either the output root game or any
+ // of the execution trace bisection subgames. This is because the root claim commits to the
+ // entire state. Therefore, the only valid defense is to do nothing if it is agreed with.
+ if ((_challengeIndex == 0 || nextPositionDepth == SPLIT_DEPTH + 2) && !_isAttack) {
+ revert CannotDefendRootClaim();
+ }
+
+ // INVARIANT: No moves against the root claim can be made after it has been challenged with
+ // `challengeRootL2Block`.
+ if (l2BlockNumberChallenged && _challengeIndex == 0) revert L2BlockNumberChallenged();
+
+ // INVARIANT: A move can never surpass the `MAX_GAME_DEPTH`. The only option to counter a
+ // claim at this depth is to perform a single instruction step on-chain via
+ // the `step` function to prove that the state transition produces an unexpected
+ // post-state.
+ if (nextPositionDepth > MAX_GAME_DEPTH) revert GameDepthExceeded();
+
+ // When the next position surpasses the split depth (i.e., it is the root claim of an execution
+ // trace bisection sub-game), we need to perform some extra verification steps.
+ if (nextPositionDepth == SPLIT_DEPTH + 1) {
+ _verifyExecBisectionRoot(_claim, _challengeIndex, parentPos, _isAttack);
+ }
+
+ // INVARIANT: The `msg.value` must exactly equal the required bond.
+ if (getRequiredBond(nextPosition) != msg.value) revert IncorrectBondAmount();
+
+ // Compute the duration of the next clock. This is done by adding the duration of the
+ // grandparent claim to the difference between the current block timestamp and the
+ // parent's clock timestamp.
+ Duration nextDuration = getChallengerDuration(_challengeIndex);
+
+ // INVARIANT: A move can never be made once its clock has exceeded `MAX_CLOCK_DURATION`
+ // seconds of time. 
+ if (nextDuration.raw() == MAX_CLOCK_DURATION.raw()) revert ClockTimeExceeded(); + + // Clock extension is a mechanism that automatically extends the clock for a potential + // grandchild claim when there would be less than the clock extension time left if a player + // is forced to inherit another team's clock when countering a freeloader claim. Exact + // amount of clock extension time depends exactly where we are within the game. + uint64 actualExtension; + if (nextPositionDepth == MAX_GAME_DEPTH - 1) { + // If the next position is `MAX_GAME_DEPTH - 1` then we're about to execute a step. Our + // clock extension must therefore account for the LPP challenge period in addition to + // the standard clock extension. + actualExtension = CLOCK_EXTENSION.raw() + uint64(vm().oracle().challengePeriod()); + } else if (nextPositionDepth == SPLIT_DEPTH - 1) { + // If the next position is `SPLIT_DEPTH - 1` then we're about to begin an execution + // trace bisection and we need to give extra time for the off-chain challenge agent to + // be able to generate the initial instruction trace on the native FPVM. + actualExtension = CLOCK_EXTENSION.raw() * 2; + } else { + // Otherwise, we just use the standard clock extension. + actualExtension = CLOCK_EXTENSION.raw(); + } + + // Check if we need to apply the clock extension. + if (nextDuration.raw() > MAX_CLOCK_DURATION.raw() - actualExtension) { + nextDuration = Duration.wrap(MAX_CLOCK_DURATION.raw() - actualExtension); + } + + // Construct the next clock with the new duration and the current block timestamp. + Clock nextClock = LibClock.wrap(nextDuration, Timestamp.wrap(uint64(block.timestamp))); + + // INVARIANT: There cannot be multiple identical claims with identical moves on the same challengeIndex. Multiple + // claims at the same position may dispute the same challengeIndex. However, they must have different + // values. 
+ Hash claimHash = _claim.hashClaimPos(nextPosition, _challengeIndex);
+ if (claims[claimHash]) revert ClaimAlreadyExists();
+ claims[claimHash] = true;
+
+ // Create the new claim.
+ claimData.push(
+ ClaimData({
+ parentIndex: uint32(_challengeIndex),
+ // This is updated during subgame resolution
+ counteredBy: address(0),
+ claimant: msg.sender,
+ bond: uint128(msg.value),
+ claim: _claim,
+ position: nextPosition,
+ clock: nextClock
+ })
+ );
+
+ // Update the subgame rooted at the parent claim.
+ subgames[_challengeIndex].push(claimData.length - 1);
+
+ // Deposit the bond.
+ refundModeCredit[msg.sender] += msg.value;
+ weth().deposit{ value: msg.value }();
+
+ // Emit the appropriate event for the attack or defense.
+ emit Move(_challengeIndex, _claim, msg.sender);
+ }
+
+ /// @notice Attack a disagreed upon `Claim`.
+ /// @param _disputed The `Claim` being attacked.
+ /// @param _parentIndex Index of the `Claim` to attack in the `claimData` array. This must match the `_disputed`
+ /// claim.
+ /// @param _claim The `Claim` at the relative attack position.
+ function attack(Claim _disputed, uint256 _parentIndex, Claim _claim) external payable {
+ move(_disputed, _parentIndex, _claim, true);
+ }
+
+ /// @notice Defend an agreed upon `Claim`.
+ /// @param _disputed The `Claim` being defended.
+ /// @param _parentIndex Index of the claim to defend in the `claimData` array. This must match the `_disputed`
+ /// claim.
+ /// @param _claim The `Claim` at the relative defense position.
+ function defend(Claim _disputed, uint256 _parentIndex, Claim _claim) external payable {
+ move(_disputed, _parentIndex, _claim, false);
+ }
+
+ /// @notice Posts the requested local data to the VM's `PreimageOracle`.
+ /// @param _ident The local identifier of the data to post.
+ /// @param _execLeafIdx The index of the leaf claim in an execution subgame that requires the local data for a step.
+ /// @param _partOffset The offset of the data to post. 
+ function addLocalData(uint256 _ident, uint256 _execLeafIdx, uint256 _partOffset) external { + // INVARIANT: Local data can only be added if the game is currently in progress. + if (status != GameStatus.IN_PROGRESS) revert GameNotInProgress(); + + (Claim starting, Position startingPos, Claim disputed, Position disputedPos) = + _findStartingAndDisputedOutputs(_execLeafIdx); + Hash uuid = _computeLocalContext(starting, startingPos, disputed, disputedPos); + + IPreimageOracle oracle = vm().oracle(); + if (_ident == LocalPreimageKey.L1_HEAD_HASH) { + // Load the L1 head hash + oracle.loadLocalData(_ident, uuid.raw(), l1Head().raw(), 32, _partOffset); + } else if (_ident == LocalPreimageKey.STARTING_OUTPUT_ROOT) { + // Load the starting proposal's output root. + oracle.loadLocalData(_ident, uuid.raw(), starting.raw(), 32, _partOffset); + } else if (_ident == LocalPreimageKey.DISPUTED_OUTPUT_ROOT) { + // Load the disputed proposal's output root + oracle.loadLocalData(_ident, uuid.raw(), disputed.raw(), 32, _partOffset); + } else if (_ident == LocalPreimageKey.DISPUTED_L2_BLOCK_NUMBER) { + // Load the disputed proposal's L2 block number as a big-endian uint64 in the + // high order 8 bytes of the word. + + // We add the index at depth + 1 to the starting block number to get the disputed L2 + // block number. + uint256 l2Number = startingOutputRoot.l2SequenceNumber + disputedPos.traceIndex(SPLIT_DEPTH) + 1; + + // Choose the minimum between the `l2BlockNumber` claim and the bisected-to L2 block number. + l2Number = l2Number < l2BlockNumber() ? l2Number : l2BlockNumber(); + + oracle.loadLocalData(_ident, uuid.raw(), bytes32(l2Number << 0xC0), 8, _partOffset); + } else if (_ident == LocalPreimageKey.CHAIN_ID) { + // Load the chain ID as a big-endian uint64 in the high order 8 bytes of the word. 
+ oracle.loadLocalData(_ident, uuid.raw(), bytes32(l2ChainId() << 0xC0), 8, _partOffset); + } else { + revert InvalidLocalIdent(); + } + } + + /// @notice Returns the number of children that still need to be resolved in order to fully resolve a subgame rooted + /// at `_claimIndex`. + /// @param _claimIndex The subgame root claim's index within `claimData`. + /// @return numRemainingChildren_ The number of children that still need to be checked to resolve the subgame. + function getNumToResolve(uint256 _claimIndex) public view returns (uint256 numRemainingChildren_) { + ResolutionCheckpoint storage checkpoint = resolutionCheckpoints[_claimIndex]; + uint256[] storage challengeIndices = subgames[_claimIndex]; + uint256 challengeIndicesLen = challengeIndices.length; + + numRemainingChildren_ = challengeIndicesLen - checkpoint.subgameIndex; + } + + /// @notice The l2BlockNumber of the disputed output root in the `L2OutputOracle`. + function l2BlockNumber() public pure returns (uint256 l2BlockNumber_) { + l2BlockNumber_ = _getArgUint256(84); + } + + /// @notice The l2SequenceNumber of the disputed output root in the `L2OutputOracle` (in this case - block number). + function l2SequenceNumber() public pure returns (uint256 l2SequenceNumber_) { + l2SequenceNumber_ = l2BlockNumber(); + } + + /// @notice Only the starting block number of the game. + function startingBlockNumber() external view returns (uint256 startingBlockNumber_) { + startingBlockNumber_ = startingOutputRoot.l2SequenceNumber; + } + + /// @notice Starting output root and block number of the game. + function startingRootHash() external view returns (Hash startingRootHash_) { + startingRootHash_ = startingOutputRoot.root; + } + + /// @notice Challenges the root L2 block number by providing the preimage of the output root and the L2 block header + /// and showing that the committed L2 block number is incorrect relative to the claimed L2 block number. + /// @param _outputRootProof The output root proof. 
+ /// @param _headerRLP The RLP-encoded L2 block header.
+ function challengeRootL2Block(
+ Types.OutputRootProof calldata _outputRootProof,
+ bytes calldata _headerRLP
+ )
+ external
+ {
+ // INVARIANT: Moves cannot be made unless the game is currently in progress.
+ if (status != GameStatus.IN_PROGRESS) revert GameNotInProgress();
+
+ // The root L2 block claim can only be challenged once.
+ if (l2BlockNumberChallenged) revert L2BlockNumberChallenged();
+
+ // Verify the output root preimage.
+ if (Hashing.hashOutputRootProof(_outputRootProof) != rootClaim().raw()) revert InvalidOutputRootProof();
+
+ // Verify the block hash preimage.
+ if (keccak256(_headerRLP) != _outputRootProof.latestBlockhash) revert InvalidHeaderRLP();
+
+ // Decode the header RLP to find the number of the block. In the consensus encoding, the block number
+ // is the 9th element in the list that represents the block header.
+ RLPReader.RLPItem[] memory headerContents = RLPReader.readList(RLPReader.toRLPItem(_headerRLP));
+ bytes memory rawBlockNumber = RLPReader.readBytes(headerContents[HEADER_BLOCK_NUMBER_INDEX]);
+
+ // Sanity check the block number string length.
+ if (rawBlockNumber.length > 32) revert InvalidHeaderRLP();
+
+ // Convert the raw, left-aligned block number to a uint256 by aligning it as a big-endian
+ // number in the low-order bytes of a 32-byte word.
+ //
+ // SAFETY: The length of `rawBlockNumber` is checked above to ensure it is at most 32 bytes.
+ uint256 blockNumber;
+ assembly {
+ blockNumber := shr(shl(0x03, sub(0x20, mload(rawBlockNumber))), mload(add(rawBlockNumber, 0x20)))
+ }
+
+ // Ensure the block number does not match the block number claimed in the dispute game.
+ if (blockNumber == l2BlockNumber()) revert BlockNumberMatches();
+
+ // Issue a special counter to the root claim. This counter will always win the root claim subgame, and receive
+ // the bond from the root claimant. 
+ l2BlockNumberChallenger = msg.sender;
+ l2BlockNumberChallenged = true;
+ }
+
+ ////////////////////////////////////////////////////////////////
+ // `IDisputeGame` impl //
+ ////////////////////////////////////////////////////////////////
+
+ /// @notice If all necessary information has been gathered, this function should mark the game
+ /// status as either `CHALLENGER_WINS` or `DEFENDER_WINS` and return the status of
+ /// the resolved game. It is at this stage that the bonds should be awarded to the
+ /// necessary parties.
+ /// @dev May only be called if the `status` is `IN_PROGRESS`.
+ /// @return status_ The status of the game after resolution.
+ function resolve() external returns (GameStatus status_) {
+ // INVARIANT: Resolution cannot occur unless the game is currently in progress.
+ if (status != GameStatus.IN_PROGRESS) revert GameNotInProgress();
+
+ // INVARIANT: Resolution cannot occur unless the absolute root subgame has been resolved.
+ if (!resolvedSubgames[0]) revert OutOfOrderResolution();
+
+ // Update the global game status; The dispute has concluded.
+ status_ = claimData[0].counteredBy == address(0) ? GameStatus.DEFENDER_WINS : GameStatus.CHALLENGER_WINS;
+ resolvedAt = Timestamp.wrap(uint64(block.timestamp));
+
+ // Update the status and emit the resolved event, note that we're performing an assignment here.
+ emit Resolved(status = status_);
+ }
+
+ /// @notice Resolves the subgame rooted at the given claim index. `_numToResolve` specifies how many children of
+ /// the subgame will be checked in this call. If `_numToResolve` is less than the number of children, an
+ /// internal cursor will be updated and this function may be called again to complete resolution of the
+ /// subgame.
+ /// @dev This function must be called bottom-up in the DAG
+ /// A subgame is a tree of claims that has a maximum depth of 1.
+ /// A subgame root claim is valid if, and only if, all of its child claims are invalid. 
+ /// At the deepest level in the DAG, a claim is invalid if there's a successful step against it. + /// @param _claimIndex The index of the subgame root claim to resolve. + /// @param _numToResolve The number of subgames to resolve in this call. If the input is `0`, and this is the first + /// page, this function will attempt to check all of the subgame's children at once. + function resolveClaim(uint256 _claimIndex, uint256 _numToResolve) external { + // INVARIANT: Resolution cannot occur unless the game is currently in progress. + if (status != GameStatus.IN_PROGRESS) revert GameNotInProgress(); + + ClaimData storage subgameRootClaim = claimData[_claimIndex]; + Duration challengeClockDuration = getChallengerDuration(_claimIndex); + + // INVARIANT: Cannot resolve a subgame unless the clock of its would-be counter has expired + // INVARIANT: Assuming ordered subgame resolution, challengeClockDuration is always >= MAX_CLOCK_DURATION if all + // descendant subgames are resolved + if (challengeClockDuration.raw() < MAX_CLOCK_DURATION.raw()) revert ClockNotExpired(); + + // INVARIANT: Cannot resolve a subgame twice. + if (resolvedSubgames[_claimIndex]) revert ClaimAlreadyResolved(); + + uint256[] storage challengeIndices = subgames[_claimIndex]; + uint256 challengeIndicesLen = challengeIndices.length; + + // Uncontested claims are resolved implicitly unless they are the root claim. Pay out the bond to the claimant + // and return early. + if (challengeIndicesLen == 0 && _claimIndex != 0) { + // In the event that the parent claim is at the max depth, there will always be 0 subgames. If the + // `counteredBy` field is set and there are no subgames, this implies that the parent claim was successfully + // stepped against. In this case, we pay out the bond to the party that stepped against the parent claim. + // Otherwise, the parent claim is uncontested, and the bond is returned to the claimant. 
+ address counteredBy = subgameRootClaim.counteredBy;
+ address recipient = counteredBy == address(0) ? subgameRootClaim.claimant : counteredBy;
+ _distributeBond(recipient, subgameRootClaim);
+ resolvedSubgames[_claimIndex] = true;
+ return;
+ }
+
+ // Fetch the resolution checkpoint from storage.
+ ResolutionCheckpoint memory checkpoint = resolutionCheckpoints[_claimIndex];
+
+ // If the checkpoint does not currently exist, initialize the current left most position as max u128.
+ if (!checkpoint.initialCheckpointComplete) {
+ checkpoint.leftmostPosition = Position.wrap(type(uint128).max);
+ checkpoint.initialCheckpointComplete = true;
+
+ // If `_numToResolve == 0`, assume that we can check all child subgames in this one callframe.
+ if (_numToResolve == 0) _numToResolve = challengeIndicesLen;
+ }
+
+ // Assume parent is honest until proven otherwise
+ uint256 lastToResolve = checkpoint.subgameIndex + _numToResolve;
+ uint256 finalCursor = lastToResolve > challengeIndicesLen ? challengeIndicesLen : lastToResolve;
+ for (uint256 i = checkpoint.subgameIndex; i < finalCursor; i++) {
+ uint256 challengeIndex = challengeIndices[i];
+
+ // INVARIANT: Cannot resolve a subgame containing an unresolved claim
+ if (!resolvedSubgames[challengeIndex]) revert OutOfOrderResolution();
+
+ ClaimData storage claim = claimData[challengeIndex];
+
+ // If the child subgame is uncountered and further left than the current left-most counter,
+ // update the parent subgame's `countered` address and the current `leftmostCounter`.
+ // The left-most correct counter is preferred in bond payouts in order to discourage attackers
+ // from countering invalid subgame roots via an invalid defense position. As such positions
+ // cannot be correctly countered.
+ // Note that correctly positioned defense, but invalid claims can still be successfully countered. 
+ if (claim.counteredBy == address(0) && checkpoint.leftmostPosition.raw() > claim.position.raw()) { + checkpoint.counteredBy = claim.claimant; + checkpoint.leftmostPosition = claim.position; + } + } + + // Increase the checkpoint's cursor position by the number of children that were checked. + checkpoint.subgameIndex = uint32(finalCursor); + + // Persist the checkpoint and allow for continuing in a separate transaction, if resolution is not already + // complete. + resolutionCheckpoints[_claimIndex] = checkpoint; + + // If all children have been traversed in the above loop, the subgame may be resolved. Otherwise, persist the + // checkpoint and allow for continuation in a separate transaction. + if (checkpoint.subgameIndex == challengeIndicesLen) { + address countered = checkpoint.counteredBy; + + // Mark the subgame as resolved. + resolvedSubgames[_claimIndex] = true; + + // Distribute the bond to the appropriate party. + if (_claimIndex == 0 && l2BlockNumberChallenged) { + // Special case: If the root claim has been challenged with the `challengeRootL2Block` function, + // the bond is always paid out to the issuer of that challenge. + address challenger = l2BlockNumberChallenger; + _distributeBond(challenger, subgameRootClaim); + subgameRootClaim.counteredBy = challenger; + } else { + // If the parent was not successfully countered, pay out the parent's bond to the claimant. + // If the parent was successfully countered, pay out the parent's bond to the challenger. + _distributeBond(countered == address(0) ? subgameRootClaim.claimant : countered, subgameRootClaim); + + // Once a subgame is resolved, we percolate the result up the DAG so subsequent calls to + // resolveClaim will not need to traverse this subgame. + subgameRootClaim.counteredBy = countered; + } + } + } + + /// @notice Getter for the creator of the dispute game. + /// @dev `clones-with-immutable-args` argument #1 + /// @return creator_ The creator of the dispute game. 
+ function gameCreator() public pure returns (address creator_) { + creator_ = _getArgAddress(0); + } + + /// @notice Getter for the root claim. + /// @dev `clones-with-immutable-args` argument #2 + /// @return rootClaim_ The root claim of the DisputeGame. + function rootClaim() public pure returns (Claim rootClaim_) { + rootClaim_ = Claim.wrap(_getArgBytes32(20)); + } + + /// @notice Getter for the parent hash of the L1 block when the dispute game was created. + /// @dev `clones-with-immutable-args` argument #3 + /// @return l1Head_ The parent hash of the L1 block when the dispute game was created. + function l1Head() public pure returns (Hash l1Head_) { + l1Head_ = Hash.wrap(_getArgBytes32(52)); + } + + /// @notice Getter for the extra data. + /// @dev `clones-with-immutable-args` argument #4 + /// @return extraData_ Any extra data supplied to the dispute game contract by the creator. + function extraData() public pure returns (bytes memory extraData_) { + // The extra data starts at the second word within the cwia calldata and + // is 32 bytes long. + extraData_ = _getArgBytes(84, 32); + } + + /// @notice Getter for the absolute prestate of the instruction trace. + /// @dev `clones-with-immutable-args` argument #5 + /// @return absolutePrestate_ The absolute prestate of the instruction trace. + function absolutePrestate() public pure returns (Claim absolutePrestate_) { + absolutePrestate_ = Claim.wrap(_getArgBytes32(116)); + } + + /// @notice Getter for the VM implementation. + /// @dev `clones-with-immutable-args` argument #6 + /// @return vm_ The onchain VM implementation address. + function vm() public pure returns (IBigStepper vm_) { + vm_ = IBigStepper(_getArgAddress(148)); + } + + /// @notice Getter for the anchor state registry. + /// @dev `clones-with-immutable-args` argument #7 + /// @return registry_ The anchor state registry contract address. 
+ function anchorStateRegistry() public pure returns (IAnchorStateRegistry registry_) { + registry_ = IAnchorStateRegistry(_getArgAddress(168)); + } + + /// @notice Getter for the WETH contract. + /// @dev `clones-with-immutable-args` argument #8 + /// @return weth_ The WETH contract for holding ETH. + function weth() public pure returns (IDelayedWETH weth_) { + weth_ = IDelayedWETH(payable(_getArgAddress(188))); + } + + /// @notice Getter for the L2 chain ID. + /// @dev `clones-with-immutable-args` argument #9 + /// @return l2ChainId_ The L2 chain ID. + function l2ChainId() public pure returns (uint256 l2ChainId_) { + l2ChainId_ = _getArgUint256(208); + } + + /// @notice A compliant implementation of this interface should return the components of the + /// game UUID's preimage provided in the cwia payload. The preimage of the UUID is + /// constructed as `keccak256(gameType . rootClaim . extraData)` where `.` denotes + /// concatenation. + /// @return gameType_ The type of proof system being used. + /// @return rootClaim_ The root claim of the DisputeGame. + /// @return extraData_ Any extra data supplied to the dispute game contract by the creator. + function gameData() external view returns (GameType gameType_, Claim rootClaim_, bytes memory extraData_) { + gameType_ = gameType(); + rootClaim_ = rootClaim(); + extraData_ = extraData(); + } + + /// @notice Getter for the game type. + /// @dev The reference impl should be entirely different depending on the type (fault, validity) + /// i.e. The game type should indicate the security model. + /// @return gameType_ The type of proof system being used. + function gameType() public view returns (GameType gameType_) { + gameType_ = GAME_TYPE; + } + + //////////////////////////////////////////////////////////////// + // MISC EXTERNAL // + //////////////////////////////////////////////////////////////// + + /// @notice Returns the required bond for a given move kind. 
+ /// @param _position The position of the bonded interaction. + /// @return requiredBond_ The required ETH bond for the given move, in wei. + function getRequiredBond(Position _position) public view returns (uint256 requiredBond_) { + uint256 depth = uint256(_position.depth()); + if (depth > MAX_GAME_DEPTH) revert GameDepthExceeded(); + + // Values taken from Big Bonds v1.5 (TM) spec. + uint256 assumedBaseFee = 200 gwei; + uint256 baseGasCharged = 400_000; + uint256 highGasCharged = 300_000_000; + + // Goal here is to compute the fixed multiplier that will be applied to the base gas + // charged to get the required gas amount for the given depth. We apply this multiplier + // some `n` times where `n` is the depth of the position. We are looking for some number + // that, when multiplied by itself `MAX_GAME_DEPTH` times and then multiplied by the base + // gas charged, will give us the maximum gas that we want to charge. + // We want to solve for (highGasCharged/baseGasCharged) ** (1/MAX_GAME_DEPTH). + // We know that a ** (b/c) is equal to e ** (ln(a) * (b/c)). + // We can compute e ** (ln(a) * (b/c)) quite easily with FixedPointMathLib. + + // Set up a, b, and c. + uint256 a = highGasCharged / baseGasCharged; + uint256 b = FixedPointMathLib.WAD; + uint256 c = MAX_GAME_DEPTH * FixedPointMathLib.WAD; + + // Compute ln(a). + // slither-disable-next-line divide-before-multiply + uint256 lnA = uint256(FixedPointMathLib.lnWad(int256(a * FixedPointMathLib.WAD))); + + // Computes (b / c) with full precision using WAD = 1e18. + uint256 bOverC = FixedPointMathLib.divWad(b, c); + + // Compute e ** (ln(a) * (b/c)) + // sMulWad can be used here since WAD = 1e18 maintains the same precision. + uint256 numerator = FixedPointMathLib.mulWad(lnA, bOverC); + int256 base = FixedPointMathLib.expWad(int256(numerator)); + + // Compute the required gas amount. 
+ int256 rawGas = FixedPointMathLib.powWad(base, int256(depth * FixedPointMathLib.WAD)); + uint256 requiredGas = FixedPointMathLib.mulWad(baseGasCharged, uint256(rawGas)); + + // Compute the required bond. + requiredBond_ = assumedBaseFee * requiredGas; + } + + /// @notice Claim the credit belonging to the recipient address. Reverts if the game isn't + /// finalized, if the recipient has no credit to claim, or if the bond transfer + /// fails. If the game is finalized but no bond has been paid out yet, this method + /// will determine the bond distribution mode and also try to update anchor game. + /// @param _recipient The owner and recipient of the credit. + function claimCredit(address _recipient) external { + // Close out the game and determine the bond distribution mode if not already set. + // We call this as part of claim credit to reduce the number of additional calls that a + // Challenger needs to make to this contract. + closeGame(); + + // Fetch the recipient's credit balance based on the bond distribution mode. + uint256 recipientCredit; + if (bondDistributionMode == BondDistributionMode.REFUND) { + recipientCredit = refundModeCredit[_recipient]; + } else if (bondDistributionMode == BondDistributionMode.NORMAL) { + recipientCredit = normalModeCredit[_recipient]; + } else { + // We shouldn't get here, but sanity check just in case. + revert InvalidBondDistributionMode(); + } + + // If the game is in refund mode, and the recipient has not unlocked their refund mode + // credit, we unlock it and return early. + if (!hasUnlockedCredit[_recipient]) { + hasUnlockedCredit[_recipient] = true; + weth().unlock(_recipient, recipientCredit); + return; + } + + // Revert if the recipient has no credit to claim. + if (recipientCredit == 0) revert NoCreditToClaim(); + + // Set the recipient's credit balances to 0. + refundModeCredit[_recipient] = 0; + normalModeCredit[_recipient] = 0; + + // Try to withdraw the WETH amount so it can be used here. 
+ weth().withdraw(_recipient, recipientCredit); + + // Transfer the credit to the recipient. + (bool success,) = _recipient.call{ value: recipientCredit }(hex""); + if (!success) revert BondTransferFailed(); + } + + /// @notice Closes out the game, determines the bond distribution mode, attempts to register + /// the game as the anchor game, and emits an event. + function closeGame() public { + // If the bond distribution mode has already been determined, we can return early. + if (bondDistributionMode == BondDistributionMode.REFUND || bondDistributionMode == BondDistributionMode.NORMAL) + { + // We can't revert or we'd break claimCredit(). + return; + } else if (bondDistributionMode != BondDistributionMode.UNDECIDED) { + // We shouldn't get here, but sanity check just in case. + revert InvalidBondDistributionMode(); + } + + // We won't close the game if the system is currently paused. Paused games are temporarily + // invalid which would cause the game to go into refund mode and potentially cause some + // confusion for honest challengers. By blocking the game from being closed while the + // system is paused, the game will only go into refund mode if it ends up being explicitly + // invalidated in the AnchorStateRegistry. If the game has already been closed and a refund + // mode has been selected, we'll already have returned and we won't hit this revert. + if (anchorStateRegistry().paused()) { + revert GamePaused(); + } + + // Make sure that the game is resolved. + // AnchorStateRegistry should be checking this but we're being defensive here. + if (resolvedAt.raw() == 0) { + revert GameNotResolved(); + } + + // Game must be finalized according to the AnchorStateRegistry. + bool finalized = anchorStateRegistry().isGameFinalized(IDisputeGame(address(this))); + if (!finalized) { + revert GameNotFinalized(); + } + + // Try to update the anchor game first. 
Won't always succeed because delays can lead + // to situations in which this game might not be eligible to be a new anchor game. + // eip150-safe + try anchorStateRegistry().setAnchorState(IDisputeGame(address(this))) { } catch { } + + // Check if the game is a proper game, which will determine the bond distribution mode. + bool properGame = anchorStateRegistry().isGameProper(IDisputeGame(address(this))); + + // If the game is a proper game, the bonds should be distributed normally. Otherwise, go + // into refund mode and distribute bonds back to their original depositors. + if (properGame) { + bondDistributionMode = BondDistributionMode.NORMAL; + } else { + bondDistributionMode = BondDistributionMode.REFUND; + } + + // Emit an event to signal that the game has been closed. + emit GameClosed(bondDistributionMode); + } + + /// @notice Returns the amount of time elapsed on the potential challenger to `_claimIndex`'s chess clock. Maxes + /// out at `MAX_CLOCK_DURATION`. + /// @param _claimIndex The index of the subgame root claim. + /// @return duration_ The time elapsed on the potential challenger to `_claimIndex`'s chess clock. + function getChallengerDuration(uint256 _claimIndex) public view returns (Duration duration_) { + // INVARIANT: The game must be in progress to query the remaining time to respond to a given claim. + if (status != GameStatus.IN_PROGRESS) { + revert GameNotInProgress(); + } + + // Fetch the subgame root claim. + ClaimData storage subgameRootClaim = claimData[_claimIndex]; + + // Fetch the parent of the subgame root's clock, if it exists. + Clock parentClock; + if (subgameRootClaim.parentIndex != type(uint32).max) { + parentClock = claimData[subgameRootClaim.parentIndex].clock; + } + + // Compute the duration elapsed of the potential challenger's clock. + uint64 challengeDuration = + uint64(parentClock.duration().raw() + (block.timestamp - subgameRootClaim.clock.timestamp().raw())); + duration_ = challengeDuration > MAX_CLOCK_DURATION.raw() ? 
MAX_CLOCK_DURATION : Duration.wrap(challengeDuration); + } + + /// @notice Returns the length of the `claimData` array. + function claimDataLen() external view returns (uint256 len_) { + len_ = claimData.length; + } + + /// @notice Returns the credit balance of a given recipient. + /// @param _recipient The recipient of the credit. + /// @return credit_ The credit balance of the recipient. + function credit(address _recipient) external view returns (uint256 credit_) { + if (bondDistributionMode == BondDistributionMode.REFUND) { + credit_ = refundModeCredit[_recipient]; + } else { + // Always return normal credit balance by default unless we're in refund mode. + credit_ = normalModeCredit[_recipient]; + } + } + + //////////////////////////////////////////////////////////////// + // IMMUTABLE GETTERS // + //////////////////////////////////////////////////////////////// + + /// @notice Returns the max game depth. + function maxGameDepth() external view returns (uint256 maxGameDepth_) { + maxGameDepth_ = MAX_GAME_DEPTH; + } + + /// @notice Returns the split depth. + function splitDepth() external view returns (uint256 splitDepth_) { + splitDepth_ = SPLIT_DEPTH; + } + + /// @notice Returns the max clock duration. + function maxClockDuration() external view returns (Duration maxClockDuration_) { + maxClockDuration_ = MAX_CLOCK_DURATION; + } + + /// @notice Returns the clock extension constant. + function clockExtension() external view returns (Duration clockExtension_) { + clockExtension_ = CLOCK_EXTENSION; + } + + //////////////////////////////////////////////////////////////// + // HELPERS // + //////////////////////////////////////////////////////////////// + + /// @notice Pays out the bond of a claim to a given recipient. + /// @param _recipient The recipient of the bond. + /// @param _bonded The claim to pay out the bond of. 
+ function _distributeBond(address _recipient, ClaimData storage _bonded) internal { + normalModeCredit[_recipient] += _bonded.bond; + } + + /// @notice Verifies the integrity of an execution bisection subgame's root claim. Reverts if the claim + /// is invalid. + /// @param _rootClaim The root claim of the execution bisection subgame. + function _verifyExecBisectionRoot( + Claim _rootClaim, + uint256 _parentIdx, + Position _parentPos, + bool _isAttack + ) + internal + view + { + // The root claim of an execution trace bisection sub-game must: + // 1. Signal that the VM panicked or resulted in an invalid transition if the disputed output root + // was made by the opposing party. + // 2. Signal that the VM resulted in a valid transition if the disputed output root was made by the same party. + + // If the move is a defense, the disputed output could have been made by either party. In this case, we + // need to search for the parent output to determine what the expected status byte should be. + Position disputedLeafPos = Position.wrap(_parentPos.raw() + 1); + ClaimData storage disputed = _findTraceAncestor({ _pos: disputedLeafPos, _start: _parentIdx, _global: true }); + uint8 vmStatus = uint8(_rootClaim.raw()[0]); + + if (_isAttack || disputed.position.depth() % 2 == SPLIT_DEPTH % 2) { + // If the move is an attack, the parent output is always deemed to be disputed. In this case, we only need + // to check that the root claim signals that the VM panicked or resulted in an invalid transition. + // If the move is a defense, and the disputed output and creator of the execution trace subgame disagree, + // the root claim should also signal that the VM panicked or resulted in an invalid transition. + if (!(vmStatus == VMStatuses.INVALID.raw() || vmStatus == VMStatuses.PANIC.raw())) { + revert UnexpectedRootClaim(_rootClaim); + } + } else if (vmStatus != VMStatuses.VALID.raw()) { + // The disputed output and the creator of the execution trace subgame agree. 
The status byte should + // have signaled that the VM succeeded. + revert UnexpectedRootClaim(_rootClaim); + } + } + + /// @notice Finds the trace ancestor of a given position within the DAG. + /// @param _pos The position to find the trace ancestor claim of. + /// @param _start The index to start searching from. + /// @param _global Whether or not to search the entire dag or just within an execution trace subgame. If set to + /// `true`, and `_pos` is at or above the split depth, this function will revert. + /// @return ancestor_ The ancestor claim that commits to the same trace index as `_pos`. + function _findTraceAncestor( + Position _pos, + uint256 _start, + bool _global + ) + internal + view + returns (ClaimData storage ancestor_) + { + // Grab the trace ancestor's expected position. + Position traceAncestorPos = _global ? _pos.traceAncestor() : _pos.traceAncestorBounded(SPLIT_DEPTH); + + // Walk up the DAG to find a claim that commits to the same trace index as `_pos`. It is + // guaranteed that such a claim exists. + ancestor_ = claimData[_start]; + while (ancestor_.position.raw() != traceAncestorPos.raw()) { + ancestor_ = claimData[ancestor_.parentIndex]; + } + } + + /// @notice Finds the starting and disputed output root for a given `ClaimData` within the DAG. This + /// `ClaimData` must be below the `SPLIT_DEPTH`. + /// @param _start The index within `claimData` of the claim to start searching from. + /// @return startingClaim_ The starting output root claim. + /// @return startingPos_ The starting output root position. + /// @return disputedClaim_ The disputed output root claim. + /// @return disputedPos_ The disputed output root position. + function _findStartingAndDisputedOutputs(uint256 _start) + internal + view + returns (Claim startingClaim_, Position startingPos_, Claim disputedClaim_, Position disputedPos_) + { + // Fatch the starting claim. 
+ uint256 claimIdx = _start; + ClaimData storage claim = claimData[claimIdx]; + + // If the starting claim's depth is less than or equal to the split depth, we revert as this is UB. + if (claim.position.depth() <= SPLIT_DEPTH) revert ClaimAboveSplit(); + + // We want to: + // 1. Find the first claim at the split depth. + // 2. Determine whether it was the starting or disputed output for the exec game. + // 3. Find the complimentary claim depending on the info from #2 (pre or post). + + // Walk up the DAG until the ancestor's depth is equal to the split depth. + uint256 currentDepth; + ClaimData storage execRootClaim = claim; + while ((currentDepth = claim.position.depth()) > SPLIT_DEPTH) { + uint256 parentIndex = claim.parentIndex; + + // If we're currently at the split depth + 1, we're at the root of the execution sub-game. + // We need to keep track of the root claim here to determine whether the execution sub-game was + // started with an attack or defense against the output leaf claim. + if (currentDepth == SPLIT_DEPTH + 1) execRootClaim = claim; + + claim = claimData[parentIndex]; + claimIdx = parentIndex; + } + + // Determine whether the start of the execution sub-game was an attack or defense to the output root + // above. This is important because it determines which claim is the starting output root and which + // is the disputed output root. + (Position execRootPos, Position outputPos) = (execRootClaim.position, claim.position); + bool wasAttack = execRootPos.parent().raw() == outputPos.raw(); + + // Determine the starting and disputed output root indices. + // 1. If it was an attack, the disputed output root is `claim`, and the starting output root is + // elsewhere in the DAG (it must commit to the block # index at depth of `outputPos - 1`). + // 2. If it was a defense, the starting output root is `claim`, and the disputed output root is + // elsewhere in the DAG (it must commit to the block # index at depth of `outputPos + 1`). 
+ if (wasAttack) { + // If this is an attack on the first output root (the block directly after the starting + // block number), the starting claim nor position exists in the tree. We leave these as + // 0, which can be easily identified due to 0 being an invalid Gindex. + if (outputPos.indexAtDepth() > 0) { + ClaimData storage starting = _findTraceAncestor(Position.wrap(outputPos.raw() - 1), claimIdx, true); + (startingClaim_, startingPos_) = (starting.claim, starting.position); + } else { + startingClaim_ = Claim.wrap(startingOutputRoot.root.raw()); + } + (disputedClaim_, disputedPos_) = (claim.claim, claim.position); + } else { + ClaimData storage disputed = _findTraceAncestor(Position.wrap(outputPos.raw() + 1), claimIdx, true); + (startingClaim_, startingPos_) = (claim.claim, claim.position); + (disputedClaim_, disputedPos_) = (disputed.claim, disputed.position); + } + } + + /// @notice Finds the local context hash for a given claim index that is present in an execution trace subgame. + /// @param _claimIndex The index of the claim to find the local context hash for. + /// @return uuid_ The local context hash. + function _findLocalContext(uint256 _claimIndex) internal view returns (Hash uuid_) { + (Claim starting, Position startingPos, Claim disputed, Position disputedPos) = + _findStartingAndDisputedOutputs(_claimIndex); + uuid_ = _computeLocalContext(starting, startingPos, disputed, disputedPos); + } + + /// @notice Computes the local context hash for a set of starting/disputed claim values and positions. + /// @param _starting The starting claim. + /// @param _startingPos The starting claim's position. + /// @param _disputed The disputed claim. + /// @param _disputedPos The disputed claim's position. + /// @return uuid_ The local context hash. 
+ function _computeLocalContext( + Claim _starting, + Position _startingPos, + Claim _disputed, + Position _disputedPos + ) + internal + pure + returns (Hash uuid_) + { + // A position of 0 indicates that the starting claim is the absolute prestate. In this special case, + // we do not include the starting claim within the local context hash. + uuid_ = _startingPos.raw() == 0 + ? Hash.wrap(keccak256(abi.encode(_disputed, _disputedPos))) + : Hash.wrap(keccak256(abi.encode(_starting, _startingPos, _disputed, _disputedPos))); + } +} diff --git a/packages/contracts-bedrock/src/dispute/v2/PermissionedDisputeGameV2.sol b/packages/contracts-bedrock/src/dispute/v2/PermissionedDisputeGameV2.sol new file mode 100644 index 0000000000000..a1e658eea6877 --- /dev/null +++ b/packages/contracts-bedrock/src/dispute/v2/PermissionedDisputeGameV2.sol @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Contracts +import { FaultDisputeGameV2 } from "src/dispute/v2/FaultDisputeGameV2.sol"; + +// Libraries +import { Claim } from "src/dispute/lib/Types.sol"; +import { BadAuth } from "src/dispute/lib/Errors.sol"; + +/// @title PermissionedDisputeGameV2 +/// @notice PermissionedDisputeGameV2 is a contract that inherits from `FaultDisputeGameV2`, and contains two roles: +/// - The `challenger` role, which is allowed to challenge a dispute. +/// - The `proposer` role, which is allowed to create proposals and participate in their game. +/// This contract exists as a way for networks to support the fault proof iteration of the OptimismPortal +/// contract without needing to support a fully permissionless system. Permissionless systems can introduce +/// costs that certain networks may not wish to support. This contract can also be used as a fallback mechanism +/// in case of a failure in the permissionless fault proof system in the stage one release. 
+contract PermissionedDisputeGameV2 is FaultDisputeGameV2 { + /// @notice Modifier that gates access to the `challenger` and `proposer` roles. + modifier onlyAuthorized() { + if (!(msg.sender == proposer() || msg.sender == challenger())) { + revert BadAuth(); + } + _; + } + + /// @notice Semantic version. + /// @custom:semver 2.1.0 + function version() public pure override returns (string memory) { + return "2.1.0"; + } + + /// @param _params Parameters for creating a new FaultDisputeGame. + constructor(GameConstructorParams memory _params) FaultDisputeGameV2(_params) { } + + /// @inheritdoc FaultDisputeGameV2 + function step( + uint256 _claimIndex, + bool _isAttack, + bytes calldata _stateData, + bytes calldata _proof + ) + public + override + onlyAuthorized + { + super.step(_claimIndex, _isAttack, _stateData, _proof); + } + + /// @notice Generic move function, used for both `attack` and `defend` moves. + /// @notice _disputed The disputed `Claim`. + /// @param _challengeIndex The index of the claim being moved against. This must match the `_disputed` claim. + /// @param _claim The claim at the next logical position in the game. + /// @param _isAttack Whether or not the move is an attack or defense. + function move( + Claim _disputed, + uint256 _challengeIndex, + Claim _claim, + bool _isAttack + ) + public + payable + override + onlyAuthorized + { + super.move(_disputed, _challengeIndex, _claim, _isAttack); + } + + /// @notice Initializes the contract. + function initialize() public payable override { + super.initialize(); + + // The creator of the dispute game must be the proposer EOA. 
+ if (tx.origin != proposer()) revert BadAuth(); + } + + function immutableArgsByteCount() internal pure override returns (uint256) { + // Extend expected data length to account for proposer and challenger addresses + // - 20 bytes: proposer address + // - 20 bytes: challenger address + return super.immutableArgsByteCount() + 40; + } + + //////////////////////////////////////////////////////////////// + // IMMUTABLE GETTERS // + //////////////////////////////////////////////////////////////// + + /// @notice Returns the proposer address. The proposer role is allowed to create proposals and participate in the + /// dispute game. + function proposer() public pure returns (address proposer_) { + proposer_ = _getArgAddress(super.immutableArgsByteCount()); + } + + /// @notice Returns the challenger address. The challenger role is allowed to participate in the dispute game. + function challenger() public pure returns (address challenger_) { + challenger_ = _getArgAddress(super.immutableArgsByteCount() + 20); + } +} diff --git a/packages/contracts-bedrock/src/integration/GameHelper.sol b/packages/contracts-bedrock/src/integration/GameHelper.sol new file mode 100644 index 0000000000000..c65685b97ae44 --- /dev/null +++ b/packages/contracts-bedrock/src/integration/GameHelper.sol @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { IDisputeGameFactory } from "../../interfaces/dispute/IDisputeGameFactory.sol"; +import { IFaultDisputeGame } from "../../interfaces/dispute/IFaultDisputeGame.sol"; + +// Libraries +import { Claim, Position, GameType } from "src/dispute/lib/Types.sol"; + +/// @title GameHelper +/// @notice GameHelper is a util contract for testing to perform multiple moves in a dispute game in a single +/// transaction. Note that it is unsafe to use in production as the bonds paid cannot be recovered. 
+contract GameHelper { + struct Move { + uint256 parentIdx; + Claim claim; + bool attack; + } + + /// @notice Performs the specified set of moves in the supplied dispute game. + /// @param _game the game to perform moves in. + /// @param _moves the moves to perform. + function performMoves(IFaultDisputeGame _game, Move[] calldata _moves) public payable { + uint256 movesLen = _moves.length; + for (uint256 i = 0; i < movesLen; i++) { + Move memory move = _moves[i]; + (,,,, Claim pClaim, Position pPosition,) = _game.claimData(move.parentIdx); + uint256 requiredBond = _game.getRequiredBond(pPosition.move(move.attack)); + _game.move{ value: requiredBond }(pClaim, move.parentIdx, move.claim, move.attack); + } + } + + /// @notice Creates a new game and performs the specified moves in it. + /// @param _dgf the DisputeGameFactory to create a game in. + /// @param _gameType the type of game to create. + /// @param _rootClaim the root claim of the new game. + /// @param _extraData the extra data for the new game. + /// @param _moves the array of moves to perform in the new game. + /// @return gameAddr_ the address of the newly created game. + function createGameWithClaims( + IDisputeGameFactory _dgf, + GameType _gameType, + Claim _rootClaim, + bytes memory _extraData, + Move[] calldata _moves + ) + external + payable + returns (address gameAddr_) + { + uint256 initBond = _dgf.initBonds(_gameType); + gameAddr_ = address(_dgf.create{ value: initBond }(_gameType, _rootClaim, _extraData)); + IFaultDisputeGame game = IFaultDisputeGame(gameAddr_); + performMoves(game, _moves); + } + + // @notice Allows funds to be sent to this contract or to use it in a 7702 authorization. 
+ receive() external payable { } +} diff --git a/packages/contracts-bedrock/src/legacy/DeployerWhitelist.sol b/packages/contracts-bedrock/src/legacy/DeployerWhitelist.sol index 05a7798aeca3a..7928c064f69a3 100644 --- a/packages/contracts-bedrock/src/legacy/DeployerWhitelist.sol +++ b/packages/contracts-bedrock/src/legacy/DeployerWhitelist.sol @@ -42,8 +42,8 @@ contract DeployerWhitelist is ISemver { } /// @notice Semantic version. - /// @custom:semver 1.1.1-beta.3 - string public constant version = "1.1.1-beta.3"; + /// @custom:semver 1.1.2 + string public constant version = "1.1.2"; /// @notice Adds or removes an address from the deployment whitelist. /// @param _deployer Address to update permissions for. diff --git a/packages/contracts-bedrock/src/legacy/L1BlockNumber.sol b/packages/contracts-bedrock/src/legacy/L1BlockNumber.sol index 6e0e33fa94878..1d729789b2082 100644 --- a/packages/contracts-bedrock/src/legacy/L1BlockNumber.sol +++ b/packages/contracts-bedrock/src/legacy/L1BlockNumber.sol @@ -18,8 +18,8 @@ import { IL1Block } from "interfaces/L2/IL1Block.sol"; /// contract instead. contract L1BlockNumber is ISemver { /// @notice Semantic version. - /// @custom:semver 1.1.1-beta.3 - string public constant version = "1.1.1-beta.3"; + /// @custom:semver 1.1.2 + string public constant version = "1.1.2"; /// @notice Returns the L1 block number. receive() external payable { diff --git a/packages/contracts-bedrock/src/legacy/LegacyMessagePasser.sol b/packages/contracts-bedrock/src/legacy/LegacyMessagePasser.sol index bdc81ad2839ec..b1d11649cba78 100644 --- a/packages/contracts-bedrock/src/legacy/LegacyMessagePasser.sol +++ b/packages/contracts-bedrock/src/legacy/LegacyMessagePasser.sol @@ -15,8 +15,8 @@ contract LegacyMessagePasser is ISemver { mapping(bytes32 => bool) public sentMessages; /// @notice Semantic version. 
- /// @custom:semver 1.1.1-beta.3 - string public constant version = "1.1.1-beta.3"; + /// @custom:semver 1.1.2 + string public constant version = "1.1.2"; /// @notice Passes a message to L1. /// @param _message Message to pass to L1. diff --git a/packages/contracts-bedrock/src/libraries/DevFeatures.sol b/packages/contracts-bedrock/src/libraries/DevFeatures.sol new file mode 100644 index 0000000000000..5c7b4cdb02def --- /dev/null +++ b/packages/contracts-bedrock/src/libraries/DevFeatures.sol @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/// @notice Library of constants representing development features. We use a 32 byte bitmap because +/// it's easier to integrate with op-deployer. Note that users should typically set a +/// single nibble to 1 and the rest to zero, which gives us 64 potential features, like: +/// 0x0000000000000000000000000000000000000000000000000000000000000001 +/// 0x0000000000000000000000000000000000000000000000000000000000000010 +/// 0x0000000000000000000000000000000000000000000000000000000000000100 +/// etc. +/// We'll expand to using all available bits if we need more than 64 concurrent features. +library DevFeatures { + /// @notice The feature that enables the OptimismPortalInterop contract. + bytes32 public constant OPTIMISM_PORTAL_INTEROP = + bytes32(0x0000000000000000000000000000000000000000000000000000000000000001); + + bytes32 public constant CANNON_KONA = bytes32(0x0000000000000000000000000000000000000000000000000000000000000010); + + /// @notice The feature that enables deployment of V2 dispute game contracts. + bytes32 public constant DEPLOY_V2_DISPUTE_GAMES = + bytes32(0x0000000000000000000000000000000000000000000000000000000000000100); + + /// @notice Checks if a feature is enabled in a bitmap. Note that this function does not check + /// that the input feature represents a single feature and the bitwise AND operation + /// allows for multiple features to be enabled at once. 
Users should generally check + /// for only a single feature at a time. + /// @param _bitmap The bitmap to check. + /// @param _feature The feature to check. + /// @return True if the feature is enabled, false otherwise. + function isDevFeatureEnabled(bytes32 _bitmap, bytes32 _feature) internal pure returns (bool) { + return _feature != 0 && (_bitmap & _feature) == _feature; + } +} diff --git a/packages/contracts-bedrock/src/libraries/Features.sol b/packages/contracts-bedrock/src/libraries/Features.sol new file mode 100644 index 0000000000000..1521b1d1a3307 --- /dev/null +++ b/packages/contracts-bedrock/src/libraries/Features.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +/// @notice Features is a library that stores feature name constants. Can be used alongside the +/// feature flagging functionality in the SystemConfig contract to selectively enable or +/// disable customizable features of the OP Stack. +library Features { + /// @notice The ETH_LOCKBOX feature determines if the system is configured to use the + /// ETHLockbox contract in the OptimismPortal. When the ETH_LOCKBOX feature is active + /// and the ETHLockbox contract has been configured, the OptimismPortal will use the + /// ETHLockbox to store ETH instead of storing ETH directly in the portal itself. + bytes32 internal constant ETH_LOCKBOX = "ETH_LOCKBOX"; +} diff --git a/packages/contracts-bedrock/src/libraries/SemverComp.sol b/packages/contracts-bedrock/src/libraries/SemverComp.sol new file mode 100644 index 0000000000000..04cb8c3a97895 --- /dev/null +++ b/packages/contracts-bedrock/src/libraries/SemverComp.sol @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +// Libraries +import { LibString } from "@solady/utils/LibString.sol"; +import { JSONParserLib } from "@solady/utils/JSONParserLib.sol"; + +/// @notice Library for comparing semver strings. Ignores prereleases and build metadata. 
+library SemverComp { + /// @notice Struct representing a semver string. + /// @custom:field major The major version number. + /// @custom:field minor The minor version number. + /// @custom:field patch The patch version number. + struct Semver { + uint256 major; + uint256 minor; + uint256 patch; + } + + /// @notice Error thrown when a semver string has less than 3 parts. + error SemverComp_InvalidSemverParts(); + + /// @notice Parses a semver string into a Semver struct. Only handles the major, minor, and + /// patch numerical components, ignores prereleases and build metadata. + /// @param _semver The semver string to parse. + /// @return The parsed Semver struct. + function parse(string memory _semver) internal pure returns (Semver memory) { + string[] memory parts = LibString.split(_semver, "."); + + // We need at least 3 parts to be a valid semver, but we might have more parts if the + // semver looks like "1.2.3-beta.4+build.5". + if (parts.length < 3) { + revert SemverComp_InvalidSemverParts(); + } + + // Split the patch component by hyphen, if it exists. We only want the first part of the + // patch. We're ignoring prereleases and build versions in this library. We're handling + // cases like 1.2.3-beta.4+build.5 as well as 1.2.3+build.5. + string[] memory patchParts = LibString.split(parts[2], "-"); + string[] memory patchParts2 = LibString.split(patchParts[0], "+"); + + // Parse the major, minor, and patch components. JSONParserLib will revert if the + // components are not valid decimal numbers. + return Semver({ + major: JSONParserLib.parseUint(parts[0]), + minor: JSONParserLib.parseUint(parts[1]), + patch: JSONParserLib.parseUint(patchParts2[0]) + }); + } + + /// @notice Compares two semver strings (=). Ignores prereleases and build metadata. + /// @param _a The first semver string. + /// @param _b The second semver string. + /// @return True if the semver strings are equal, false otherwise. 
+ function eq(string memory _a, string memory _b) internal pure returns (bool) { + Semver memory a = parse(_a); + Semver memory b = parse(_b); + return a.major == b.major && a.minor == b.minor && a.patch == b.patch; + } + + /// @notice Compares two semver strings (<). Ignores prereleases and build metadata. + /// @param _a The first semver string. + /// @param _b The second semver string. + /// @return True if the first semver string is less than the second, false otherwise. + function lt(string memory _a, string memory _b) internal pure returns (bool) { + Semver memory a = parse(_a); + Semver memory b = parse(_b); + return a.major < b.major || (a.major == b.major && a.minor < b.minor) + || (a.major == b.major && a.minor == b.minor && a.patch < b.patch); + } + + /// @notice Compares two semver strings (<=). Ignores prereleases and build metadata. + /// @param _a The first semver string. + /// @param _b The second semver string. + /// @return True if the first semver string is less than or equal to the second, false otherwise. + function lte(string memory _a, string memory _b) internal pure returns (bool) { + return eq(_a, _b) || lt(_a, _b); + } + + /// @notice Compares two semver strings (>). Ignores prereleases and build metadata. + /// @param _a The first semver string. + /// @param _b The second semver string. + /// @return True if the first semver string is greater than the second, false otherwise. + function gt(string memory _a, string memory _b) internal pure returns (bool) { + return !eq(_a, _b) && !lt(_a, _b); + } + + /// @notice Compares two semver strings (>=). Ignores prereleases and build metadata. + /// @param _a The first semver string. + /// @param _b The second semver string. + /// @return True if the first semver string is greater than or equal to the second, false otherwise. 
+ function gte(string memory _a, string memory _b) internal pure returns (bool) { + return eq(_a, _b) || gt(_a, _b); + } +} diff --git a/packages/contracts-bedrock/src/safe/LivenessGuard.sol b/packages/contracts-bedrock/src/safe/LivenessGuard.sol index 46c0072f7ba47..bd3f5edd23ae8 100644 --- a/packages/contracts-bedrock/src/safe/LivenessGuard.sol +++ b/packages/contracts-bedrock/src/safe/LivenessGuard.sol @@ -30,8 +30,8 @@ contract LivenessGuard is ISemver, BaseGuard { event OwnerRecorded(address owner); /// @notice Semantic version. - /// @custom:semver 1.0.1-beta.4 - string public constant version = "1.0.1-beta.4"; + /// @custom:semver 1.0.2 + string public constant version = "1.0.2"; /// @notice The safe account for which this contract will be the guard. Safe internal immutable SAFE; diff --git a/packages/contracts-bedrock/src/safe/LivenessModule.sol b/packages/contracts-bedrock/src/safe/LivenessModule.sol index a033507176cc5..7645f11903f47 100644 --- a/packages/contracts-bedrock/src/safe/LivenessModule.sol +++ b/packages/contracts-bedrock/src/safe/LivenessModule.sol @@ -58,8 +58,8 @@ contract LivenessModule is ISemver { uint256 internal constant GUARD_STORAGE_SLOT = 0x4a204f620c8c5ccdca3fd54d003badd85ba500436a431f0cbda4f558c93c34c8; /// @notice Semantic version. 
- /// @custom:semver 1.2.1-beta.3 - string public constant version = "1.2.1-beta.3"; + /// @custom:semver 1.2.2 + string public constant version = "1.2.2"; // Constructor to initialize the Safe and baseModule instances constructor( diff --git a/packages/contracts-bedrock/src/safe/LivenessModule2.sol b/packages/contracts-bedrock/src/safe/LivenessModule2.sol new file mode 100644 index 0000000000000..457f8678910c8 --- /dev/null +++ b/packages/contracts-bedrock/src/safe/LivenessModule2.sol @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Safe +import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol"; +import { Enum } from "safe-contracts/common/Enum.sol"; +import { OwnerManager } from "safe-contracts/base/OwnerManager.sol"; + +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; + +/// @title LivenessModule2 +/// @notice This module allows challenge-based ownership transfer to a fallback owner +/// when the Safe becomes unresponsive. The fallback owner can initiate a challenge, +/// and if the Safe doesn't respond within the challenge period, ownership transfers +/// to the fallback owner. +/// @dev This is a singleton contract. To use it: +/// 1. The Safe must first enable this module using ModuleManager.enableModule() +/// 2. The Safe must then configure the module by calling configure() with params +contract LivenessModule2 is ISemver { + /// @notice Configuration for a Safe's liveness module. + /// @custom:field livenessResponsePeriod The duration in seconds that Safe owners have to + /// respond to a challenge. + /// @custom:field fallbackOwner The address that can initiate challenges and claim + /// ownership if the Safe is unresponsive. + struct ModuleConfig { + uint256 livenessResponsePeriod; + address fallbackOwner; + } + + /// @notice Mapping from Safe address to its configuration. 
+ mapping(address => ModuleConfig) public livenessSafeConfiguration; + + /// @notice Mapping from Safe address to active challenge start time (0 if none). + mapping(address => uint256) public challengeStartTime; + + /// @notice Reserved address used as previous owner to the first owner in a Safe. + address internal constant SENTINEL_OWNER = address(0x1); + + /// @notice Error for when module is not enabled for the Safe. + error LivenessModule2_ModuleNotEnabled(); + + /// @notice Error for when Safe is not configured for this module. + error LivenessModule2_ModuleNotConfigured(); + + /// @notice Error for when a challenge already exists. + error LivenessModule2_ChallengeAlreadyExists(); + + /// @notice Error for when no challenge exists. + error LivenessModule2_ChallengeDoesNotExist(); + + /// @notice Error for when trying to cancel a challenge after response period has ended. + error LivenessModule2_ResponsePeriodEnded(); + + /// @notice Error for when trying to execute ownership transfer while response period is + /// active. + error LivenessModule2_ResponsePeriodActive(); + + /// @notice Error for when caller is not authorized. + error LivenessModule2_UnauthorizedCaller(); + + /// @notice Error for invalid response period. + error LivenessModule2_InvalidResponsePeriod(); + + /// @notice Error for invalid fallback owner. + error LivenessModule2_InvalidFallbackOwner(); + + /// @notice Error for when trying to clear configuration while module is enabled. + error LivenessModule2_ModuleStillEnabled(); + + /// @notice Error for when ownership transfer verification fails. + error LivenessModule2_OwnershipTransferFailed(); + + /// @notice Emitted when a Safe configures the module. + /// @param safe The Safe address that configured the module. + /// @param livenessResponsePeriod The duration in seconds that Safe owners have to + /// respond to a challenge. 
+ /// @param fallbackOwner The address that can initiate challenges and claim ownership if + /// the Safe is unresponsive. + event ModuleConfigured(address indexed safe, uint256 livenessResponsePeriod, address fallbackOwner); + + /// @notice Emitted when a Safe clears the module configuration. + /// @param safe The Safe address that cleared the module configuration. + event ModuleCleared(address indexed safe); + + /// @notice Emitted when a challenge is started. + /// @param safe The Safe address that started the challenge. + /// @param challengeStartTime The timestamp when the challenge started. + event ChallengeStarted(address indexed safe, uint256 challengeStartTime); + + /// @notice Emitted when a challenge is cancelled. + /// @param safe The Safe address that cancelled the challenge. + event ChallengeCancelled(address indexed safe); + + /// @notice Emitted when ownership is transferred to the fallback owner. + /// @param safe The Safe address that succeeded the challenge. + /// @param fallbackOwner The address that claimed ownership if the Safe is unresponsive. + event ChallengeSucceeded(address indexed safe, address fallbackOwner); + + /// @notice Semantic version. + /// @custom:semver 2.0.0 + string public constant version = "2.0.0"; + + /// @notice Returns challenge_start_time + liveness_response_period if challenge exists, or + /// 0 if not. + /// @param _safe The Safe address to query. + /// @return The challenge end timestamp, or 0 if no challenge. + function getChallengePeriodEnd(address _safe) public view returns (uint256) { + uint256 startTime = challengeStartTime[_safe]; + if (startTime == 0) { + return 0; + } + ModuleConfig storage config = livenessSafeConfiguration[_safe]; + return startTime + config.livenessResponsePeriod; + } + + /// @notice Configures the module for a Safe that has already enabled it. + /// @param _config The configuration parameters for the module containing the response + /// period and fallback owner. 
+ function configureLivenessModule(ModuleConfig memory _config) external { + // Validate configuration parameters to ensure module can function properly. + // livenessResponsePeriod must be > 0 to allow time for Safe owners to respond. + if (_config.livenessResponsePeriod == 0) { + revert LivenessModule2_InvalidResponsePeriod(); + } + // fallbackOwner must not be zero address to have a valid ownership recipient. + if (_config.fallbackOwner == address(0)) { + revert LivenessModule2_InvalidFallbackOwner(); + } + + // Check that this module is enabled on the calling Safe. + _assertModuleEnabled(msg.sender); + + // Store the configuration for this safe + livenessSafeConfiguration[msg.sender] = _config; + + // Clear any existing challenge when configuring/re-configuring. + // This is necessary because changing the configuration (especially + // livenessResponsePeriod) + // would invalidate any ongoing challenge timing, creating inconsistent state. + // For example, if a challenge was started with a 7-day period and we reconfigure to + // 1 day, the challenge timing becomes ambiguous. Canceling ensures clean state. + // Additionally, a Safe that is able to successfully trigger the configuration function + // is necessarily live, so cancelling the challenge also makes sense from a + // theoretical standpoint. + _cancelChallenge(msg.sender); + + emit ModuleConfigured(msg.sender, _config.livenessResponsePeriod, _config.fallbackOwner); + } + + /// @notice Clears the module configuration for a Safe. + /// @dev Note: Clearing the configuration also cancels any ongoing challenges. + /// This function is intended for use when a Safe wants to permanently remove + /// the LivenessModule2 configuration. Typical usage pattern: + /// 1. Safe disables the module via ModuleManager.disableModule(). + /// 2. Safe calls this clearLivenessModule() function to remove stored configuration. + /// 3. If Safe later re-enables the module, it must call configureLivenessModule() again. 
+ /// Never calling clearLivenessModule() after disabling keeps configuration data persistent + /// for potential future re-enabling. + function clearLivenessModule() external { + // Check if the calling safe has configuration set + _assertModuleConfigured(msg.sender); + + // Check that this module is NOT enabled on the calling Safe + // This prevents clearing configuration while module is still enabled + _assertModuleNotEnabled(msg.sender); + + // Erase the configuration data for this safe + delete livenessSafeConfiguration[msg.sender]; + // Also clear any active challenge + _cancelChallenge(msg.sender); + emit ModuleCleared(msg.sender); + } + + /// @notice Challenges an enabled safe. + /// @param _safe The Safe address to challenge. + function challenge(address _safe) external { + // Check if the calling safe has configuration set + _assertModuleConfigured(_safe); + + // Check that the module is still enabled on the target Safe. + _assertModuleEnabled(_safe); + + // Check that the caller is the fallback owner + if (msg.sender != livenessSafeConfiguration[_safe].fallbackOwner) { + revert LivenessModule2_UnauthorizedCaller(); + } + + // Check that no challenge already exists + if (challengeStartTime[_safe] != 0) { + revert LivenessModule2_ChallengeAlreadyExists(); + } + + // Set the challenge start time and emit the event + challengeStartTime[_safe] = block.timestamp; + emit ChallengeStarted(_safe, block.timestamp); + } + + /// @notice Responds to a challenge for an enabled safe, canceling it. + function respond() external { + // Check if the calling safe has configuration set. + _assertModuleConfigured(msg.sender); + + // Check that this module is enabled on the calling Safe. 
+ _assertModuleEnabled(msg.sender); + + // Check that a challenge exists + uint256 startTime = challengeStartTime[msg.sender]; + if (startTime == 0) { + revert LivenessModule2_ChallengeDoesNotExist(); + } + + // Cancel the challenge without checking if response period has expired + // This allows the Safe to respond at any time, providing more flexibility + _cancelChallenge(msg.sender); + } + + /// @notice With successful challenge, removes all current owners from enabled safe, + /// appoints fallback as sole owner, and sets its quorum to 1. + /// @dev Note: After ownership transfer, the fallback owner becomes the sole owner + /// and is also still configured as the fallback owner. This means the + /// fallback owner effectively becomes its own fallback owner, maintaining + /// the ability to challenge itself if needed. + /// @param _safe The Safe address to transfer ownership of. + function changeOwnershipToFallback(address _safe) external { + // Ensure Safe is configured with this module to prevent unauthorized execution. + _assertModuleConfigured(_safe); + + // Verify module is still enabled to ensure Safe hasn't disabled it mid-challenge. + _assertModuleEnabled(_safe); + + // Only fallback owner can execute ownership transfer (per specs update) + if (msg.sender != livenessSafeConfiguration[_safe].fallbackOwner) { + revert LivenessModule2_UnauthorizedCaller(); + } + + // Verify active challenge exists - without challenge, ownership transfer not allowed + uint256 startTime = challengeStartTime[_safe]; + if (startTime == 0) { + revert LivenessModule2_ChallengeDoesNotExist(); + } + + // Ensure response period has fully expired before allowing ownership transfer. + // This gives Safe owners full configured time to demonstrate liveness. 
+ if (block.timestamp < getChallengePeriodEnd(_safe)) { + revert LivenessModule2_ResponsePeriodActive(); + } + + Safe targetSafe = Safe(payable(_safe)); + + // Get current owners + address[] memory owners = targetSafe.getOwners(); + + // Remove all owners after the first one + // Note: This loop is safe as real-world Safes have limited owners (typically < 10) + // Gas limits would only be a concern with hundreds/thousands of owners + while (owners.length > 1) { + targetSafe.execTransactionFromModule({ + to: _safe, + value: 0, + operation: Enum.Operation.Call, + data: abi.encodeCall(OwnerManager.removeOwner, (SENTINEL_OWNER, owners[0], 1)) + }); + owners = targetSafe.getOwners(); + } + + // Now swap the remaining single owner with the fallback owner + targetSafe.execTransactionFromModule({ + to: _safe, + value: 0, + operation: Enum.Operation.Call, + data: abi.encodeCall( + OwnerManager.swapOwner, (SENTINEL_OWNER, owners[0], livenessSafeConfiguration[_safe].fallbackOwner) + ) + }); + + // Sanity check: verify the fallback owner is now the only owner + address[] memory finalOwners = targetSafe.getOwners(); + if (finalOwners.length != 1 || finalOwners[0] != livenessSafeConfiguration[_safe].fallbackOwner) { + revert LivenessModule2_OwnershipTransferFailed(); + } + + // Reset the challenge state to allow a new challenge + delete challengeStartTime[_safe]; + + emit ChallengeSucceeded(_safe, livenessSafeConfiguration[_safe].fallbackOwner); + } + + /// @notice Asserts that the module is configured for the given Safe. + /// @param _safe The Safe address to check. + function _assertModuleConfigured(address _safe) internal view { + ModuleConfig storage config = livenessSafeConfiguration[_safe]; + if (config.fallbackOwner == address(0)) { + revert LivenessModule2_ModuleNotConfigured(); + } + } + + /// @notice Asserts that the module is enabled for the given Safe. + /// @param _safe The Safe address to check. 
+ function _assertModuleEnabled(address _safe) internal view { + Safe safe = Safe(payable(_safe)); + if (!safe.isModuleEnabled(address(this))) { + revert LivenessModule2_ModuleNotEnabled(); + } + } + + /// @notice Asserts that the module is not enabled for the given Safe. + /// @param _safe The Safe address to check. + function _assertModuleNotEnabled(address _safe) internal view { + Safe safe = Safe(payable(_safe)); + if (safe.isModuleEnabled(address(this))) { + revert LivenessModule2_ModuleStillEnabled(); + } + } + + /// @notice Internal function to cancel a challenge and emit the appropriate event. + /// @param _safe The Safe address for which to cancel the challenge. + function _cancelChallenge(address _safe) internal { + // Early return if no challenge exists + if (challengeStartTime[_safe] == 0) return; + + delete challengeStartTime[_safe]; + emit ChallengeCancelled(_safe); + } +} diff --git a/packages/contracts-bedrock/src/safe/TimelockGuard.sol b/packages/contracts-bedrock/src/safe/TimelockGuard.sol new file mode 100644 index 0000000000000..26dce92ce472e --- /dev/null +++ b/packages/contracts-bedrock/src/safe/TimelockGuard.sol @@ -0,0 +1,615 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Safe +import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol"; +import { Enum } from "safe-contracts/common/Enum.sol"; +import { Guard as IGuard } from "safe-contracts/base/GuardManager.sol"; + +// Libraries +import { EnumerableSet } from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; +import { SemverComp } from "src/libraries/SemverComp.sol"; + +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; + +/// @title TimelockGuard +/// @notice This guard provides timelock functionality for Safe transactions +/// @dev This is a singleton contract, any Safe on the network can use this guard to enforce a timelock delay, and +/// allow a subset of signers to cancel a transaction if they do not agree with the execution. 
This provides +/// significant security improvements over the Safe's default execution mechanism, which will allow any transaction +/// to be executed as long as it is fully signed, and with no mechanism for revealing the existence of said +/// signatures. +/// Usage: +/// In order to use this guard, the Safe must first enable it using Safe.setGuard(), and then configure it +/// by calling TimelockGuard.configureTimelockGuard(). +/// Scheduling and executing transactions: +/// Once enabled and configured, all transactions executed by the Safe's execTransaction() function will revert, +/// unless the transaction has first been scheduled by calling scheduleTransaction() on this contract. Because +/// scheduleTransaction() uses the Safe's own signature verification logic, the same signatures used +/// to execute a transaction can be used to schedule it. +/// Note: this guard does not apply a delay to transactions executed by modules which are installed on the Safe. +/// Cancelling transactions: +/// Once a transaction has been scheduled, so long as it has not already been executed, it can be +/// cancelled by calling cancelTransaction() on this contract. +/// This mechanism allows for a subset of signers to cancel a transaction if they do not agree with the execution. +/// As an 'anti-griefing' mechanism, the cancellation threshold (the number of signatures required to cancel a +/// transaction) starts at 1, and is automatically increased by 1 after each cancellation. +/// The cancellation threshold is reset to 1 after any transaction is executed successfully. +/// Safe Version Compatibility: +/// This guard is compatible with Safe versions 1.3.0 and higher. Earlier versions of the Safe do not expose +/// the checkSignatures or checkNSignatures functions required by this guard. 
+/// Threats Mitigated and Integration With LivenessModule: +/// This Guard is designed to protect against a number of well-defined scenarios, defined on +/// the two axes of amount of keys compromised, and type of compromise. +/// For scenarios where the keys compromised don't amount to a blocking threshold (the number of signers who must +/// refuse to sign a transaction in order to block it from being executed), regular transactions from the +/// multisig for removal or rotation is the preferred solution. +/// For scenarios where the keys compromised are at least a blocking threshold, but not as much as quorum, the +/// LivenessModule would be used. If there is a quorum of absent keys, but no significant malicious control, the +/// LivenessModule would also be used. +/// The TimelockGuard acts when there is malicious control of a quorum of keys. If the control is temporary, for +/// example by phishing a single set of signatures, then the TimelockGuard's cancellation is enough to stop the +/// attack entirely. If the malicious control would be permanent, then the TimelockGuard will buy some time to +/// execute remediations external to the compromised safe. +/// The following table summarizes the various scenarios and the course of action to take in each case. +/// +---------------------------------------------------------------------------+ +/// | Course of action when X Number of keys... | +/// +-------------------------------------------------------------------------------------------------+ +/// | | ... are Absent | ... 
are Maliciously Controlled | +/// | X Number of keys | (Honest signers cannot sign) | (Malicious signers can sign) | +/// +-------------------------------------------------------------------------------------------------+ +/// | 1+ | swapOwner | swapOwner | +/// +-------------------------------------------------------------------------------------------------+ +/// | Blocking Threshold+ | challenge + | challenge + | +/// | | changeOwnershipToFallback | changeOwnershipToFallback | +/// +-------------------------------------------------------------------------------------------------+ +/// | Quorum+ | challenge + | cancelTransaction | +/// | | changeOwnershipToFallback | | +/// +-------------------------------------------------------------------------------------------------+ +contract TimelockGuard is IGuard, ISemver { + using EnumerableSet for EnumerableSet.Bytes32Set; + + /// @notice Allowed states of a transaction + enum TransactionState { + NotScheduled, + Pending, + Cancelled, + Executed + } + + /// @notice Scheduled transaction + /// @custom:field executionTime The timestamp when execution becomes valid. + /// @custom:field state The state of the transaction. + /// @custom:field params The parameters of the transaction. + struct ScheduledTransaction { + uint256 executionTime; + TransactionState state; + ExecTransactionParams params; + } + + /// @notice Parameters for the Safe's execTransaction function + /// @custom:field to The address of the contract to call. + /// @custom:field value The value to send with the transaction. + /// @custom:field data The data to send with the transaction. + /// @custom:field operation The operation to perform with the transaction. + /// @custom:field safeTxGas The gas to use for the transaction. + /// @custom:field baseGas The base gas to use for the transaction. + /// @custom:field gasPrice The gas price to use for the transaction. + /// @custom:field gasToken The token to use for the transaction. 
+ /// @custom:field refundReceiver The address to receive the refund for the transaction. + struct ExecTransactionParams { + address to; + uint256 value; + bytes data; + Enum.Operation operation; + uint256 safeTxGas; + uint256 baseGas; + uint256 gasPrice; + address gasToken; + address payable refundReceiver; + } + + /// @notice Aggregated state for each Safe using this guard. + /// @dev We have chosen for operational reasons to keep a list of pending transactions that can be easily retrieved + /// via a function call. This is done by maintaining a separate EnumerableSet with the hashes of pending + /// transactions. Transactions in the enumerable set need to be updated along with updates to the + /// ScheduledTransactions mapping. + struct SafeState { + uint256 timelockDelay; + uint256 cancellationThreshold; + mapping(bytes32 => ScheduledTransaction) scheduledTransactions; + EnumerableSet.Bytes32Set pendingTxHashes; + } + + /// @notice Mapping from Safe address to its timelock guard state. + mapping(Safe => SafeState) internal _safeState; + + /// @notice Semantic version. 
+ /// @custom:semver 1.0.0 + string public constant version = "1.0.0"; + + /// @notice Error for when guard is not enabled for the Safe + error TimelockGuard_GuardNotEnabled(); + + /// @notice Error for when Safe is not configured for this guard + error TimelockGuard_GuardNotConfigured(); + + /// @notice Error for invalid timelock delay + error TimelockGuard_InvalidTimelockDelay(); + + /// @notice Error for when a transaction is already scheduled + error TimelockGuard_TransactionAlreadyScheduled(); + + /// @notice Error for when a transaction is already cancelled + error TimelockGuard_TransactionAlreadyCancelled(); + + /// @notice Error for when a transaction is not scheduled + error TimelockGuard_TransactionNotScheduled(); + + /// @notice Error for when a transaction is not ready to execute (timelock delay not passed) + error TimelockGuard_TransactionNotReady(); + + /// @notice Error for when a transaction has already been executed + error TimelockGuard_TransactionAlreadyExecuted(); + + /// @notice Error for when the contract is not at least version 1.3.0 + error TimelockGuard_InvalidVersion(); + + /// @notice Emitted when a Safe configures the guard + /// @param safe The Safe whose guard is configured. + /// @param timelockDelay The timelock delay in seconds. + event GuardConfigured(Safe indexed safe, uint256 timelockDelay); + + /// @notice Emitted when a transaction is scheduled for a Safe. + /// @param safe The Safe whose transaction is scheduled. + /// @param txHash The identifier of the scheduled transaction (nonce-independent). + /// @param executionTime The timestamp when execution becomes valid. + event TransactionScheduled(Safe indexed safe, bytes32 indexed txHash, uint256 executionTime); + + /// @notice Emitted when a transaction is cancelled for a Safe. + /// @param safe The Safe whose transaction is cancelled. + /// @param txHash The identifier of the cancelled transaction (nonce-independent). 
+ event TransactionCancelled(Safe indexed safe, bytes32 indexed txHash); + + /// @notice Emitted when the cancellation threshold is updated + /// @param safe The Safe whose cancellation threshold is updated. + /// @param oldThreshold The old cancellation threshold. + /// @param newThreshold The new cancellation threshold. + event CancellationThresholdUpdated(Safe indexed safe, uint256 oldThreshold, uint256 newThreshold); + + /// @notice Emitted when a transaction is executed for a Safe. + /// @param safe The Safe whose transaction is executed. + /// @param txHash The identifier of the executed transaction (nonce-independent). + event TransactionExecuted(Safe indexed safe, bytes32 txHash); + + /// @notice Used to emit a message, primarily to ensure that the cancelTransaction function + is not labelled as view so that it is treated as a state-changing function. + event Message(string message); + + //////////////////////////////////////////////////////////////// + // Internal View Functions // + //////////////////////////////////////////////////////////////// + + /// @notice Returns the blocking threshold, which is defined as the minimum number of owners that must coordinate to + /// block a transaction from being executed by refusing to sign. 
+ /// @param _safe The Safe address to query + /// @return The current blocking threshold + function _blockingThreshold(Safe _safe) internal view returns (uint256) { + return _safe.getOwners().length - _safe.getThreshold() + 1; + } + + /// @notice Internal helper to get the guard address from a Safe + /// @param _safe The Safe address + /// @return The current guard address + function _isGuardEnabled(Safe _safe) internal view returns (bool) { + // keccak256("guard_manager.guard.address") from GuardManager + bytes32 guardSlot = 0x4a204f620c8c5ccdca3fd54d003badd85ba500436a431f0cbda4f558c93c34c8; + address guard = abi.decode(_safe.getStorageAt(uint256(guardSlot), 1), (address)); + return guard == address(this); + } + + //////////////////////////////////////////////////////////////// + // External View Functions // + //////////////////////////////////////////////////////////////// + + /// @notice Returns the cancellation threshold for a given safe + /// @param _safe The Safe address to query + /// @return The current cancellation threshold + function cancellationThreshold(Safe _safe) public view returns (uint256) { + // Return 0 if guard is not enabled + if (!_isGuardEnabled(_safe)) { + return 0; + } + + return _safeState[_safe].cancellationThreshold; + } + + /// @notice Returns the maximum cancellation threshold for a given safe + /// @dev The cancellation threshold must be capped in order to preserve the ability of honest users to cancel + /// malicious transactions. The rationale for the calculation of the maximum cancellation threshold is as + /// follows: + /// If the quorum is lower, then it is used as the maximum cancellation threshold, + /// so that even if an attacker has _joint control_ of a quorum of keys, the honest users can still + /// indefinitely cancel a malicious transaction. 
+ /// If the blocking threshold is lower, then it is used as the maximum cancellation threshold, so that if an + /// attacker has less than a quorum of keys, honest users can still remove an attacker from the Safe by + /// refusing to respond to a malicious transaction. + /// @param _safe The Safe address to query + /// @return The maximum cancellation threshold + function maxCancellationThreshold(Safe _safe) public view returns (uint256) { + uint256 blockingThreshold = _blockingThreshold(_safe); + uint256 quorum = _safe.getThreshold(); + // Return the minimum of the blocking threshold and the quorum + return (blockingThreshold < quorum ? blockingThreshold : quorum); + } + + /// @notice Returns the timelock delay for a given Safe + /// @param _safe The Safe address to query + /// @return The timelock delay in seconds + function timelockConfiguration(Safe _safe) public view returns (uint256) { + return _safeState[_safe].timelockDelay; + } + + /// @notice Returns the scheduled transaction for a given Safe and tx hash + /// @dev This function is necessary to properly expose the scheduledTransactions mapping, as + /// simply making the mapping public will return a tuple instead of a struct. + function scheduledTransaction(Safe _safe, bytes32 _txHash) public view returns (ScheduledTransaction memory) { + return _safeState[_safe].scheduledTransactions[_txHash]; + } + + /// @notice Returns the list of all scheduled but not cancelled or executed transactions for + /// for a given safe + /// @dev WARNING: This operation will copy the entire set of pending transactions to memory, + /// which can be quite expensive. This is designed only to be used by view accessors that are + /// queried without any gas fees. Developers should keep in mind that this function has an + /// unbounded cost, and using it as part of a state-changing function may render the function + /// uncallable if the set grows to a point where copying to memory consumes too much gas to fit + /// in a block. 
+ /// @return List of pending transaction hashes + function pendingTransactions(Safe _safe) external view returns (ScheduledTransaction[] memory) { + SafeState storage safeState = _safeState[_safe]; + + // Get the list of pending transaction hashes + bytes32[] memory hashes = safeState.pendingTxHashes.values(); + + // We want to provide the caller with the full parameters of each pending transaction, but mappings are not + // iterable, so we use the enumerable set of pending transaction hashes to retrieve the ScheduledTransaction + // struct for each hash, and then return an array of the ScheduledTransaction structs. + ScheduledTransaction[] memory scheduled = new ScheduledTransaction[](hashes.length); + for (uint256 i = 0; i < hashes.length; i++) { + scheduled[i] = safeState.scheduledTransactions[hashes[i]]; + } + return scheduled; + } + + //////////////////////////////////////////////////////////////// + // Guard Interface Functions // + //////////////////////////////////////////////////////////////// + + /// @notice Implementation of IGuard interface. Called by the Safe before executing a transaction + /// @dev This function is used to check that the transaction has been scheduled and is ready to execute. + /// It only reads the state of the contract, and potentially reverts in order to protect against execution of + /// unscheduled, early or cancelled transactions. + function checkTransaction( + address _to, + uint256 _value, + bytes memory _data, + Enum.Operation _operation, + uint256 _safeTxGas, + uint256 _baseGas, + uint256 _gasPrice, + address _gasToken, + address payable _refundReceiver, + bytes memory, /* signatures */ + address /* msgSender */ + ) + external + view + override + { + Safe callingSafe = Safe(payable(msg.sender)); + + if (_safeState[callingSafe].timelockDelay == 0) { + // We return immediately. This is important in order to allow a Safe which has the + // guard set, but not configured, to complete the setup process. 
+ + // It is also just a reasonable thing to do, since an unconfigured Safe must have a + // delay of zero. + return; + } + + // Get the nonce of the Safe for the transaction being executed, + // since the Safe's nonce is incremented before the transaction is executed, + // we must subtract 1. + uint256 nonce = callingSafe.nonce() - 1; + + // Get the transaction hash from the Safe's getTransactionHash function + bytes32 txHash = callingSafe.getTransactionHash( + _to, _value, _data, _operation, _safeTxGas, _baseGas, _gasPrice, _gasToken, _refundReceiver, nonce + ); + + // Get the scheduled transaction + ScheduledTransaction storage scheduledTx = _safeState[callingSafe].scheduledTransactions[txHash]; + + // Check if the transaction was cancelled + if (scheduledTx.state == TransactionState.Cancelled) { + revert TimelockGuard_TransactionAlreadyCancelled(); + } + + // Check if the transaction has already been executed + // Note: this is of course enforced by the Safe itself, but we check it here for + // completeness + if (scheduledTx.state == TransactionState.Executed) { + revert TimelockGuard_TransactionAlreadyExecuted(); + } + + // Check if the transaction has been scheduled + if (scheduledTx.state == TransactionState.NotScheduled) { + revert TimelockGuard_TransactionNotScheduled(); + } + + // Check if the timelock delay has passed + if (scheduledTx.executionTime > block.timestamp) { + revert TimelockGuard_TransactionNotReady(); + } + } + + /// @notice Implementation of IGuard interface. Called by the Safe after executing a transaction + /// @dev This function is used to update the state of the contract after the transaction has been executed. + /// Although making state changes here is a violation of the Checks-Effects-Interactions pattern, it is + safe to do in this case because we trust that the Safe does not enable arbitrary calls without + proper authorization checks. 
+ function checkAfterExecution(bytes32 _txHash, bool _success) external override { + Safe callingSafe = Safe(payable(msg.sender)); + // If the timelock delay is zero, we return immediately. + // This is important in order to allow a Safe which has the guard set, but not configured, + // to complete the setup process. + // It is also just a reasonable thing to do, since an unconfigured Safe must have a delay of zero, and so + // we do not expect the transaction to have been scheduled. + if (_safeState[callingSafe].timelockDelay == 0) { + return; + } + + // If the transaction failed, then we return early and leave the transaction in its current state, + // which allows the transaction to be retried. + // This is consistent with the Safe's own behaviour, which does not increment the nonce if the + // call fails. + if (!_success) { + return; + } + + ScheduledTransaction storage scheduledTx = _safeState[callingSafe].scheduledTransactions[_txHash]; + + // Set the transaction as executed + scheduledTx.state = TransactionState.Executed; + _safeState[callingSafe].pendingTxHashes.remove(_txHash); + + // Reset the cancellation threshold + _resetCancellationThreshold(callingSafe); + + emit TransactionExecuted(callingSafe, _txHash); + } + + //////////////////////////////////////////////////////////////// + // Internal State-Changing Functions // + //////////////////////////////////////////////////////////////// + + /// @notice Increase the cancellation threshold for a safe + /// @dev This function must be called only once and only when calling cancel + /// @param _safe The Safe address to increase the cancellation threshold for. 
+ function _increaseCancellationThreshold(Safe _safe) internal { + SafeState storage safeState = _safeState[_safe]; + + if (safeState.cancellationThreshold < maxCancellationThreshold(_safe)) { + uint256 oldThreshold = safeState.cancellationThreshold; + safeState.cancellationThreshold++; + emit CancellationThresholdUpdated(_safe, oldThreshold, safeState.cancellationThreshold); + } + } + + /// @notice Reset the cancellation threshold for a safe + /// @dev This function must be called only once and only when calling checkAfterExecution + /// @param _safe The Safe address to reset the cancellation threshold for. + function _resetCancellationThreshold(Safe _safe) internal { + SafeState storage safeState = _safeState[_safe]; + uint256 oldThreshold = safeState.cancellationThreshold; + safeState.cancellationThreshold = 1; + emit CancellationThresholdUpdated(_safe, oldThreshold, 1); + } + + //////////////////////////////////////////////////////////////// + // External State-Changing Functions // + //////////////////////////////////////////////////////////////// + + /// @notice Configure the contract as a timelock guard by setting the timelock delay + /// @dev This function is only callable by the Safe itself. + /// Requiring a call from the Safe itself (rather than accepting signatures directly as in cancelTransaction()) + /// is important to ensure that maliciously gathered signatures will not be able to instantly reconfigure + /// the delay to zero. This function does not check that the guard is enabled on the Safe, the recommended + /// approach is to atomically enable the guard and configure the delay in a single batched transaction. 
+ /// @param _timelockDelay The timelock delay in seconds (0 to clear configuration) + function configureTimelockGuard(uint256 _timelockDelay) external { + // Record the calling Safe + Safe callingSafe = Safe(payable(msg.sender)); + + // Check that the contract is at least version 1.3.0 + // Prior to version 1.3.0, checkSignatures() was not exposed as a public function, so we need to check the + // version otherwise the safe will be bricked. + if (SemverComp.lt(callingSafe.VERSION(), "1.3.0")) { + revert TimelockGuard_InvalidVersion(); + } + + // Validate timelock delay - must not be longer than 1 year + if (_timelockDelay > 365 days) { + revert TimelockGuard_InvalidTimelockDelay(); + } + + // Store the timelock delay for this safe + _safeState[callingSafe].timelockDelay = _timelockDelay; + + // Initialize (or reset) the cancellation threshold to 1. + _resetCancellationThreshold(callingSafe); + emit GuardConfigured(callingSafe, _timelockDelay); + } + + /// @notice Schedule a transaction for execution after the timelock delay. + /// @dev This function validates signatures in the exact same way as the Safe's own execTransaction function, + /// meaning that the same signatures used to schedule a transaction can be used to execute it later. This + /// maintains compatibility with existing signature generation tools. Owners can use any method to sign the + /// transaction, including signing with a private key, calling the Safe's approveHash function, or EIP1271 + /// contract signatures. + /// @param _safe The Safe address to schedule the transaction for. + /// @param _nonce The nonce of the Safe for the transaction being scheduled. + /// @param _params The parameters of the transaction being scheduled. + /// @param _signatures The signatures of the owners who are scheduling the transaction. 
+ function scheduleTransaction( + Safe _safe, + uint256 _nonce, + ExecTransactionParams memory _params, + bytes memory _signatures + ) + external + { + // Check that this guard is enabled on the calling Safe + if (!_isGuardEnabled(_safe)) { + revert TimelockGuard_GuardNotEnabled(); + } + + // Check that the guard has been configured for the Safe + if (_safeState[_safe].timelockDelay == 0) { + revert TimelockGuard_GuardNotConfigured(); + } + + // Get the encoded transaction data as defined in the Safe + // The format of the string returned is: "0x1901{domainSeparator}{safeTxHash}" + bytes memory txHashData = _safe.encodeTransactionData( + _params.to, + _params.value, + _params.data, + _params.operation, + _params.safeTxGas, + _params.baseGas, + _params.gasPrice, + _params.gasToken, + _params.refundReceiver, + _nonce + ); + + // Get the transaction hash and data as defined in the Safe + // This value is identical to keccak256(txHashData), but we prefer to use the Safe's own + // internal logic as it is more future-proof in case future versions of the Safe change + // the transaction hash derivation. + bytes32 txHash = _safe.getTransactionHash( + _params.to, + _params.value, + _params.data, + _params.operation, + _params.safeTxGas, + _params.baseGas, + _params.gasPrice, + _params.gasToken, + _params.refundReceiver, + _nonce + ); + + // Check if the transaction exists + // A transaction can only be scheduled once, regardless of whether it has been cancelled or not, + // as otherwise an observer could reuse the same signatures to either: + // 1. Reschedule a transaction after it has been cancelled + // 2. Reschedule a pending transaction, which would update the execution time thus extending the delay + // for the original transaction. 
+ if (_safeState[_safe].scheduledTransactions[txHash].executionTime != 0) { + revert TimelockGuard_TransactionAlreadyScheduled(); + } + + // Verify signatures using the Safe's signature checking logic + // This function call reverts if the signatures are invalid. + _safe.checkSignatures(txHash, txHashData, _signatures); + + // Calculate the execution time + uint256 executionTime = block.timestamp + _safeState[_safe].timelockDelay; + + // Schedule the transaction and add it to the pending transactions set + _safeState[_safe].scheduledTransactions[txHash] = + ScheduledTransaction({ executionTime: executionTime, state: TransactionState.Pending, params: _params }); + _safeState[_safe].pendingTxHashes.add(txHash); + + emit TransactionScheduled(_safe, txHash, executionTime); + } + + /// @notice Cancel a scheduled transaction if cancellation threshold is met + /// @dev This function aims to mimic the approach which would be used by a quorum of signers to + /// cancel a partially signed transaction, by signing and executing an empty + /// transaction at the same nonce. + /// This enables us to define a standard "cancellation transaction" format using the Safe address, nonce, + /// and hash of the transaction being cancelled. This is necessary to ensure that the cancellation transaction + /// is unique and cannot be used to cancel another transaction at the same nonce. + /// + /// Signature verification uses the Safe's checkNSignatures function, so that the number of signatures + /// can be set by the Safe's current cancellation threshold. Another benefit of checkNSignatures is that owners + /// can use any method to sign the cancellation transaction inputs, including signing with a private key, + /// calling the Safe's approveHash function, or EIP1271 contract signatures. + /// @param _safe The Safe address to cancel the transaction for. + /// @param _txHash The hash of the transaction being cancelled. 
+ /// @param _nonce The nonce of the Safe for the transaction being cancelled. + /// @param _signatures The signatures of the owners who are cancelling the transaction. + function cancelTransaction(Safe _safe, bytes32 _txHash, uint256 _nonce, bytes memory _signatures) external { + // The following checks ensure that the transaction has: + // 1. Been scheduled + // 2. Not already been cancelled + // 3. Not already been executed + // There is nothing inherently wrong with cancelling a transaction that doesn't meet these + // criteria, but we revert in order to inform the user, and avoid emitting a misleading TransactionCancelled + // event. + if (_safeState[_safe].scheduledTransactions[_txHash].state == TransactionState.Cancelled) { + revert TimelockGuard_TransactionAlreadyCancelled(); + } + if (_safeState[_safe].scheduledTransactions[_txHash].state == TransactionState.Executed) { + revert TimelockGuard_TransactionAlreadyExecuted(); + } + if (_safeState[_safe].scheduledTransactions[_txHash].state == TransactionState.NotScheduled) { + revert TimelockGuard_TransactionNotScheduled(); + } + + // Generate the cancellation transaction data + bytes memory txData = abi.encodeCall(this.signCancellation, (_txHash)); + // Any nonce can be used here, as long as all of the signatures are for the same + // nonce. In practice we expect the nonce to be the same as the nonce of the transaction + // being cancelled, as this most closely mimics the behaviour of the Safe UI's transaction + // replacement feature. However we do not enforce that here, to allow for flexibility, + // and to avoid the need for logic to retrieve the nonce from the transaction being + // cancelled. 
+ bytes memory cancellationTxData = _safe.encodeTransactionData( + address(this), 0, txData, Enum.Operation.Call, 0, 0, 0, address(0), address(0), _nonce + ); + bytes32 cancellationTxHash = _safe.getTransactionHash( + address(this), 0, txData, Enum.Operation.Call, 0, 0, 0, address(0), address(0), _nonce + ); + + // Verify signatures using the Safe's signature checking logic, with the cancellation threshold as + // the number of signatures required. + _safe.checkNSignatures( + cancellationTxHash, cancellationTxData, _signatures, _safeState[_safe].cancellationThreshold + ); + + // Set the transaction as cancelled, and remove it from the pending transactions set + _safeState[_safe].scheduledTransactions[_txHash].state = TransactionState.Cancelled; + _safeState[_safe].pendingTxHashes.remove(_txHash); + + // Increase the cancellation threshold + _increaseCancellationThreshold(_safe); + + emit TransactionCancelled(_safe, _txHash); + } + + //////////////////////////////////////////////////////////////// + // Dummy Functions // + //////////////////////////////////////////////////////////////// + + /// @notice Dummy function provided as a utility to facilitate signing cancelTransaction data in + /// the Safe UI. 
+ function signCancellation(bytes32) public { + emit Message("This function is not meant to be called, did you mean to call cancelTransaction?"); + } +} diff --git a/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol b/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol index 93ee9b9269ab2..1ef7370f28824 100644 --- a/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol +++ b/packages/contracts-bedrock/src/universal/OptimismMintableERC20.sol @@ -17,7 +17,7 @@ import { ILegacyMintableERC20 } from "interfaces/legacy/ILegacyMintableERC20.sol /// @title OptimismMintableERC20 /// @notice OptimismMintableERC20 is a standard extension of the base ERC20 token contract designed /// to allow the StandardBridge contracts to mint and burn tokens. This makes it possible to -/// use an OptimismMintablERC20 as the L2 representation of an L1 token, or vice-versa. +/// use an OptimismMintableERC20 as the L2 representation of an L1 token, or vice-versa. /// Designed to be backwards compatible with the older StandardL2ERC20 token which was only /// meant for use on L2. contract OptimismMintableERC20 is ERC20Permit, ISemver { @@ -47,8 +47,8 @@ contract OptimismMintableERC20 is ERC20Permit, ISemver { } /// @notice Semantic version. - /// @custom:semver 1.4.0-beta.5 - string public constant version = "1.4.0-beta.5"; + /// @custom:semver 1.4.1 + string public constant version = "1.4.1"; /// @notice Getter function for the permit2 address. It deterministically deployed /// so it will always be at the same address. 
It is also included as a preinstall, diff --git a/packages/contracts-bedrock/src/universal/OptimismMintableERC20Factory.sol b/packages/contracts-bedrock/src/universal/OptimismMintableERC20Factory.sol index 37af174cce5bc..adebbffc5e07b 100644 --- a/packages/contracts-bedrock/src/universal/OptimismMintableERC20Factory.sol +++ b/packages/contracts-bedrock/src/universal/OptimismMintableERC20Factory.sol @@ -51,8 +51,8 @@ contract OptimismMintableERC20Factory is ISemver, Initializable, IOptimismERC20F /// the OptimismMintableERC20 token contract since this contract /// is responsible for deploying OptimismMintableERC20 contracts. /// @notice Semantic version. - /// @custom:semver 1.10.1 - string public constant version = "1.10.1"; + /// @custom:semver 1.10.2 + string public constant version = "1.10.2"; /// @notice Constructs the OptimismMintableERC20Factory contract. constructor() { diff --git a/packages/contracts-bedrock/src/universal/StorageSetter.sol b/packages/contracts-bedrock/src/universal/StorageSetter.sol index 9656ca21c5d2d..1d69559c6b7c6 100644 --- a/packages/contracts-bedrock/src/universal/StorageSetter.sol +++ b/packages/contracts-bedrock/src/universal/StorageSetter.sol @@ -19,8 +19,8 @@ contract StorageSetter is ISemver { } /// @notice Semantic version. - /// @custom:semver 1.2.1-beta.4 - string public constant version = "1.2.1-beta.4"; + /// @custom:semver 1.2.2 + string public constant version = "1.2.2"; /// @notice Stores a bytes32 `_value` at `_slot`. Any storage slots that /// are packed should be set through this interface. 
diff --git a/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol b/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol index 349f60a05cd80..d6a778224d18a 100644 --- a/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol +++ b/packages/contracts-bedrock/test/L1/DataAvailabilityChallenge.t.sol @@ -76,28 +76,42 @@ contract DataAvailabilityChallenge_SetResolverRefundPercentage_Test is DataAvail vm.prank(owner); dataAvailabilityChallenge.setResolverRefundPercentage(101); } + + /// @notice Test that the `setResolverRefundPercentage` function reverts if sender is not owner. + function testFuzz_setResolverRefundPercentage_onlyOwner_reverts(address _notOwner, uint256 _percentage) public { + vm.assume(_notOwner != dataAvailabilityChallenge.owner()); + _percentage = bound(_percentage, 0, 100); + + vm.prank(_notOwner); + vm.expectRevert("Ownable: caller is not the owner"); + dataAvailabilityChallenge.setResolverRefundPercentage(_percentage); + } } /// @title DataAvailabilityChallenge_Receive_Test /// @notice Test contract for DataAvailabilityChallenge `receive` function. contract DataAvailabilityChallenge_Receive_Test is DataAvailabilityChallenge_TestInit { - /// @notice Test that the `receive` function succeeds. - function test_receive_succeeds() public { - assertEq(dataAvailabilityChallenge.balances(address(this)), 0); - (bool success,) = payable(address(dataAvailabilityChallenge)).call{ value: 1000 }(""); + /// @notice Test that the `receive` function succeeds with various amounts. 
+ function testFuzz_receive_succeeds(uint256 _amount) public { + vm.deal(address(this), _amount); + + uint256 initialBalance = dataAvailabilityChallenge.balances(address(this)); + (bool success,) = payable(address(dataAvailabilityChallenge)).call{ value: _amount }(""); assertTrue(success); - assertEq(dataAvailabilityChallenge.balances(address(this)), 1000); + assertEq(dataAvailabilityChallenge.balances(address(this)), initialBalance + _amount); } } /// @title DataAvailabilityChallenge_Deposit_Test /// @notice Test contract for DataAvailabilityChallenge `deposit` function. contract DataAvailabilityChallenge_Deposit_Test is DataAvailabilityChallenge_TestInit { - /// @notice Test that the `deposit` function succeeds. - function test_deposit_succeeds() public { - assertEq(dataAvailabilityChallenge.balances(address(this)), 0); - dataAvailabilityChallenge.deposit{ value: 1000 }(); - assertEq(dataAvailabilityChallenge.balances(address(this)), 1000); + /// @notice Test that the `deposit` function succeeds with various amounts. + function testFuzz_deposit_succeeds(uint256 _amount) public { + vm.deal(address(this), _amount); + + uint256 initialBalance = dataAvailabilityChallenge.balances(address(this)); + dataAvailabilityChallenge.deposit{ value: _amount }(); + assertEq(dataAvailabilityChallenge.balances(address(this)), initialBalance + _amount); } } @@ -152,25 +166,178 @@ contract DataAvailabilityChallenge_Withdraw_Test is DataAvailabilityChallenge_Te } } +/// @title DataAvailabilityChallenge_GetChallenge_Test +/// @notice Test contract for DataAvailabilityChallenge `getChallenge` function. +contract DataAvailabilityChallenge_GetChallenge_Test is DataAvailabilityChallenge_TestInit { + /// @notice Test that the `getChallenge` function returns uninitialized challenge. 
+ function test_getChallenge_uninitializedChallenge_succeeds() public view { + bytes memory commitment = computeCommitmentKeccak256("test data"); + uint256 blockNumber = 100; + + Challenge memory challenge = dataAvailabilityChallenge.getChallenge(blockNumber, commitment); + + assertEq(challenge.challenger, address(0)); + assertEq(challenge.lockedBond, 0); + assertEq(challenge.startBlock, 0); + assertEq(challenge.resolvedBlock, 0); + } + + /// @notice Test that the `getChallenge` function returns active challenge data. + function testFuzz_getChallenge_activeChallenge_succeeds( + address _challenger, + uint256 _challengedBlockNumber, + bytes memory _preImage + ) + public + { + vm.assume(_challenger != address(0)); + _challengedBlockNumber = + bound(_challengedBlockNumber, 0, type(uint256).max - dataAvailabilityChallenge.challengeWindow() - 1); + + bytes memory commitment = computeCommitmentKeccak256(_preImage); + uint256 bondSize = dataAvailabilityChallenge.bondSize(); + + vm.roll(_challengedBlockNumber + 1); + vm.deal(_challenger, bondSize); + vm.prank(_challenger); + dataAvailabilityChallenge.challenge{ value: bondSize }(_challengedBlockNumber, commitment); + + Challenge memory challenge = dataAvailabilityChallenge.getChallenge(_challengedBlockNumber, commitment); + + assertEq(challenge.challenger, _challenger); + assertEq(challenge.lockedBond, bondSize); + assertEq(challenge.startBlock, block.number); + assertEq(challenge.resolvedBlock, 0); + } +} + +/// @title DataAvailabilityChallenge_GetChallengeStatus_Test +/// @notice Test contract for DataAvailabilityChallenge `getChallengeStatus` function. +contract DataAvailabilityChallenge_GetChallengeStatus_Test is DataAvailabilityChallenge_TestInit { + /// @notice Test that the `getChallengeStatus` function returns correct status for each state. 
+ function test_getChallengeStatus_allStates_succeeds() public { + bytes memory preImage = "test data"; + bytes memory commitment = computeCommitmentKeccak256(preImage); + uint256 challengedBlockNumber = 100; + uint256 bondSize = dataAvailabilityChallenge.bondSize(); + + // Test uninitialized status + assertEq( + uint8(dataAvailabilityChallenge.getChallengeStatus(challengedBlockNumber, commitment)), + uint8(ChallengeStatus.Uninitialized) + ); + + // Create active challenge + vm.roll(challengedBlockNumber + 1); + vm.deal(address(this), bondSize); + dataAvailabilityChallenge.challenge{ value: bondSize }(challengedBlockNumber, commitment); + + // Test active status + assertEq( + uint8(dataAvailabilityChallenge.getChallengeStatus(challengedBlockNumber, commitment)), + uint8(ChallengeStatus.Active) + ); + + // Resolve the challenge + dataAvailabilityChallenge.resolve(challengedBlockNumber, commitment, preImage); + + // Test resolved status + assertEq( + uint8(dataAvailabilityChallenge.getChallengeStatus(challengedBlockNumber, commitment)), + uint8(ChallengeStatus.Resolved) + ); + } + + /// @notice Test that the `getChallengeStatus` function returns expired status. + function test_getChallengeStatus_expiredChallenge_succeeds() public { + bytes memory commitment = computeCommitmentKeccak256("test data"); + uint256 challengedBlockNumber = 100; + uint256 bondSize = dataAvailabilityChallenge.bondSize(); + + // Create challenge + vm.roll(challengedBlockNumber + 1); + vm.deal(address(this), bondSize); + dataAvailabilityChallenge.challenge{ value: bondSize }(challengedBlockNumber, commitment); + + // Move past resolve window + vm.roll(block.number + dataAvailabilityChallenge.resolveWindow() + 1); + + // Test expired status + assertEq( + uint8(dataAvailabilityChallenge.getChallengeStatus(challengedBlockNumber, commitment)), + uint8(ChallengeStatus.Expired) + ); + } + + /// @notice Test status transitions with fuzz testing. 
+ function testFuzz_getChallengeStatus_transitions_succeeds( + uint256 _challengedBlockNumber, + bytes memory _preImage + ) + public + { + _challengedBlockNumber = bound( + _challengedBlockNumber, + 0, + type(uint256).max - dataAvailabilityChallenge.challengeWindow() - dataAvailabilityChallenge.resolveWindow() + - 10 + ); + + bytes memory commitment = computeCommitmentKeccak256(_preImage); + uint256 bondSize = dataAvailabilityChallenge.bondSize(); + + // Initially uninitialized + assertEq( + uint8(dataAvailabilityChallenge.getChallengeStatus(_challengedBlockNumber, commitment)), + uint8(ChallengeStatus.Uninitialized) + ); + + // Create challenge and verify active + vm.roll(_challengedBlockNumber + 1); + vm.deal(address(this), bondSize); + dataAvailabilityChallenge.challenge{ value: bondSize }(_challengedBlockNumber, commitment); + + assertEq( + uint8(dataAvailabilityChallenge.getChallengeStatus(_challengedBlockNumber, commitment)), + uint8(ChallengeStatus.Active) + ); + } +} + /// @title DataAvailabilityChallenge_ValidateCommitment_Test /// @notice Test contract for DataAvailabilityChallenge `validateCommitment` function. contract DataAvailabilityChallenge_ValidateCommitment_Test is DataAvailabilityChallenge_TestInit { - /// @notice Test that the `validateCommitment` function handles valid and invalid commitments correctly. - function test_validateCommitment_succeeds() public { - // Should not revert given a valid commitment + /// @notice Test that the `validateCommitment` function handles valid commitment. + function test_validateCommitment_validCommitment_succeeds() public view { bytes memory validCommitment = abi.encodePacked(CommitmentType.Keccak256, keccak256("test")); dataAvailabilityChallenge.validateCommitment(validCommitment); + } + + /// @notice Test that the `validateCommitment` function reverts for unknown commitment types. 
+ function testFuzz_validateCommitment_unknownType_reverts(uint8 _unknownType, bytes32 _hash) public { + vm.assume(_unknownType != uint8(CommitmentType.Keccak256)); + + bytes memory unknownTypeCommitment = abi.encodePacked(_unknownType, _hash); - // Should revert if the commitment type is unknown - vm.expectRevert(abi.encodeWithSelector(IDataAvailabilityChallenge.UnknownCommitmentType.selector, uint8(1))); - bytes memory unknownType = abi.encodePacked(uint8(1), keccak256("test")); - dataAvailabilityChallenge.validateCommitment(unknownType); + vm.expectRevert(abi.encodeWithSelector(IDataAvailabilityChallenge.UnknownCommitmentType.selector, _unknownType)); + dataAvailabilityChallenge.validateCommitment(unknownTypeCommitment); + } + + /// @notice Test that the `validateCommitment` function reverts for invalid lengths. + function testFuzz_validateCommitment_invalidLength_reverts(uint8 _extraBytes) public { + _extraBytes = uint8(bound(_extraBytes, 1, 100)); + + bytes memory invalidLength = + abi.encodePacked(CommitmentType.Keccak256, keccak256("test"), new bytes(_extraBytes)); - // Should revert if the commitment length does not match vm.expectRevert( - abi.encodeWithSelector(IDataAvailabilityChallenge.InvalidCommitmentLength.selector, uint8(0), 33, 34) + abi.encodeWithSelector( + IDataAvailabilityChallenge.InvalidCommitmentLength.selector, + uint8(CommitmentType.Keccak256), + 33, + 33 + _extraBytes + ) ); - bytes memory invalidLength = abi.encodePacked(CommitmentType.Keccak256, keccak256("test"), "x"); dataAvailabilityChallenge.validateCommitment(invalidLength); } } diff --git a/packages/contracts-bedrock/test/L1/ETHLockbox.t.sol b/packages/contracts-bedrock/test/L1/ETHLockbox.t.sol index 2b0685873a571..3e4d3965e9a64 100644 --- a/packages/contracts-bedrock/test/L1/ETHLockbox.t.sol +++ b/packages/contracts-bedrock/test/L1/ETHLockbox.t.sol @@ -11,6 +11,7 @@ import { Proxy } from "src/universal/Proxy.sol"; import { Constants } from "src/libraries/Constants.sol"; import { 
EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; import { ForgeArtifacts, StorageSlot } from "scripts/libraries/ForgeArtifacts.sol"; +import { Features } from "src/libraries/Features.sol"; // Interfaces import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; @@ -36,6 +37,9 @@ contract ETHLockbox_TestInit is CommonTest { // deployed // TODO(#14691): Remove this check once Upgrade 15 is deployed on Mainnet. if (isForkTest() && !deploy.cfg().useUpgradedFork()) vm.skip(true); + + // If the ETHLockbox system feature is not enabled, skip these tests. + skipIfSysFeatureDisabled(Features.ETH_LOCKBOX); } } @@ -639,9 +643,9 @@ contract ETHLockbox_MigrateLiquidity_Test is ETHLockbox_TestInit { } } -/// @title ETHLockbox_Unclassified_Test -/// @notice Contains unclassified tests related to ETHLockbox. -contract ETHLockbox_Unclassified_Test is ETHLockbox_TestInit { +/// @title ETHLockbox_Uncategorized_Test +/// @notice Contains uncategorized tests related to ETHLockbox. +contract ETHLockbox_Uncategorized_Test is ETHLockbox_TestInit { /// @notice Tests the proxy admin owner is correctly returned. function test_proxyProxyAdminOwner_succeeds() public view { assertEq(ethLockbox.proxyAdminOwner(), proxyAdminOwner); diff --git a/packages/contracts-bedrock/test/L1/L1CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/L1/L1CrossDomainMessenger.t.sol index f1f7ece1c97da..a28e54eafe0e5 100644 --- a/packages/contracts-bedrock/test/L1/L1CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/L1/L1CrossDomainMessenger.t.sol @@ -128,80 +128,41 @@ contract L1CrossDomainMessenger_Initialize_Test is L1CrossDomainMessenger_TestIn vm.prank(_sender); l1CrossDomainMessenger.initialize(systemConfig, optimismPortal2); } -} - -/// @title L1CrossDomainMessenger_Upgrade_Test -/// @notice Reusable test for the current `upgrade` function in the L1CrossDomainMessenger -/// contract. 
If the `upgrade` function is changed, tests inside of this contract should be -/// updated to reflect the new function. If the `upgrade` function is removed, remove the -/// corresponding tests but leave this contract in place so it\'s easy to add tests back -/// in the future. -contract L1CrossDomainMessenger_Upgrade_Test is L1CrossDomainMessenger_TestInit { - /// @notice Tests that the upgrade() function succeeds. - function test_upgrade_succeeds() external { - // Get the slot for _initial. - StorageSlot memory slot = ForgeArtifacts.getSlot("L1CrossDomainMessenger", "_initialized"); - - // Set the initialized slot to 0. - vm.store(address(l1CrossDomainMessenger), bytes32(slot.slot), bytes32(0)); - - // Verify the initial systemConfig slot is non-zero. - StorageSlot memory systemConfigSlot = ForgeArtifacts.getSlot("L1CrossDomainMessenger", "systemConfig"); - vm.store(address(l1CrossDomainMessenger), bytes32(systemConfigSlot.slot), bytes32(uint256(1))); - assertNotEq(address(l1CrossDomainMessenger.systemConfig()), address(0)); - assertNotEq(vm.load(address(l1CrossDomainMessenger), bytes32(systemConfigSlot.slot)), bytes32(0)); - - ISystemConfig newSystemConfig = ISystemConfig(address(0xdeadbeef)); - - // Trigger upgrade(). - vm.prank(address(l1CrossDomainMessenger.proxyAdmin())); - l1CrossDomainMessenger.upgrade(newSystemConfig); - // Verify that the systemConfig was updated. - assertEq(address(l1CrossDomainMessenger.systemConfig()), address(newSystemConfig)); - } - - /// @notice Tests that the upgrade() function reverts if called a second time. - function test_upgrade_upgradeTwice_reverts() external { + /// @notice Fuzz test for initialize with any system config address. + /// @param _systemConfig The system config address to test. + function testFuzz_initialize_anySystemConfig_succeeds(address _systemConfig) external { // Get the slot for _initialized. 
StorageSlot memory slot = ForgeArtifacts.getSlot("L1CrossDomainMessenger", "_initialized"); // Set the initialized slot to 0. vm.store(address(l1CrossDomainMessenger), bytes32(slot.slot), bytes32(0)); - // Create a new SystemConfig contract - ISystemConfig newSystemConfig = ISystemConfig(address(0xdeadbeef)); - - // Trigger first upgrade. - vm.prank(address(l1CrossDomainMessenger.proxyAdmin())); - l1CrossDomainMessenger.upgrade(newSystemConfig); + // Initialize with the fuzzed system config address + vm.prank(address(proxyAdmin)); + l1CrossDomainMessenger.initialize(ISystemConfig(_systemConfig), optimismPortal2); - // Try to trigger second upgrade. - vm.prank(address(l1CrossDomainMessenger.proxyAdmin())); - vm.expectRevert("Initializable: contract is already initialized"); - l1CrossDomainMessenger.upgrade(newSystemConfig); + // Verify the address was set correctly + assertEq(address(l1CrossDomainMessenger.systemConfig()), _systemConfig); + assertEq(address(l1CrossDomainMessenger.portal()), address(optimismPortal2)); } - /// @notice Tests that the upgrade() function reverts if called by a non-proxy admin or owner. - /// @param _sender The address of the sender to test. - function testFuzz_upgrade_notProxyAdminOrProxyAdminOwner_reverts(address _sender) public { - // Prank as the not ProxyAdmin or ProxyAdmin owner. - vm.assume(_sender != address(proxyAdmin) && _sender != proxyAdminOwner); - + /// @notice Fuzz test for initialize with any portal address. + /// @param _portal The portal address to test. + function testFuzz_initialize_anyPortal_succeeds(address _portal) external { // Get the slot for _initialized. StorageSlot memory slot = ForgeArtifacts.getSlot("L1CrossDomainMessenger", "_initialized"); // Set the initialized slot to 0. 
vm.store(address(l1CrossDomainMessenger), bytes32(slot.slot), bytes32(0)); - // Create a new SystemConfig contract - ISystemConfig newSystemConfig = ISystemConfig(address(0xdeadbeef)); + // Initialize with the fuzzed portal address + vm.prank(address(proxyAdmin)); + l1CrossDomainMessenger.initialize(systemConfig, IOptimismPortal2(payable(_portal))); - // Call the `upgrade` function with the sender - // Expect the revert with `ProxyAdminOwnedBase_NotProxyAdminOrProxyAdminOwner` selector - vm.prank(_sender); - vm.expectRevert(IProxyAdminOwnedBase.ProxyAdminOwnedBase_NotProxyAdminOrProxyAdminOwner.selector); - l1CrossDomainMessenger.upgrade(newSystemConfig); + // Verify the address was set correctly + assertEq(address(l1CrossDomainMessenger.systemConfig()), address(systemConfig)); + assertEq(address(l1CrossDomainMessenger.portal()), _portal); } } @@ -239,6 +200,16 @@ contract L1CrossDomainMessenger_SuperchainConfig_Test is L1CrossDomainMessenger_ } } +/// @title L1CrossDomainMessenger_portal_Test +/// @notice Tests for the `PORTAL` legacy getter function of the L1CrossDomainMessenger. +contract L1CrossDomainMessenger_portal_Test is L1CrossDomainMessenger_TestInit { + /// @notice Tests that `PORTAL` returns the correct portal address. + function test_portal_succeeds() external view { + assertEq(address(l1CrossDomainMessenger.PORTAL()), address(optimismPortal2)); + assertEq(address(l1CrossDomainMessenger.PORTAL()), address(l1CrossDomainMessenger.portal())); + } +} + /// @notice The following tests are not testing any function of the L1CrossDomainMessenger /// contract directly, but are testing the functionality of the CrossDomainMessenger /// contract that is inherited from. @@ -291,6 +262,32 @@ contract L1CrossDomainMessenger_SendMessage_Test is L1CrossDomainMessenger_TestI l1CrossDomainMessenger.sendMessage(recipient, hex"ff", uint32(100)); } + /// @notice Fuzz test for sendMessage with various gas limits and message data. 
+ /// @param _gasLimit Gas limit for the message (bounded to reasonable range). + /// @param _message Message data to send. + /// @param _sender Address sending the message. + function testFuzz_sendMessage_varyingInputs_succeeds( + uint32 _gasLimit, + bytes calldata _message, + address _sender + ) + external + { + // Bound gas limit to reasonable range to avoid OutOfGas errors + _gasLimit = uint32(bound(uint256(_gasLimit), 21000, 1_000_000)); + // Bound message length to avoid excessive gas costs + vm.assume(_message.length <= 1000); + vm.assume(_sender != address(0)); + + uint256 nonceBefore = l1CrossDomainMessenger.messageNonce(); + + vm.prank(_sender); + l1CrossDomainMessenger.sendMessage(recipient, _message, _gasLimit); + + // Verify nonce incremented + assertEq(l1CrossDomainMessenger.messageNonce(), nonceBefore + 1); + } + /// @notice Tests that the sendMessage function is able to send the same message twice. function test_sendMessage_twice_succeeds() external { uint256 nonce = l1CrossDomainMessenger.messageNonce(); @@ -299,9 +296,34 @@ contract L1CrossDomainMessenger_SendMessage_Test is L1CrossDomainMessenger_TestI // the nonce increments for each message sent assertEq(nonce + 2, l1CrossDomainMessenger.messageNonce()); } + + /// @notice Tests sendMessage with zero gas limit. + function test_sendMessage_zeroGasLimit_succeeds() external { + uint256 nonce = l1CrossDomainMessenger.messageNonce(); + + // Even with zero gas limit, message should send + vm.expectEmit(address(l1CrossDomainMessenger)); + emit SentMessage(recipient, alice, hex"1234", nonce, 0); + + vm.prank(alice); + l1CrossDomainMessenger.sendMessage(recipient, hex"1234", 0); + + // Verify nonce incremented + assertEq(l1CrossDomainMessenger.messageNonce(), nonce + 1); + } + + /// @notice Tests sendMessage with high gas limit that causes OutOfGas. 
+ function test_sendMessage_highGasLimit_reverts() external { + // Very high gas limit causes OutOfGas error in portal deposit + uint32 highGasLimit = 30_000_000; + + vm.prank(alice); + vm.expectRevert("OutOfGas()"); + l1CrossDomainMessenger.sendMessage(recipient, hex"5678", highGasLimit); + } } -/// @title L1CrossDomainMessenger_Unclassified_Test +/// @title L1CrossDomainMessenger_Uncategorized_Test /// @notice General tests that are not testing any function directly of the L1CrossDomainMessenger /// but are testing functionality of the CrossDomainMessenger contract that is inherited /// from. @@ -429,6 +451,44 @@ contract L1CrossDomainMessenger_Uncategorized_Test is L1CrossDomainMessenger_Tes assertEq(l1CrossDomainMessenger.failedMessages(hash), false); } + /// @notice Fuzz test for relaying messages with various parameters. + /// @param _target Target address for the message. + /// @param _minGasLimit Minimum gas limit for message execution. + /// @param _message Message data to relay. + function testFuzz_relayMessage_varyingInputs_succeeds( + address _target, + uint32 _minGasLimit, + bytes calldata _message + ) + external + { + // Ensure target is not a blocked address + vm.assume(_target != address(l1CrossDomainMessenger)); + vm.assume(_target != address(optimismPortal2)); + vm.assume(_target != address(0)); + + // Bound gas limit and message size to avoid OutOfGas errors + _minGasLimit = uint32(bound(uint256(_minGasLimit), 0, 100_000)); + vm.assume(_message.length <= 100); + + address sender = Predeploys.L2_CROSS_DOMAIN_MESSENGER; + + // set the value of op.l2Sender() to be the L2 Cross Domain Messenger. 
+ vm.store(address(optimismPortal2), bytes32(senderSlotIndex), bytes32(abi.encode(sender))); + + bytes32 hash = Hashing.hashCrossDomainMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, _target, 0, _minGasLimit, _message + ); + + vm.prank(address(optimismPortal2)); + l1CrossDomainMessenger.relayMessage( + Encoding.encodeVersionedNonce({ _nonce: 0, _version: 1 }), sender, _target, 0, _minGasLimit, _message + ); + + // Verify message was relayed (either successfully or failed) + assertTrue(l1CrossDomainMessenger.successfulMessages(hash) || l1CrossDomainMessenger.failedMessages(hash)); + } + /// @notice Tests that `relayMessage` reverts if the caller is optimismPortal2 and the value /// sent does not match the amount. function test_relayMessage_fromOtherMessengerValueMismatch_reverts() external { @@ -699,6 +759,13 @@ contract L1CrossDomainMessenger_Uncategorized_Test is L1CrossDomainMessenger_Tes l1CrossDomainMessenger.xDomainMessageSender(); } + /// @notice Tests that xDomainMessageSender is never set during sendMessage. + function test_xDomainMessageSender_duringSend_reverts() external { + // XDomainMessageSender is only set during relayMessage, not sendMessage + vm.expectRevert("CrossDomainMessenger: xDomainMessageSender is not set"); + l1CrossDomainMessenger.xDomainMessageSender(); + } + /// @notice Tests that `relayMessage` should successfully call the target contract after the /// first message fails and ETH is stuck, but the second message succeeds with a /// version 1 message. 
diff --git a/packages/contracts-bedrock/test/L1/L1ERC721Bridge.t.sol b/packages/contracts-bedrock/test/L1/L1ERC721Bridge.t.sol index 33a471d48bb9e..8bf3d99f0f69a 100644 --- a/packages/contracts-bedrock/test/L1/L1ERC721Bridge.t.sol +++ b/packages/contracts-bedrock/test/L1/L1ERC721Bridge.t.sol @@ -139,78 +139,6 @@ contract L1ERC721Bridge_Initialize_Test is L1ERC721Bridge_TestInit { } } -/// @title L1ERC721Bridge_Upgrade_Test -/// @notice Reusable test for the current upgrade() function in the L1ERC721Bridge contract. If -/// the upgrade() function is changed, tests inside of this contract should be updated to -/// reflect the new function. If the upgrade() function is removed, remove the -/// corresponding tests but leave this contract in place so it's easy to add tests back -/// in the future. -contract L1ERC721Bridge_Upgrade_Test is L1ERC721Bridge_TestInit { - /// @notice Tests that the upgrade() function succeeds. - function test_upgrade_succeeds() external { - // Get the slot for _initialized. - StorageSlot memory slot = ForgeArtifacts.getSlot("L1ERC721Bridge", "_initialized"); - - // Set the initialized slot to 0. - vm.store(address(l1ERC721Bridge), bytes32(slot.slot), bytes32(0)); - - // Verify the initial systemConfig slot is non-zero. - StorageSlot memory systemConfigSlot = ForgeArtifacts.getSlot("L1ERC721Bridge", "systemConfig"); - vm.store(address(l1ERC721Bridge), bytes32(systemConfigSlot.slot), bytes32(uint256(1))); - assertNotEq(address(l1ERC721Bridge.systemConfig()), address(0)); - assertNotEq(vm.load(address(l1ERC721Bridge), bytes32(systemConfigSlot.slot)), bytes32(0)); - - ISystemConfig newSystemConfig = ISystemConfig(address(0xdeadbeef)); - - // Trigger upgrade(). - vm.prank(address(l1ERC721Bridge.proxyAdmin())); - l1ERC721Bridge.upgrade(newSystemConfig); - - // Verify that the systemConfig was updated. 
- assertEq(address(l1ERC721Bridge.systemConfig()), address(newSystemConfig)); - } - - /// @notice Tests that the upgrade() function reverts if called a second time. - function test_upgrade_upgradeTwice_reverts() external { - // Get the slot for _initialized. - StorageSlot memory slot = ForgeArtifacts.getSlot("L1ERC721Bridge", "_initialized"); - - // Set the initialized slot to 0. - vm.store(address(l1ERC721Bridge), bytes32(slot.slot), bytes32(0)); - - ISystemConfig newSystemConfig = ISystemConfig(address(0xdeadbeef)); - - // Trigger first upgrade. - vm.prank(address(l1ERC721Bridge.proxyAdmin())); - l1ERC721Bridge.upgrade(newSystemConfig); - - // Try to trigger second upgrade. - vm.prank(address(l1ERC721Bridge.proxyAdmin())); - vm.expectRevert("Initializable: contract is already initialized"); - l1ERC721Bridge.upgrade(newSystemConfig); - } - - /// @notice Tests that the upgrade() function reverts if called by a non-proxy admin or owner. - /// @param _sender The address of the sender to test. - function testFuzz_upgrade_notProxyAdminOrProxyAdminOwner_reverts(address _sender) public { - // Prank as the not ProxyAdmin or ProxyAdmin owner. - vm.assume(_sender != address(proxyAdmin) && _sender != proxyAdminOwner); - - // Get the slot for _initialized. - StorageSlot memory slot = ForgeArtifacts.getSlot("L1ERC721Bridge", "_initialized"); - - // Set the initialized slot to 0. - vm.store(address(l1ERC721Bridge), bytes32(slot.slot), bytes32(0)); - - // Expect the revert with `ProxyAdminOwnedBase_NotProxyAdminOrProxyAdminOwner` selector - vm.expectRevert(IProxyAdminOwnedBase.ProxyAdminOwnedBase_NotProxyAdminOrProxyAdminOwner.selector); - - // Call the `upgrade` function with the sender - vm.prank(_sender); - l1ERC721Bridge.upgrade(ISystemConfig(address(0xdeadbeef))); - } -} - /// @title L1ERC721Bridge_SuperchainConfig_Test /// @notice Test contract for L1ERC721Bridge `superchainConfig` function. 
contract L1ERC721Bridge_SuperchainConfig_Test is L1ERC721Bridge_TestInit { @@ -223,9 +151,9 @@ contract L1ERC721Bridge_SuperchainConfig_Test is L1ERC721Bridge_TestInit { /// @title L1ERC721Bridge_Version_Test /// @notice Test contract for L1ERC721Bridge `version` constant. contract L1ERC721Bridge_Version_Test is L1ERC721Bridge_TestInit { - /// @notice Verifies version returns the expected semantic version. + /// @notice Tests that the version function returns a non-empty string. function test_version_succeeds() external view { - assertEq(l1ERC721Bridge.version(), "2.7.0"); + assert(bytes(l1ERC721Bridge.version()).length > 0); } } diff --git a/packages/contracts-bedrock/test/L1/L1StandardBridge.t.sol b/packages/contracts-bedrock/test/L1/L1StandardBridge.t.sol index a67aeb6f7148f..03ebb72969608 100644 --- a/packages/contracts-bedrock/test/L1/L1StandardBridge.t.sol +++ b/packages/contracts-bedrock/test/L1/L1StandardBridge.t.sol @@ -14,6 +14,7 @@ import { StandardBridge } from "src/universal/StandardBridge.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; +import { Features } from "src/libraries/Features.sol"; // Interfaces import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; @@ -217,71 +218,6 @@ contract L1StandardBridge_Initialize_Test is CommonTest { } } -/// @title L1StandardBridge_Upgrade_Test -/// @notice Reusable test for the current upgrade() function in the L1StandardBridge contract. If -/// the upgrade() function is changed, tests inside of this contract should be updated to -/// reflect the new function. If the upgrade() function is removed, remove the -/// corresponding tests but leave this contract in place so it's easy to add tests back -/// in the future. -contract L1StandardBridge_Upgrade_Test is CommonTest { - /// @notice Tests that the upgrade() function succeeds. 
- function test_upgrade_succeeds() external { - // Get the slot for _initialized. - StorageSlot memory slot = ForgeArtifacts.getSlot("L1StandardBridge", "_initialized"); - - // Set the initialized slot to 0. - vm.store(address(l1StandardBridge), bytes32(slot.slot), bytes32(0)); - - // Verify the initial systemConfig slot is non-zero. - StorageSlot memory systemConfigSlot = ForgeArtifacts.getSlot("L1StandardBridge", "systemConfig"); - vm.store(address(l1StandardBridge), bytes32(systemConfigSlot.slot), bytes32(uint256(1))); - assertNotEq(address(l1StandardBridge.systemConfig()), address(0)); - assertNotEq(vm.load(address(l1StandardBridge), bytes32(systemConfigSlot.slot)), bytes32(0)); - - ISystemConfig newSystemConfig = ISystemConfig(address(0xdeadbeef)); - - // Trigger upgrade(). - vm.prank(address(l1StandardBridge.proxyAdmin())); - l1StandardBridge.upgrade(newSystemConfig); - - // Verify that the systemConfig was updated. - assertEq(address(l1StandardBridge.systemConfig()), address(newSystemConfig)); - } - - /// @notice Tests that the upgrade() function reverts if called a second time. - function test_upgrade_upgradeTwice_reverts() external { - // Get the slot for _initialized. - StorageSlot memory slot = ForgeArtifacts.getSlot("L1StandardBridge", "_initialized"); - - // Set the initialized slot to 0. - vm.store(address(l1StandardBridge), bytes32(slot.slot), bytes32(0)); - - ISystemConfig newSystemConfig = ISystemConfig(address(0xdeadbeef)); - - // Trigger first upgrade. - vm.prank(address(l1StandardBridge.proxyAdmin())); - l1StandardBridge.upgrade(newSystemConfig); - - // Try to trigger second upgrade. 
- vm.prank(address(l1StandardBridge.proxyAdmin())); - vm.expectRevert("Initializable: contract is already initialized"); - l1StandardBridge.upgrade(newSystemConfig); - } - - /// @notice Verifies upgrade reverts with random unauthorized addresses - /// @param _sender Random address for access control test - function testFuzz_upgrade_notProxyAdminOrProxyAdminOwner_reverts(address _sender) public { - vm.assume(_sender != address(proxyAdmin) && _sender != proxyAdminOwner); - - StorageSlot memory slot = ForgeArtifacts.getSlot("L1StandardBridge", "_initialized"); - vm.store(address(l1StandardBridge), bytes32(slot.slot), bytes32(0)); - - vm.expectRevert(IProxyAdminOwnedBase.ProxyAdminOwnedBase_NotProxyAdminOrProxyAdminOwner.selector); - vm.prank(_sender); - l1StandardBridge.upgrade(ISystemConfig(address(0xdeadbeef))); - } -} - /// @title L1StandardBridge_Paused_Test /// @notice Tests the `paused` function of the `L1StandardBridge` contract. contract L1StandardBridge_Paused_Test is CommonTest { @@ -421,8 +357,13 @@ contract L1StandardBridge_Receive_Test is CommonTest { vm.prank(alice, alice); (bool success,) = address(l1StandardBridge).call{ value: 100 }(hex""); assertEq(success, true); - assertEq(address(optimismPortal2).balance, portalBalanceBefore); - assertEq(address(ethLockbox).balance, ethLockboxBalanceBefore + 100); + + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + assertEq(address(optimismPortal2).balance, portalBalanceBefore); + assertEq(address(ethLockbox).balance, ethLockboxBalanceBefore + 100); + } else { + assertEq(address(optimismPortal2).balance, portalBalanceBefore + 100); + } } /// @notice Verifies receive function reverts when called by contracts @@ -448,8 +389,13 @@ contract L1StandardBridge_DepositETH_Test is L1StandardBridge_TestInit { uint256 portalBalanceBefore = address(optimismPortal2).balance; uint256 ethLockboxBalanceBefore = address(ethLockbox).balance; l1StandardBridge.depositETH{ value: 500 }(50000, hex"dead"); - 
assertEq(address(optimismPortal2).balance, portalBalanceBefore); - assertEq(address(ethLockbox).balance, ethLockboxBalanceBefore + 500); + + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + assertEq(address(optimismPortal2).balance, portalBalanceBefore); + assertEq(address(ethLockbox).balance, ethLockboxBalanceBefore + 500); + } else { + assertEq(address(optimismPortal2).balance, portalBalanceBefore + 500); + } } /// @notice Tests that depositing ETH succeeds for an EOA using 7702 delegation. @@ -461,8 +407,13 @@ contract L1StandardBridge_DepositETH_Test is L1StandardBridge_TestInit { uint256 portalBalanceBefore = address(optimismPortal2).balance; uint256 ethLockboxBalanceBefore = address(ethLockbox).balance; l1StandardBridge.depositETH{ value: 500 }(50000, hex"dead"); - assertEq(address(optimismPortal2).balance, portalBalanceBefore); - assertEq(address(ethLockbox).balance, ethLockboxBalanceBefore + 500); + + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + assertEq(address(optimismPortal2).balance, portalBalanceBefore); + assertEq(address(ethLockbox).balance, ethLockboxBalanceBefore + 500); + } else { + assertEq(address(optimismPortal2).balance, portalBalanceBefore + 500); + } } /// @notice Tests that depositing ETH reverts if the call is not from an EOA. 
@@ -487,8 +438,13 @@ contract L1StandardBridge_DepositETHTo_Test is L1StandardBridge_TestInit { uint256 portalBalanceBefore = address(optimismPortal2).balance; uint256 ethLockboxBalanceBefore = address(ethLockbox).balance; l1StandardBridge.depositETHTo{ value: 600 }(bob, 60000, hex"dead"); - assertEq(address(optimismPortal2).balance, portalBalanceBefore); - assertEq(address(ethLockbox).balance, ethLockboxBalanceBefore + 600); + + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + assertEq(address(optimismPortal2).balance, portalBalanceBefore); + assertEq(address(ethLockbox).balance, ethLockboxBalanceBefore + 600); + } else { + assertEq(address(optimismPortal2).balance, portalBalanceBefore + 600); + } } /// @notice Verifies depositETHTo succeeds with various recipients and amounts @@ -500,10 +456,17 @@ contract L1StandardBridge_DepositETHTo_Test is L1StandardBridge_TestInit { vm.deal(alice, _amount); + uint256 portalBalanceBefore = address(optimismPortal2).balance; uint256 ethLockboxBalanceBefore = address(ethLockbox).balance; + vm.prank(alice); l1StandardBridge.depositETHTo{ value: _amount }(_to, 60000, hex"dead"); - assertEq(address(ethLockbox).balance, ethLockboxBalanceBefore + _amount); + + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + assertEq(address(ethLockbox).balance, ethLockboxBalanceBefore + _amount); + } else { + assertEq(address(optimismPortal2).balance, portalBalanceBefore + _amount); + } } } @@ -820,8 +783,13 @@ contract L1StandardBridge_Uncategorized_Test is L1StandardBridge_TestInit { uint256 portalBalanceBefore = address(optimismPortal2).balance; uint256 ethLockboxBalanceBefore = address(ethLockbox).balance; l1StandardBridge.bridgeETH{ value: 500 }(50000, hex"dead"); - assertEq(address(optimismPortal2).balance, portalBalanceBefore); - assertEq(address(ethLockbox).balance, ethLockboxBalanceBefore + 500); + + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + assertEq(address(optimismPortal2).balance, portalBalanceBefore); + 
assertEq(address(ethLockbox).balance, ethLockboxBalanceBefore + 500); + } else { + assertEq(address(optimismPortal2).balance, portalBalanceBefore + 500); + } } /// @notice Tests that bridging ETH to a different address succeeds. @@ -834,8 +802,13 @@ contract L1StandardBridge_Uncategorized_Test is L1StandardBridge_TestInit { uint256 portalBalanceBefore = address(optimismPortal2).balance; uint256 ethLockboxBalanceBefore = address(ethLockbox).balance; l1StandardBridge.bridgeETHTo{ value: 600 }(bob, 60000, hex"dead"); - assertEq(address(optimismPortal2).balance, portalBalanceBefore); - assertEq(address(ethLockbox).balance, ethLockboxBalanceBefore + 600); + + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + assertEq(address(optimismPortal2).balance, portalBalanceBefore); + assertEq(address(ethLockbox).balance, ethLockboxBalanceBefore + 600); + } else { + assertEq(address(optimismPortal2).balance, portalBalanceBefore + 600); + } } /// @notice Tests that finalizing bridged ETH succeeds. diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index b116c9a695e0f..e2ffb7ed48f03 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -9,57 +9,42 @@ import { DeployOPChain_TestBase } from "test/opcm/DeployOPChain.t.sol"; import { DelegateCaller } from "test/mocks/Callers.sol"; // Scripts -import { DeployOPChainInput } from "scripts/deploy/DeployOPChain.s.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { Deploy } from "scripts/deploy/Deploy.s.sol"; import { VerifyOPCM } from "scripts/deploy/VerifyOPCM.s.sol"; +import { DeployOPChain } from "scripts/deploy/DeployOPChain.s.sol"; import { Config } from "scripts/libraries/Config.sol"; -import { StandardConstants } from "scripts/deploy/StandardConstants.sol"; +import { Types } from "scripts/libraries/Types.sol"; // Libraries import { 
EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; -import { Blueprint } from "src/libraries/Blueprint.sol"; -import { ForgeArtifacts } from "scripts/libraries/ForgeArtifacts.sol"; -import { Bytes } from "src/libraries/Bytes.sol"; import { GameType, Duration, Hash, Claim } from "src/dispute/lib/LibUDT.sol"; import { Proposal, GameTypes } from "src/dispute/lib/Types.sol"; +import { DevFeatures } from "src/libraries/DevFeatures.sol"; // Interfaces import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; -import { IL1ERC721Bridge } from "interfaces/L1/IL1ERC721Bridge.sol"; -import { IL1StandardBridge } from "interfaces/L1/IL1StandardBridge.sol"; -import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; -import { IL1CrossDomainMessenger } from "interfaces/L1/IL1CrossDomainMessenger.sol"; -import { IMIPS2 } from "interfaces/cannon/IMIPS2.sol"; import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; -import { IProxy } from "interfaces/universal/IProxy.sol"; import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import { IProtocolVersions } from "interfaces/L1/IProtocolVersions.sol"; -import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; -import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; import { IPermissionedDisputeGame } from "interfaces/dispute/IPermissionedDisputeGame.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; import { IOPContractsManager, - IOPCMImplementationsWithoutLockbox, IOPContractsManagerGameTypeAdder, - 
IOPContractsManagerDeployer, - IOPContractsManagerUpgrader, - IOPContractsManagerContractsContainer, IOPContractsManagerInteropMigrator, - IOPContractsManagerStandardValidator + IOPContractsManagerUpgrader } from "interfaces/L1/IOPContractsManager.sol"; -import { IOPContractsManager200 } from "interfaces/L1/IOPContractsManager200.sol"; -import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IOPContractsManagerStandardValidator } from "interfaces/L1/IOPContractsManagerStandardValidator.sol"; import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; import { IBigStepper } from "interfaces/dispute/IBigStepper.sol"; import { ISuperFaultDisputeGame } from "interfaces/dispute/ISuperFaultDisputeGame.sol"; import { ISuperPermissionedDisputeGame } from "interfaces/dispute/ISuperPermissionedDisputeGame.sol"; -import { IOPContractsManagerStandardValidator } from "interfaces/L1/IOPContractsManagerStandardValidator.sol"; // Contracts import { @@ -84,9 +69,7 @@ contract OPContractsManager_Harness is OPContractsManager { OPContractsManagerStandardValidator _opcmStandardValidator, ISuperchainConfig _superchainConfig, IProtocolVersions _protocolVersions, - IProxyAdmin _superchainProxyAdmin, - string memory _l1ContractsRelease, - address _upgradeController + IProxyAdmin _superchainProxyAdmin ) OPContractsManager( _opcmGameTypeAdder, @@ -96,9 +79,7 @@ contract OPContractsManager_Harness is OPContractsManager { _opcmStandardValidator, _superchainConfig, _protocolVersions, - _superchainProxyAdmin, - _l1ContractsRelease, - _upgradeController + _superchainProxyAdmin ) { } @@ -126,8 +107,10 @@ contract OPContractsManager_Upgrade_Harness is CommonTest { // The ImplementationSet event emitted by the DisputeGameFactory contract. event ImplementationSet(address indexed impl, GameType indexed gameType); + /// @notice Thrown when testing with an unsupported chain ID. 
+ error UnsupportedChainId(); + uint256 l2ChainId; - IProxyAdmin superchainProxyAdmin; address upgrader; IOPContractsManager.OpChainConfig[] opChainConfigs; Claim absolutePrestate; @@ -146,7 +129,6 @@ contract OPContractsManager_Upgrade_Harness is CommonTest { ); absolutePrestate = Claim.wrap(bytes32(keccak256("absolutePrestate"))); - superchainProxyAdmin = IProxyAdmin(EIP1967Helper.getAdmin(address(superchainConfig))); upgrader = proxyAdmin.owner(); vm.label(upgrader, "ProxyAdmin Owner"); @@ -170,548 +152,186 @@ contract OPContractsManager_Upgrade_Harness is CommonTest { delayedWeth = IDelayedWETH(payable(artifacts.mustGetAddress("PermissionlessDelayedWETHProxy"))); permissionedDisputeGame = IPermissionedDisputeGame(address(artifacts.mustGetAddress("PermissionedDisputeGame"))); faultDisputeGame = IFaultDisputeGame(address(artifacts.mustGetAddress("FaultDisputeGame"))); - } - function expectEmitUpgraded(address impl, address proxy) public { - vm.expectEmit(proxy); - emit Upgraded(impl); + // Since this superchainConfig is already at the expected reinitializer version... + // We do this to pass the reinitializer check when trying to upgrade the superchainConfig contract. + + // Get the value of the 0th storage slot of the superchainConfig contract. + bytes32 slot0 = vm.load(address(superchainConfig), bytes32(0)); + // Remove the value of initialized slot. + slot0 = slot0 & bytes32(~uint256(0xff)); + // Store 1 there. + slot0 = bytes32(uint256(slot0) + 1); + // Store the new value. + vm.store(address(superchainConfig), bytes32(0), slot0); } - function runUpgrade13UpgradeAndChecks(address _delegateCaller) public { - // The address below corresponds with the address of the v2.0.0-rc.1 OPCM on mainnet. 
- address OPCM_ADDRESS = 0x026b2F158255Beac46c1E7c6b8BbF29A4b6A7B76; - - IOPContractsManager deployedOPCM = IOPContractsManager(OPCM_ADDRESS); - IOPCMImplementationsWithoutLockbox.Implementations memory impls = - IOPCMImplementationsWithoutLockbox(address(deployedOPCM)).implementations(); - - // Always trigger U13 once with an empty opChainConfig array to ensure that the - // SuperchainConfig contract is upgraded. Separate context to avoid stack too deep. - { - ISuperchainConfig superchainConfig = ISuperchainConfig(artifacts.mustGetAddress("SuperchainConfigProxy")); - address superchainPAO = IProxyAdmin(EIP1967Helper.getAdmin(address(superchainConfig))).owner(); - vm.etch(superchainPAO, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); - DelegateCaller(superchainPAO).dcForward( - OPCM_ADDRESS, abi.encodeCall(IOPContractsManager.upgrade, (new IOPContractsManager.OpChainConfig[](0))) + /// @notice Helper function that runs an OPCM upgrade, asserts that the upgrade was successful, + /// asserts that it fits within a certain amount of gas, and runs the StandardValidator + /// over the result. + /// @param _opcm The OPCM contract to upgrade with. + /// @param _delegateCaller The address of the delegate caller to use for the upgrade. + /// @param _revertBytes The bytes of the revert to expect. + function _runOpcmUpgradeAndChecks( + IOPContractsManager _opcm, + address _delegateCaller, + bytes memory _revertBytes + ) + internal + { + // Always start by upgrading the SuperchainConfig contract. + // Temporarily replace the superchainPAO with a DelegateCaller. + address superchainPAO = IProxyAdmin(EIP1967Helper.getAdmin(address(superchainConfig))).owner(); + bytes memory superchainPAOCode = address(superchainPAO).code; + vm.etch(superchainPAO, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); + + // Execute the SuperchainConfig upgrade. 
+ // nosemgrep: sol-safety-trycatch-eip150 + try DelegateCaller(superchainPAO).dcForward( + address(_opcm), + abi.encodeCall(IOPContractsManager.upgradeSuperchainConfig, (superchainConfig, superchainProxyAdmin)) + ) { + // Great, the upgrade succeeded. + } catch (bytes memory reason) { + // Only acceptable revert reason is the SuperchainConfig already being up to date. This + // try/catch is better than checking the version via the implementations struct because + // the implementations struct interface can change between OPCM versions which would + // cause the test to break and be a pain to resolve. + assertTrue( + bytes4(reason) + == IOPContractsManagerUpgrader.OPContractsManagerUpgrader_SuperchainConfigAlreadyUpToDate.selector, + "Revert reason other than SuperchainConfigAlreadyUpToDate" ); } - // Cache the old L1xDM address so we can look for it in the AddressManager's event - address oldL1CrossDomainMessenger = addressManager.getAddress("OVM_L1CrossDomainMessenger"); - - // Predict the address of the new AnchorStateRegistry proxy - bytes32 salt = keccak256( - abi.encode( - l2ChainId, - string.concat( - string(bytes.concat(bytes32(uint256(uint160(address(opChainConfigs[0].systemConfigProxy)))))) - ), - "AnchorStateRegistry" - ) - ); - address proxyBp = IOPContractsManager200(address(deployedOPCM)).blueprints().proxy; - Blueprint.Preamble memory preamble = Blueprint.parseBlueprintPreamble(proxyBp.code); - bytes memory initCode = bytes.concat(preamble.initcode, abi.encode(proxyAdmin)); - address newAnchorStateRegistryProxy = vm.computeCreate2Address(salt, keccak256(initCode), _delegateCaller); - vm.label(newAnchorStateRegistryProxy, "NewAnchorStateRegistryProxy"); - - expectEmitUpgraded(impls.systemConfigImpl, address(systemConfig)); - vm.expectEmit(address(addressManager)); - emit AddressSet("OVM_L1CrossDomainMessenger", impls.l1CrossDomainMessengerImpl, oldL1CrossDomainMessenger); - // This is where we would emit an event for the L1StandardBridge however - // 
the Chugsplash proxy does not emit such an event. - expectEmitUpgraded(impls.l1ERC721BridgeImpl, address(l1ERC721Bridge)); - expectEmitUpgraded(impls.disputeGameFactoryImpl, address(disputeGameFactory)); - expectEmitUpgraded(impls.optimismPortalImpl, address(optimismPortal2)); - expectEmitUpgraded(impls.optimismMintableERC20FactoryImpl, address(l1OptimismMintableERC20Factory)); - vm.expectEmit(address(newAnchorStateRegistryProxy)); - emit AdminChanged(address(0), address(proxyAdmin)); - expectEmitUpgraded(impls.anchorStateRegistryImpl, address(newAnchorStateRegistryProxy)); - expectEmitUpgraded(impls.delayedWETHImpl, address(delayedWETHPermissionedGameProxy)); - - // We don't yet know the address of the new permissionedGame which will be deployed by the - // OPContractsManager.upgrade() call, so ignore the first topic. - vm.expectEmit(false, true, true, true, address(disputeGameFactory)); - emit ImplementationSet(address(0), GameTypes.PERMISSIONED_CANNON); - - IFaultDisputeGame oldFDG = IFaultDisputeGame(address(disputeGameFactory.gameImpls(GameTypes.CANNON))); - if (address(oldFDG) != address(0)) { - IDelayedWETH weth = oldFDG.weth(); - expectEmitUpgraded(impls.delayedWETHImpl, address(weth)); - - // Ignore the first topic for the same reason as the previous comment. - vm.expectEmit(false, true, true, true, address(disputeGameFactory)); - emit ImplementationSet(address(0), GameTypes.CANNON); - } - - vm.expectEmit(address(_delegateCaller)); - emit Upgraded(l2ChainId, opChainConfigs[0].systemConfigProxy, address(_delegateCaller)); + // Reset the superchainPAO to the original code. + vm.etch(superchainPAO, superchainPAOCode); - // Temporarily replace the upgrader with a DelegateCaller so we can test the upgrade, - // then reset its code to the original code. + // Temporarily replace the upgrader with a DelegateCaller. 
bytes memory delegateCallerCode = address(_delegateCaller).code; vm.etch(_delegateCaller, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); - DelegateCaller(_delegateCaller).dcForward( - address(deployedOPCM), abi.encodeCall(IOPContractsManager.upgrade, (opChainConfigs)) - ); - - VmSafe.Gas memory gas = vm.lastCallGas(); - - // Less than 90% of the gas target of 20M to account for the gas used by using Safe. - assertLt(gas.gasTotalUsed, 0.9 * 20_000_000, "Upgrade exceeds gas target of 15M"); - - vm.etch(_delegateCaller, delegateCallerCode); - - // Check the implementations of the core addresses - assertEq(impls.systemConfigImpl, EIP1967Helper.getImplementation(address(systemConfig))); - assertEq(impls.l1ERC721BridgeImpl, EIP1967Helper.getImplementation(address(l1ERC721Bridge))); - assertEq(impls.disputeGameFactoryImpl, EIP1967Helper.getImplementation(address(disputeGameFactory))); - assertEq(impls.optimismPortalImpl, EIP1967Helper.getImplementation(address(optimismPortal2))); - assertEq( - impls.optimismMintableERC20FactoryImpl, - EIP1967Helper.getImplementation(address(l1OptimismMintableERC20Factory)) - ); - assertEq(impls.l1StandardBridgeImpl, EIP1967Helper.getImplementation(address(l1StandardBridge))); - assertEq(impls.l1CrossDomainMessengerImpl, addressManager.getAddress("OVM_L1CrossDomainMessenger")); - - // Check the implementations of the FP contracts - assertEq(impls.anchorStateRegistryImpl, EIP1967Helper.getImplementation(address(newAnchorStateRegistryProxy))); - assertEq(impls.delayedWETHImpl, EIP1967Helper.getImplementation(address(delayedWETHPermissionedGameProxy))); - - // Check that the PermissionedDisputeGame is upgraded to the expected version, references - // the correct anchor state and has the mipsImpl. 
- IPermissionedDisputeGame pdg = - IPermissionedDisputeGame(address(disputeGameFactory.gameImpls(GameTypes.PERMISSIONED_CANNON))); - assertEq(ISemver(address(pdg)).version(), "1.4.1"); - assertEq(address(pdg.anchorStateRegistry()), address(newAnchorStateRegistryProxy)); - assertEq(address(pdg.vm()), impls.mipsImpl); - - if (address(oldFDG) != address(0)) { - // Check that the PermissionlessDisputeGame is upgraded to the expected version - IFaultDisputeGame newFDG = IFaultDisputeGame(address(disputeGameFactory.gameImpls(GameTypes.CANNON))); - // Check that the PermissionlessDisputeGame is upgraded to the expected version, - // references the correct anchor state and has the mipsImpl. - assertEq(impls.delayedWETHImpl, EIP1967Helper.getImplementation(address(newFDG.weth()))); - assertEq(ISemver(address(newFDG)).version(), "1.4.1"); - assertEq(address(newFDG.anchorStateRegistry()), address(newAnchorStateRegistryProxy)); - assertEq(address(newFDG.vm()), impls.mipsImpl); - } - } - - function runUpgrade14UpgradeAndChecks(address _delegateCaller) public { - address OPCM_ADDRESS = 0x3A1f523a4bc09cd344A2745a108Bb0398288094F; - - IOPContractsManager deployedOPCM = IOPContractsManager(OPCM_ADDRESS); - IOPCMImplementationsWithoutLockbox.Implementations memory impls = - IOPCMImplementationsWithoutLockbox(address(deployedOPCM)).implementations(); - - address mainnetPAO = artifacts.mustGetAddress("SuperchainConfigProxy"); - - // If the delegate caller is not the mainnet PAO, we need to call upgrade as the mainnet - // PAO first. 
- if (_delegateCaller != mainnetPAO) { - IOPContractsManager.OpChainConfig[] memory opmChain = new IOPContractsManager.OpChainConfig[](0); - ISuperchainConfig superchainConfig = ISuperchainConfig(mainnetPAO); - - address opmUpgrader = IProxyAdmin(EIP1967Helper.getAdmin(address(superchainConfig))).owner(); - vm.etch(opmUpgrader, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); - - DelegateCaller(opmUpgrader).dcForward(OPCM_ADDRESS, abi.encodeCall(IOPContractsManager.upgrade, (opmChain))); + // Expect the revert if one is specified. + if (_revertBytes.length > 0) { + vm.expectRevert(_revertBytes); } - // sanity check - IPermissionedDisputeGame oldPDG = - IPermissionedDisputeGame(address(disputeGameFactory.gameImpls(GameTypes.PERMISSIONED_CANNON))); - IFaultDisputeGame oldFDG = IFaultDisputeGame(address(disputeGameFactory.gameImpls(GameTypes.CANNON))); - - // Sanity check that the mips IMPL is not MIPS64 - assertNotEq(address(oldPDG.vm()), impls.mipsImpl); - - // We don't yet know the address of the new permissionedGame which will be deployed by the - // OPContractsManager.upgrade() call, so ignore the first topic. - vm.expectEmit(false, true, true, true, address(disputeGameFactory)); - emit ImplementationSet(address(0), GameTypes.PERMISSIONED_CANNON); - - if (address(oldFDG) != address(0)) { - // Sanity check that the mips IMPL is not MIPS64 - assertNotEq(address(oldFDG.vm()), impls.mipsImpl); - // Ignore the first topic for the same reason as the previous comment. - vm.expectEmit(false, true, true, true, address(disputeGameFactory)); - emit ImplementationSet(address(0), GameTypes.CANNON); - } - vm.expectEmit(address(_delegateCaller)); - emit Upgraded(l2ChainId, opChainConfigs[0].systemConfigProxy, address(_delegateCaller)); - - // Temporarily replace the upgrader with a DelegateCaller so we can test the upgrade, - // then reset its code to the original code. 
- bytes memory delegateCallerCode = address(_delegateCaller).code; - vm.etch(_delegateCaller, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); - + // Execute the chain upgrade. DelegateCaller(_delegateCaller).dcForward( - address(deployedOPCM), abi.encodeCall(IOPContractsManager.upgrade, (opChainConfigs)) + address(_opcm), abi.encodeCall(IOPContractsManager.upgrade, (opChainConfigs)) ); - VmSafe.Gas memory gas = vm.lastCallGas(); + // Return early if a revert was expected. Otherwise we'll get errors below. + if (_revertBytes.length > 0) { + return; + } - // Less than 90% of the gas target of 20M to account for the gas used by using Safe. - assertLt(gas.gasTotalUsed, 0.9 * 20_000_000, "Upgrade exceeds gas target of 15M"); + // Less than 90% of the gas target of 2**24 (EIP-7825) to account for the gas used by + // using Safe. + uint256 fusakaLimit = 2 ** 24; + VmSafe.Gas memory gas = vm.lastCallGas(); + assertLt(gas.gasTotalUsed, fusakaLimit * 9 / 10, "Upgrade exceeds gas target of 90% of 2**24 (EIP-7825)"); + // Reset the upgrader to the original code. vm.etch(_delegateCaller, delegateCallerCode); - // Check that the PermissionedDisputeGame is upgraded to the expected version, references - // the correct anchor state and has the mipsImpl. 
- IPermissionedDisputeGame pdg = - IPermissionedDisputeGame(address(disputeGameFactory.gameImpls(GameTypes.PERMISSIONED_CANNON))); - assertEq(ISemver(address(pdg)).version(), "1.4.1"); - assertEq(address(pdg.vm()), impls.mipsImpl); - - // Check that the SystemConfig is upgraded to the expected version - assertEq(ISemver(address(systemConfig)).version(), "2.5.0"); - assertEq(impls.systemConfigImpl, EIP1967Helper.getImplementation(address(systemConfig))); - - if (address(oldFDG) != address(0)) { - // Check that the PermissionlessDisputeGame is upgraded to the expected version - IFaultDisputeGame newFDG = IFaultDisputeGame(address(disputeGameFactory.gameImpls(GameTypes.CANNON))); - // Check that the PermissionlessDisputeGame is upgraded to the expected version, - // references the correct anchor state and has the mipsImpl. - assertEq(ISemver(address(newFDG)).version(), "1.4.1"); - assertEq(address(newFDG.vm()), impls.mipsImpl); - } - } - - function runUpgrade15UpgradeAndChecks(address _delegateCaller) public { - IOPContractsManager.Implementations memory impls = opcm.implementations(); + // We expect there to only be one chain config for these tests, you will have to rework + // this test if you add more. + assertEq(opChainConfigs.length, 1); - // Always trigger U15 once with an empty opChainConfig array to ensure that the - // SuperchainConfig contract is upgraded. Separate context to avoid stack too deep. - { - ISuperchainConfig superchainConfig = ISuperchainConfig(artifacts.mustGetAddress("SuperchainConfigProxy")); - address superchainPAO = IProxyAdmin(EIP1967Helper.getAdmin(address(superchainConfig))).owner(); - vm.etch(superchainPAO, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); - DelegateCaller(superchainPAO).dcForward( - address(opcm), abi.encodeCall(IOPContractsManager.upgrade, (new IOPContractsManager.OpChainConfig[](0))) - ); + // Coverage changes bytecode, so we get various errors. 
We can safely ignore the result of + // the standard validator in the coverage case, if the validator is failing in coverage + // then it will also fail in other CI tests (unless it's the expected issues, in which case + // we can safely skip). + if (vm.isContext(VmSafe.ForgeContext.Coverage)) { + return; } - // Predict the address of the new AnchorStateRegistry proxy. - // Subcontext to avoid stack too deep. - address newAsrProxy; - { - // Compute the salt using the system config address. - bytes32 salt = keccak256( - abi.encode( - l2ChainId, - string.concat(string(bytes.concat(bytes32(uint256(uint160(address(systemConfig))))))), - "AnchorStateRegistry-U16" - ) - ); - - // Use the actual proxy instead of the local code so we can reuse this test. - address proxyBp = opcm.blueprints().proxy; - Blueprint.Preamble memory preamble = Blueprint.parseBlueprintPreamble(proxyBp.code); - bytes memory initCode = bytes.concat(preamble.initcode, abi.encode(proxyAdmin)); - newAsrProxy = vm.computeCreate2Address(salt, keccak256(initCode), _delegateCaller); - vm.label(newAsrProxy, "NewAnchorStateRegistryProxy"); + // Grab the validator before we do the error assertion because otherwise the assertion will + // try to apply to this function call instead. + IOPContractsManagerStandardValidator validator = _opcm.opcmStandardValidator(); + + // If the absolute prestate is zero, we will always get a PDDG-40,PLDG-40 error here in the + // standard validator. This happens because an absolute prestate of zero means that the + // user is requesting to use the existing prestate. We could avoid the error by grabbing + // the prestate from the actual contracts, but that doesn't actually give us any valuable + // checks. Easier to just expect the error in this case. + if (opChainConfigs[0].absolutePrestate.raw() == bytes32(0)) { + vm.expectRevert("OPContractsManagerStandardValidator: PDDG-40,PLDG-40"); } - // Grab the PermissionedDisputeGame and FaultDisputeGame implementations before upgrade. 
- address oldPDGImpl = address(disputeGameFactory.gameImpls(GameTypes.PERMISSIONED_CANNON)); - address oldFDGImpl = address(disputeGameFactory.gameImpls(GameTypes.CANNON)); - IPermissionedDisputeGame oldPDG = IPermissionedDisputeGame(oldPDGImpl); - IFaultDisputeGame oldFDG = IFaultDisputeGame(oldFDGImpl); - - // Expect the SystemConfig and OptimismPortal to be upgraded. - expectEmitUpgraded(impls.systemConfigImpl, address(systemConfig)); - expectEmitUpgraded(impls.optimismPortalImpl, address(optimismPortal2)); - - // We always expect the PermissionedDisputeGame to be deployed. We don't yet know the - // address of the new permissionedGame which will be deployed by the - // OPContractsManager.upgrade() call, so ignore the first topic. - vm.expectEmit(false, true, true, true, address(disputeGameFactory)); - emit ImplementationSet(address(0), GameTypes.PERMISSIONED_CANNON); - - // If the old FaultDisputeGame exists, we expect it to be upgraded. - if (address(oldFDG) != address(0)) { - // Ignore the first topic for the same reason as the previous comment. - vm.expectEmit(false, true, true, true, address(disputeGameFactory)); - emit ImplementationSet(address(0), GameTypes.CANNON); - } - - vm.expectEmit(address(_delegateCaller)); - emit Upgraded(l2ChainId, systemConfig, address(_delegateCaller)); - - // Temporarily replace the upgrader with a DelegateCaller so we can test the upgrade, - // then reset its code to the original code. - bytes memory delegateCallerCode = address(_delegateCaller).code; - vm.etch(_delegateCaller, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); - - // Execute the upgrade. - // We use the new format here, not the legacy one. - DelegateCaller(_delegateCaller).dcForward( - address(opcm), abi.encodeCall(IOPContractsManager.upgrade, (opChainConfigs)) + // Run the StandardValidator checks. 
+ validator.validate( + IOPContractsManagerStandardValidator.ValidationInput({ + proxyAdmin: opChainConfigs[0].proxyAdmin, + sysCfg: opChainConfigs[0].systemConfigProxy, + absolutePrestate: opChainConfigs[0].absolutePrestate.raw(), + l2ChainID: l2ChainId + }), + false ); + } - // Less than 90% of the gas target of 20M to account for the gas used by using Safe. - VmSafe.Gas memory gas = vm.lastCallGas(); - assertLt(gas.gasTotalUsed, 0.9 * 20_000_000, "Upgrade exceeds gas target of 15M"); - - // Reset the upgrader's code to the original code. - vm.etch(_delegateCaller, delegateCallerCode); - - // Grab the new implementations. - address newPDGImpl = address(disputeGameFactory.gameImpls(GameTypes.PERMISSIONED_CANNON)); - IPermissionedDisputeGame pdg = IPermissionedDisputeGame(newPDGImpl); - address newFDGImpl = address(disputeGameFactory.gameImpls(GameTypes.CANNON)); - IFaultDisputeGame fdg = IFaultDisputeGame(newFDGImpl); - - // Check that the PermissionedDisputeGame is upgraded to the expected version, references - // the correct anchor state and has the mipsImpl. Although Upgrade 15 doesn't actually - // change any of this, we might as well check it again. - assertEq(ISemver(address(pdg)).version(), "1.7.0"); - assertEq(address(pdg.vm()), impls.mipsImpl); - assertEq(pdg.l2ChainId(), oldPDG.l2ChainId()); - - // If the old FaultDisputeGame exists, we expect it to be upgraded. Check same as above. - if (address(oldFDG) != address(0)) { - assertEq(ISemver(address(fdg)).version(), "1.7.0"); - assertEq(address(fdg.vm()), impls.mipsImpl); - assertEq(fdg.l2ChainId(), oldFDG.l2ChainId()); + /// @notice Executes all past upgrades that have not yet been executed on mainnet as of the + /// current simulation block defined in the justfile for this package. This function + /// might be empty if there are no previous upgrades to execute. 
You should remove + /// upgrades from this function once they've been executed on mainnet and the + /// simulation block has been bumped beyond the execution block. + /// @param _delegateCaller The address of the delegate caller to use for the upgrade. + function runPastUpgrades(address _delegateCaller) internal { + // Run past upgrades depending on network. + if (block.chainid == 1) { + // Mainnet + // U16a + _runOpcmUpgradeAndChecks( + IOPContractsManager(0x8123739C1368C2DEDc8C564255bc417FEEeBFF9D), _delegateCaller, bytes("") + ); + } else { + revert UnsupportedChainId(); } + } - // Make sure that the SystemConfig is upgraded to the right version. It must also have the - // right l2ChainId and must be properly initialized. - assertEq(ISemver(address(systemConfig)).version(), "3.4.0"); - assertEq(impls.systemConfigImpl, EIP1967Helper.getImplementation(address(systemConfig))); - assertEq(systemConfig.l2ChainId(), l2ChainId); - DeployUtils.assertInitialized({ _contractAddress: address(systemConfig), _isProxy: true, _slot: 0, _offset: 0 }); - - // Make sure that the OptimismPortal is upgraded to the right version. It must also have a - // reference to the new AnchorStateRegistry. - assertEq(ISemver(address(optimismPortal2)).version(), "4.6.0"); - assertEq(impls.optimismPortalImpl, EIP1967Helper.getImplementation(address(optimismPortal2))); - assertEq(address(optimismPortal2.anchorStateRegistry()), address(newAsrProxy)); - DeployUtils.assertInitialized({ - _contractAddress: address(optimismPortal2), - _isProxy: true, - _slot: 0, - _offset: 0 - }); - - // Make sure the new AnchorStateRegistry has the right version and is initialized. 
- assertEq(ISemver(address(newAsrProxy)).version(), "3.5.0"); - vm.prank(address(proxyAdmin)); - assertEq(IProxy(payable(newAsrProxy)).admin(), address(proxyAdmin)); - DeployUtils.assertInitialized({ _contractAddress: address(newAsrProxy), _isProxy: true, _slot: 0, _offset: 0 }); + /// @notice Executes the current upgrade and checks the results. + /// @param _delegateCaller The address of the delegate caller to use for the upgrade. + function runCurrentUpgrade(address _delegateCaller) public { + _runOpcmUpgradeAndChecks(opcm, _delegateCaller, bytes("")); } - function runUpgradeTestAndChecks(address _delegateCaller) public { - // TODO(#14691): Remove this function once Upgrade 15 is deployed on Mainnet. - runUpgrade13UpgradeAndChecks(_delegateCaller); - // TODO(#14691): Remove this function once Upgrade 15 is deployed on Mainnet. - runUpgrade14UpgradeAndChecks(_delegateCaller); - runUpgrade15UpgradeAndChecks(_delegateCaller); + /// @notice Executes the current upgrade and expects reverts. + /// @param _delegateCaller The address of the delegate caller to use for the upgrade. + /// @param _revertBytes The bytes of the revert to expect. + function runCurrentUpgrade(address _delegateCaller, bytes memory _revertBytes) public { + _runOpcmUpgradeAndChecks(opcm, _delegateCaller, _revertBytes); } } /// @title OPContractsManager_TestInit /// @notice Reusable test initialization for `OPContractsManager` tests. 
-contract OPContractsManager_TestInit is Test { - IOPContractsManager internal opcm; +contract OPContractsManager_TestInit is CommonTest { + event GameTypeAdded( + uint256 indexed l2ChainId, GameType indexed gameType, IDisputeGame newDisputeGame, IDisputeGame oldDisputeGame + ); + IOPContractsManager.DeployOutput internal chainDeployOutput1; IOPContractsManager.DeployOutput internal chainDeployOutput2; - address challenger = makeAddr("challenger"); - ISuperchainConfig superchainConfigProxy = ISuperchainConfig(makeAddr("superchainConfig")); - IProtocolVersions protocolVersionsProxy = IProtocolVersions(makeAddr("protocolVersions")); - IProxyAdmin superchainProxyAdmin = IProxyAdmin(makeAddr("superchainProxyAdmin")); - - function setUp() public virtual { - bytes32 salt = hex"01"; - IOPContractsManager.Blueprints memory blueprints; - (blueprints.addressManager,) = Blueprint.create(vm.getCode("AddressManager"), salt); - (blueprints.proxy,) = Blueprint.create(vm.getCode("Proxy"), salt); - (blueprints.proxyAdmin,) = Blueprint.create(vm.getCode("ProxyAdmin"), salt); - (blueprints.l1ChugSplashProxy,) = Blueprint.create(vm.getCode("L1ChugSplashProxy"), salt); - (blueprints.resolvedDelegateProxy,) = Blueprint.create(vm.getCode("ResolvedDelegateProxy"), salt); - (blueprints.permissionedDisputeGame1, blueprints.permissionedDisputeGame2) = - Blueprint.create(vm.getCode("PermissionedDisputeGame"), salt); - (blueprints.permissionlessDisputeGame1, blueprints.permissionlessDisputeGame2) = - Blueprint.create(vm.getCode("FaultDisputeGame"), salt); - (blueprints.superPermissionedDisputeGame1, blueprints.superPermissionedDisputeGame2) = - Blueprint.create(vm.getCode("SuperPermissionedDisputeGame"), salt); - (blueprints.superPermissionlessDisputeGame1, blueprints.superPermissionlessDisputeGame2) = - Blueprint.create(vm.getCode("SuperFaultDisputeGame"), salt); - - IPreimageOracle oracle = IPreimageOracle( - DeployUtils.create1({ - _name: "PreimageOracle", - _args: 
DeployUtils.encodeConstructor(abi.encodeCall(IPreimageOracle.__constructor__, (126000, 86400))) - }) - ); - IOPContractsManager.Implementations memory impls = IOPContractsManager.Implementations({ - superchainConfigImpl: DeployUtils.create1({ - _name: "SuperchainConfig", - _args: DeployUtils.encodeConstructor(abi.encodeCall(ISuperchainConfig.__constructor__, ())) - }), - protocolVersionsImpl: DeployUtils.create1({ - _name: "ProtocolVersions", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IProtocolVersions.__constructor__, ())) - }), - l1ERC721BridgeImpl: DeployUtils.create1({ - _name: "L1ERC721Bridge", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1ERC721Bridge.__constructor__, ())) - }), - optimismPortalImpl: DeployUtils.create1({ - _name: "OptimismPortal2", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IOptimismPortal2.__constructor__, (1))) - }), - ethLockboxImpl: DeployUtils.create1({ - _name: "ETHLockbox", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IETHLockbox.__constructor__, ())) - }), - systemConfigImpl: DeployUtils.create1({ - _name: "SystemConfig", - _args: DeployUtils.encodeConstructor(abi.encodeCall(ISystemConfig.__constructor__, ())) - }), - optimismMintableERC20FactoryImpl: DeployUtils.create1({ - _name: "OptimismMintableERC20Factory", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IOptimismMintableERC20Factory.__constructor__, ())) - }), - l1CrossDomainMessengerImpl: DeployUtils.create1({ - _name: "L1CrossDomainMessenger", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1CrossDomainMessenger.__constructor__, ())) - }), - l1StandardBridgeImpl: DeployUtils.create1({ - _name: "L1StandardBridge", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IL1StandardBridge.__constructor__, ())) - }), - disputeGameFactoryImpl: DeployUtils.create1({ - _name: "DisputeGameFactory", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IDisputeGameFactory.__constructor__, ())) - }), - anchorStateRegistryImpl: 
DeployUtils.create1({ - _name: "AnchorStateRegistry", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IAnchorStateRegistry.__constructor__, (1))) - }), - delayedWETHImpl: DeployUtils.create1({ - _name: "DelayedWETH", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IDelayedWETH.__constructor__, (3))) - }), - mipsImpl: DeployUtils.create1({ - _name: "MIPS64", - _args: DeployUtils.encodeConstructor( - abi.encodeCall(IMIPS2.__constructor__, (oracle, StandardConstants.MIPS_VERSION)) - ) - }) - }); - - vm.etch(address(superchainConfigProxy), hex"01"); - vm.etch(address(protocolVersionsProxy), hex"01"); - - IOPContractsManagerContractsContainer container = IOPContractsManagerContractsContainer( - DeployUtils.createDeterministic({ - _name: "OPContractsManagerContractsContainer", - _args: DeployUtils.encodeConstructor( - abi.encodeCall(IOPContractsManagerContractsContainer.__constructor__, (blueprints, impls)) - ), - _salt: DeployUtils.DEFAULT_SALT - }) - ); - - IOPContractsManager.Implementations memory __opcmImplementations = container.implementations(); - IOPContractsManagerStandardValidator.Implementations memory opcmImplementations; - assembly { - opcmImplementations := __opcmImplementations - } - - opcm = IOPContractsManager( - DeployUtils.createDeterministic({ - _name: "OPContractsManager", - _args: DeployUtils.encodeConstructor( - abi.encodeCall( - IOPContractsManager.__constructor__, - ( - IOPContractsManagerGameTypeAdder( - DeployUtils.createDeterministic({ - _name: "OPContractsManagerGameTypeAdder", - _args: DeployUtils.encodeConstructor( - abi.encodeCall(IOPContractsManagerGameTypeAdder.__constructor__, (container)) - ), - _salt: DeployUtils.DEFAULT_SALT - }) - ), - IOPContractsManagerDeployer( - DeployUtils.createDeterministic({ - _name: "OPContractsManagerDeployer", - _args: DeployUtils.encodeConstructor( - abi.encodeCall(IOPContractsManagerDeployer.__constructor__, (container)) - ), - _salt: DeployUtils.DEFAULT_SALT - }) - ), - 
IOPContractsManagerUpgrader( - DeployUtils.createDeterministic({ - _name: "OPContractsManagerUpgrader", - _args: DeployUtils.encodeConstructor( - abi.encodeCall(IOPContractsManagerUpgrader.__constructor__, (container)) - ), - _salt: DeployUtils.DEFAULT_SALT - }) - ), - IOPContractsManagerInteropMigrator( - DeployUtils.createDeterministic({ - _name: "OPContractsManagerInteropMigrator", - _args: DeployUtils.encodeConstructor( - abi.encodeCall(IOPContractsManagerInteropMigrator.__constructor__, (container)) - ), - _salt: DeployUtils.DEFAULT_SALT - }) - ), - IOPContractsManagerStandardValidator( - DeployUtils.createDeterministic({ - _name: "OPContractsManagerStandardValidator", - _args: DeployUtils.encodeConstructor( - abi.encodeCall( - IOPContractsManagerStandardValidator.__constructor__, - ( - opcmImplementations, - superchainConfigProxy, - address(superchainProxyAdmin), - challenger, - 100 - ) - ) - ), - _salt: DeployUtils.DEFAULT_SALT - }) - ), - superchainConfigProxy, - protocolVersionsProxy, - superchainProxyAdmin, - "dev", - address(this) - ) - ) - ), - _salt: DeployUtils.DEFAULT_SALT - }) - ); + function setUp() public virtual override { + super.setUp(); chainDeployOutput1 = createChainContracts(100); chainDeployOutput2 = createChainContracts(101); - // Mock the SuperchainConfig.paused function to return false. - // Otherwise migration will fail! - // We use abi.encodeWithSignature because paused is overloaded. - // nosemgrep: sol-style-use-abi-encodecall - vm.mockCall(address(superchainConfigProxy), abi.encodeWithSignature("paused(address)"), abi.encode(false)); - - // Fund the lockboxes for testing. vm.deal(address(chainDeployOutput1.ethLockboxProxy), 100 ether); vm.deal(address(chainDeployOutput2.ethLockboxProxy), 100 ether); } + /// @notice Sets up the environment variables for the VerifyOPCM test. 
+ function setupEnvVars() public { + vm.setEnv("EXPECTED_SUPERCHAIN_CONFIG", vm.toString(address(opcm.superchainConfig()))); + vm.setEnv("EXPECTED_PROTOCOL_VERSIONS", vm.toString(address(opcm.protocolVersions()))); + vm.setEnv("EXPECTED_SUPERCHAIN_PROXY_ADMIN", vm.toString(address(opcm.superchainProxyAdmin()))); + } + /// @notice Helper function to deploy a new set of L1 contracts via OPCM. /// @param _l2ChainId The L2 chain ID to deploy the contracts for. /// @return The deployed contracts. @@ -748,6 +368,50 @@ contract OPContractsManager_TestInit is Test { }) ); } + + function addGameType(IOPContractsManager.AddGameInput memory input) + internal + returns (IOPContractsManager.AddGameOutput memory) + { + IOPContractsManager.AddGameInput[] memory inputs = new IOPContractsManager.AddGameInput[](1); + inputs[0] = input; + + uint256 l2ChainId = IFaultDisputeGame( + address(IDisputeGameFactory(input.systemConfig.disputeGameFactory()).gameImpls(GameType.wrap(1))) + ).l2ChainId(); + + // Expect the GameTypeAdded event to be emitted. 
+ vm.expectEmit(true, true, true, false, address(this)); + emit GameTypeAdded( + l2ChainId, input.disputeGameType, IDisputeGame(payable(address(0))), IDisputeGame(payable(address(0))) + ); + (bool success, bytes memory rawGameOut) = + address(opcm).delegatecall(abi.encodeCall(IOPContractsManager.addGameType, (inputs))); + assertTrue(success, "addGameType failed"); + + IOPContractsManager.AddGameOutput[] memory addGameOutAll = + abi.decode(rawGameOut, (IOPContractsManager.AddGameOutput[])); + return addGameOutAll[0]; + } + + function newGameInputFactory(GameType _gameType) internal view returns (IOPContractsManager.AddGameInput memory) { + return IOPContractsManager.AddGameInput({ + saltMixer: "hello", + systemConfig: chainDeployOutput1.systemConfigProxy, + proxyAdmin: chainDeployOutput1.opChainProxyAdmin, + delayedWETH: IDelayedWETH(payable(address(0))), + disputeGameType: _gameType, + disputeAbsolutePrestate: Claim.wrap(bytes32(hex"deadbeef1234")), + disputeMaxGameDepth: 73, + disputeSplitDepth: 30, + disputeClockExtension: Duration.wrap(10800), + disputeMaxClockDuration: Duration.wrap(302400), + initialBond: 1 ether, + vm: IBigStepper(address(opcm.implementations().mipsImpl)), + permissioned: _gameType.raw() == GameTypes.PERMISSIONED_CANNON.raw() + || _gameType.raw() == GameTypes.SUPER_PERMISSIONED_CANNON.raw() + }); + } } /// @title OPContractsManager_ChainIdToBatchInboxAddress_Test @@ -761,14 +425,13 @@ contract OPContractsManager_ChainIdToBatchInboxAddress_Test is Test { ISuperchainConfig superchainConfigProxy = ISuperchainConfig(makeAddr("superchainConfig")); IProtocolVersions protocolVersionsProxy = IProtocolVersions(makeAddr("protocolVersions")); IProxyAdmin superchainProxyAdmin = IProxyAdmin(makeAddr("superchainProxyAdmin")); - address upgradeController = makeAddr("upgradeController"); OPContractsManager.Blueprints memory emptyBlueprints; OPContractsManager.Implementations memory emptyImpls; vm.etch(address(superchainConfigProxy), hex"01"); 
vm.etch(address(protocolVersionsProxy), hex"01"); OPContractsManagerContractsContainer container = - new OPContractsManagerContractsContainer(emptyBlueprints, emptyImpls); + new OPContractsManagerContractsContainer(emptyBlueprints, emptyImpls, bytes32(0)); OPContractsManager.Implementations memory __opcmImplementations = container.implementations(); OPContractsManagerStandardValidator.Implementations memory opcmImplementations; @@ -782,13 +445,11 @@ contract OPContractsManager_ChainIdToBatchInboxAddress_Test is Test { _opcmUpgrader: new OPContractsManagerUpgrader(container), _opcmInteropMigrator: new OPContractsManagerInteropMigrator(container), _opcmStandardValidator: new OPContractsManagerStandardValidator( - opcmImplementations, superchainConfigProxy, address(superchainProxyAdmin), challenger, 100 + opcmImplementations, superchainConfigProxy, address(superchainProxyAdmin), challenger, 100, bytes32(0) ), _superchainConfig: superchainConfigProxy, _protocolVersions: protocolVersionsProxy, - _superchainProxyAdmin: superchainProxyAdmin, - _l1ContractsRelease: "dev", - _upgradeController: upgradeController + _superchainProxyAdmin: superchainProxyAdmin }); } @@ -811,14 +472,10 @@ contract OPContractsManager_ChainIdToBatchInboxAddress_Test is Test { /// @title OPContractsManager_AddGameType_Test /// @notice Tests the `addGameType` function of the `OPContractsManager` contract. contract OPContractsManager_AddGameType_Test is OPContractsManager_TestInit { - event GameTypeAdded( - uint256 indexed l2ChainId, GameType indexed gameType, IDisputeGame newDisputeGame, IDisputeGame oldDisputeGame - ); - /// @notice Tests that we can add a PermissionedDisputeGame implementation with addGameType. function test_addGameType_permissioned_succeeds() public { // Create the input for the Permissioned game type. 
- IOPContractsManager.AddGameInput memory input = newGameInputFactory(true); + IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.PERMISSIONED_CANNON); // Run the addGameType call. IOPContractsManager.AddGameOutput memory output = addGameType(input); @@ -837,9 +494,9 @@ contract OPContractsManager_AddGameType_Test is OPContractsManager_TestInit { } /// @notice Tests that we can add a FaultDisputeGame implementation with addGameType. - function test_addGameType_permissionless_succeeds() public { + function test_addGameType_cannon_succeeds() public { // Create the input for the Permissionless game type. - IOPContractsManager.AddGameInput memory input = newGameInputFactory(false); + IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.CANNON); // Run the addGameType call. IOPContractsManager.AddGameOutput memory output = addGameType(input); @@ -859,8 +516,7 @@ contract OPContractsManager_AddGameType_Test is OPContractsManager_TestInit { /// @notice Tests that we can add a SuperPermissionedDisputeGame implementation with addGameType. function test_addGameType_permissionedSuper_succeeds() public { // Create the input for the Super game type. - IOPContractsManager.AddGameInput memory input = newGameInputFactory(true); - input.disputeGameType = GameTypes.SUPER_PERMISSIONED_CANNON; + IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.SUPER_PERMISSIONED_CANNON); // Since OPCM will start with the standard Permissioned (non-Super) game type we won't have // a Super dispute game to grab the proposer and challenger from. In production we'd either @@ -898,10 +554,9 @@ contract OPContractsManager_AddGameType_Test is OPContractsManager_TestInit { } /// @notice Tests that we can add a SuperFaultDisputeGame implementation with addGameType. 
- function test_addGameType_permissionlessSuper_succeeds() public { + function test_addGameType_superCannon_succeeds() public { // Create the input for the Super game type. - IOPContractsManager.AddGameInput memory input = newGameInputFactory(false); - input.disputeGameType = GameTypes.SUPER_CANNON; + IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.SUPER_CANNON); // Run the addGameType call. IOPContractsManager.AddGameOutput memory output = addGameType(input); @@ -921,8 +576,31 @@ contract OPContractsManager_AddGameType_Test is OPContractsManager_TestInit { /// @notice Tests that addGameType will revert if the game type is not supported. function test_addGameType_unsupportedGameType_reverts() public { - IOPContractsManager.AddGameInput memory input = newGameInputFactory(false); - input.disputeGameType = GameType.wrap(2000); + IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameType.wrap(2000)); + + // Run the addGameType call, should revert. + IOPContractsManager.AddGameInput[] memory inputs = new IOPContractsManager.AddGameInput[](1); + inputs[0] = input; + (bool success,) = address(opcm).delegatecall(abi.encodeCall(IOPContractsManager.addGameType, (inputs))); + assertFalse(success, "addGameType should have failed"); + } + + /// @notice Tests that addGameType will revert if the game type is cannon-kona and the dev feature is not enabled + function test_addGameType_cannonKonaGameTypeDisabled_reverts() public { + skipIfDevFeatureEnabled(DevFeatures.CANNON_KONA); + IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.CANNON_KONA); + + // Run the addGameType call, should revert. 
+ IOPContractsManager.AddGameInput[] memory inputs = new IOPContractsManager.AddGameInput[](1); + inputs[0] = input; + (bool success,) = address(opcm).delegatecall(abi.encodeCall(IOPContractsManager.addGameType, (inputs))); + assertFalse(success, "addGameType should have failed"); + } + + /// @notice Tests that addGameType will revert if the game type is cannon-kona and the dev feature is not enabled + function test_addGameType_superCannonKonaGameTypeDisabled_reverts() public { + skipIfDevFeatureEnabled(DevFeatures.CANNON_KONA); + IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.SUPER_CANNON_KONA); // Run the addGameType call, should revert. IOPContractsManager.AddGameInput[] memory inputs = new IOPContractsManager.AddGameInput[](1); @@ -943,7 +621,7 @@ contract OPContractsManager_AddGameType_Test is OPContractsManager_TestInit { ) ); vm.etch(address(delayedWETH), hex"01"); - IOPContractsManager.AddGameInput memory input = newGameInputFactory(false); + IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.CANNON); input.delayedWETH = delayedWETH; IOPContractsManager.AddGameOutput memory output = addGameType(input); assertValidGameType(input, output); @@ -951,10 +629,8 @@ contract OPContractsManager_AddGameType_Test is OPContractsManager_TestInit { } function test_addGameType_outOfOrderInputs_reverts() public { - IOPContractsManager.AddGameInput memory input1 = newGameInputFactory(false); - input1.disputeGameType = GameType.wrap(2); - IOPContractsManager.AddGameInput memory input2 = newGameInputFactory(false); - input2.disputeGameType = GameType.wrap(1); + IOPContractsManager.AddGameInput memory input1 = newGameInputFactory(GameType.wrap(2)); + IOPContractsManager.AddGameInput memory input2 = newGameInputFactory(GameType.wrap(1)); IOPContractsManager.AddGameInput[] memory inputs = new IOPContractsManager.AddGameInput[](2); inputs[0] = input1; inputs[1] = input2; @@ -965,7 +641,7 @@ contract 
OPContractsManager_AddGameType_Test is OPContractsManager_TestInit { } function test_addGameType_duplicateGameType_reverts() public { - IOPContractsManager.AddGameInput memory input = newGameInputFactory(false); + IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.CANNON); IOPContractsManager.AddGameInput[] memory inputs = new IOPContractsManager.AddGameInput[](2); inputs[0] = input; inputs[1] = input; @@ -987,7 +663,7 @@ contract OPContractsManager_AddGameType_Test is OPContractsManager_TestInit { } function test_addGameType_notDelegateCall_reverts() public { - IOPContractsManager.AddGameInput memory input = newGameInputFactory(true); + IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.PERMISSIONED_CANNON); IOPContractsManager.AddGameInput[] memory inputs = new IOPContractsManager.AddGameInput[](1); inputs[0] = input; @@ -995,49 +671,6 @@ contract OPContractsManager_AddGameType_Test is OPContractsManager_TestInit { opcm.addGameType(inputs); } - function addGameType(IOPContractsManager.AddGameInput memory input) - internal - returns (IOPContractsManager.AddGameOutput memory) - { - IOPContractsManager.AddGameInput[] memory inputs = new IOPContractsManager.AddGameInput[](1); - inputs[0] = input; - - uint256 l2ChainId = IFaultDisputeGame( - address(IDisputeGameFactory(input.systemConfig.disputeGameFactory()).gameImpls(GameType.wrap(1))) - ).l2ChainId(); - - // Expect the GameTypeAdded event to be emitted. 
- vm.expectEmit(true, true, false, false, address(this)); - emit GameTypeAdded( - l2ChainId, input.disputeGameType, IDisputeGame(payable(address(0))), IDisputeGame(payable(address(0))) - ); - (bool success, bytes memory rawGameOut) = - address(opcm).delegatecall(abi.encodeCall(IOPContractsManager.addGameType, (inputs))); - assertTrue(success, "addGameType failed"); - - IOPContractsManager.AddGameOutput[] memory addGameOutAll = - abi.decode(rawGameOut, (IOPContractsManager.AddGameOutput[])); - return addGameOutAll[0]; - } - - function newGameInputFactory(bool permissioned) internal view returns (IOPContractsManager.AddGameInput memory) { - return IOPContractsManager.AddGameInput({ - saltMixer: "hello", - systemConfig: chainDeployOutput1.systemConfigProxy, - proxyAdmin: chainDeployOutput1.opChainProxyAdmin, - delayedWETH: IDelayedWETH(payable(address(0))), - disputeGameType: GameType.wrap(permissioned ? 1 : 0), - disputeAbsolutePrestate: Claim.wrap(bytes32(hex"deadbeef1234")), - disputeMaxGameDepth: 73, - disputeSplitDepth: 30, - disputeClockExtension: Duration.wrap(10800), - disputeMaxClockDuration: Duration.wrap(302400), - initialBond: 1 ether, - vm: IBigStepper(address(opcm.implementations().mipsImpl)), - permissioned: permissioned - }); - } - function assertValidGameType( IOPContractsManager.AddGameInput memory agi, IOPContractsManager.AddGameOutput memory ago @@ -1086,6 +719,49 @@ contract OPContractsManager_AddGameType_Test is OPContractsManager_TestInit { chainDeployOutput1.disputeGameFactoryProxy.initBonds(agi.disputeGameType), agi.initialBond, "bond mismatch" ); } + + /// @notice Tests that addGameType will revert if the game type is cannon-kona and the dev feature is not enabled + function test_addGameType_cannonKonaGameType_succeeds() public { + skipIfDevFeatureDisabled(DevFeatures.CANNON_KONA); + // Create the input for the cannon-kona game type. 
+ IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.CANNON_KONA); + + // Run the addGameType call. + IOPContractsManager.AddGameOutput memory output = addGameType(input); + assertValidGameType(input, output); + + // Check the values on the new game type. + IPermissionedDisputeGame notPDG = IPermissionedDisputeGame(address(output.faultDisputeGame)); + + // Proposer call should revert because this is a permissionless game. + vm.expectRevert(); // nosemgrep: sol-safety-expectrevert-no-args + notPDG.proposer(); + + // L2 chain ID call should not revert because this is not a Super game. + assertNotEq(notPDG.l2ChainId(), 0, "l2ChainId should not be zero"); + } + + /// @notice Tests that addGameType will revert if the game type is cannon-kona and the dev feature is not enabled + function test_addGameType_superCannonKonaGameType_succeeds() public { + skipIfDevFeatureDisabled(DevFeatures.CANNON_KONA); + // Create the input for the cannon-kona game type. + IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.SUPER_CANNON_KONA); + + // Run the addGameType call. + IOPContractsManager.AddGameOutput memory output = addGameType(input); + assertValidGameType(input, output); + + // Grab the new game type. + IPermissionedDisputeGame notPDG = IPermissionedDisputeGame(address(output.faultDisputeGame)); + + // Proposer should fail, this is a permissionless game. + vm.expectRevert(); // nosemgrep: sol-safety-expectrevert-no-args + notPDG.proposer(); + + // Super games don't have the l2ChainId function. 
+ vm.expectRevert(); // nosemgrep: sol-safety-expectrevert-no-args + notPDG.l2ChainId(); + } } /// @title OPContractsManager_UpdatePrestate_Test @@ -1094,29 +770,56 @@ contract OPContractsManager_UpdatePrestate_Test is OPContractsManager_TestInit { IOPContractsManager internal prestateUpdater; OPContractsManager.AddGameInput[] internal gameInput; - function setUp() public override { + function setUp() public virtual override { super.setUp(); prestateUpdater = opcm; } - /// @notice Tests that we can update the prestate when only the PermissionedDisputeGame exists. - function test_updatePrestate_pdgOnlyWithValidInput_succeeds() public { - // Create the input for the function call. - Claim prestate = Claim.wrap(bytes32(hex"ABBA")); - IOPContractsManager.OpChainConfig[] memory inputs = new IOPContractsManager.OpChainConfig[](1); - inputs[0] = IOPContractsManager.OpChainConfig( - chainDeployOutput1.systemConfigProxy, chainDeployOutput1.opChainProxyAdmin, prestate - ); + /// @notice Runs the OPCM updatePrestate function and checks the results. + /// @param _input The input to the OPCM updatePrestate function. + function _runUpdatePrestateAndChecks(IOPContractsManager.UpdatePrestateInput memory _input) internal { + _runUpdatePrestateAndChecks(_input, bytes("")); + } + + /// @notice Runs the OPCM updatePrestate function and checks the results. + /// @param _input The input to the OPCM updatePrestate function. + /// @param _revertBytes The bytes of the revert to expect, if any. 
+ function _runUpdatePrestateAndChecks( + IOPContractsManager.UpdatePrestateInput memory _input, + bytes memory _revertBytes + ) + internal + { + bool expectCannonUpdated = address( + IDisputeGameFactory(chainDeployOutput1.systemConfigProxy.disputeGameFactory()).gameImpls(GameTypes.CANNON) + ) != address(0); + bool expectCannonKonaUpdated = address( + IDisputeGameFactory(chainDeployOutput1.systemConfigProxy.disputeGameFactory()).gameImpls( + GameTypes.CANNON_KONA + ) + ) != address(0); // Turn the ProxyAdmin owner into a DelegateCaller. address proxyAdminOwner = chainDeployOutput1.opChainProxyAdmin.owner(); vm.etch(address(proxyAdminOwner), vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); + IOPContractsManager.UpdatePrestateInput[] memory inputs = new IOPContractsManager.UpdatePrestateInput[](1); + inputs[0] = _input; + + if (_revertBytes.length > 0) { + vm.expectRevert(_revertBytes); + } + // Trigger the updatePrestate function. DelegateCaller(proxyAdminOwner).dcForward( address(prestateUpdater), abi.encodeCall(IOPContractsManager.updatePrestate, (inputs)) ); + // Return early if a revert was expected. Otherwise we'll get errors below. + if (_revertBytes.length > 0) { + return; + } + // Grab the PermissionedDisputeGame. 
IPermissionedDisputeGame pdg = IPermissionedDisputeGame( address( @@ -1125,27 +828,123 @@ contract OPContractsManager_UpdatePrestate_Test is OPContractsManager_TestInit { ) ) ); + assertEq(pdg.absolutePrestate().raw(), _input.cannonPrestate.raw(), "permissioned game prestate mismatch"); + // Ensure that the WETH contracts are not reverting + pdg.weth().balanceOf(address(0)); + + if (expectCannonUpdated) { + IPermissionedDisputeGame game = IPermissionedDisputeGame( + address( + IDisputeGameFactory(chainDeployOutput1.systemConfigProxy.disputeGameFactory()).gameImpls( + GameTypes.CANNON + ) + ) + ); + assertEq(game.absolutePrestate().raw(), _input.cannonPrestate.raw(), "cannon game prestate mismatch"); + // Ensure that the WETH contracts are not reverting + game.weth().balanceOf(address(0)); + } else { + assertEq( + address( + IDisputeGameFactory(chainDeployOutput1.systemConfigProxy.disputeGameFactory()).gameImpls( + GameTypes.CANNON + ) + ), + (address(0)), + "cannon game should not exist" + ); + } + + if (expectCannonKonaUpdated) { + IPermissionedDisputeGame game = IPermissionedDisputeGame( + address( + IDisputeGameFactory(chainDeployOutput1.systemConfigProxy.disputeGameFactory()).gameImpls( + GameTypes.CANNON_KONA + ) + ) + ); + assertEq(game.absolutePrestate().raw(), _input.cannonKonaPrestate.raw(), "cannon game prestate mismatch"); + // Ensure that the WETH contracts are not reverting + game.weth().balanceOf(address(0)); + } else { + assertEq( + address( + IDisputeGameFactory(chainDeployOutput1.systemConfigProxy.disputeGameFactory()).gameImpls( + GameTypes.CANNON_KONA + ) + ), + (address(0)), + "cannon_kona game should not exist" + ); + } + } + + /// @notice Tests that we can update the prestate when only the PermissionedDisputeGame exists. 
+ function test_updatePrestate_pdgOnlyWithValidInput_succeeds() public { + Claim prestate = Claim.wrap(bytes32(hex"ABBA")); + _runUpdatePrestateAndChecks( + IOPContractsManager.UpdatePrestateInput( + chainDeployOutput1.systemConfigProxy, prestate, Claim.wrap(bytes32(0)) + ) + ); + } + + /// @notice Tests that we can update the prestate when both the PermissionedDisputeGame and + /// FaultDisputeGame exist. + function test_updatePrestate_bothGamesWithValidInput_succeeds() public { + // Add a FaultDisputeGame implementation via addGameType. + IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.CANNON); + addGameType(input); + + Claim prestate = Claim.wrap(bytes32(hex"ABBA")); + _runUpdatePrestateAndChecks( + IOPContractsManager.UpdatePrestateInput( + chainDeployOutput1.systemConfigProxy, prestate, Claim.wrap(bytes32(0)) + ) + ); + } + + /// @notice Tests that we can update the prestate when a SuperFaultDisputeGame exists. Note + /// that this test isn't ideal because the system starts with a PermissionedDisputeGame + /// and then adds a SuperPermissionedDisputeGame and SuperFaultDisputeGame. In the real + /// system we wouldn't have that PermissionedDisputeGame to start with, but it + /// shouldn't matter because the function is independent of other game types that + /// exist. + function test_updatePrestate_withSuperGame_succeeds() public { + // Mock out the existence of a previous SuperPermissionedDisputeGame so we can add a real + // SuperPermissionedDisputeGame implementation. + vm.mockCall( + address(chainDeployOutput1.disputeGameFactoryProxy), + abi.encodeCall(IDisputeGameFactory.gameImpls, (GameTypes.SUPER_PERMISSIONED_CANNON)), + abi.encode(chainDeployOutput1.permissionedDisputeGame) + ); + vm.mockCall( + address(chainDeployOutput1.permissionedDisputeGame), + abi.encodeCall(IDisputeGame.gameType, ()), + abi.encode(GameTypes.SUPER_PERMISSIONED_CANNON) + ); - // Check the prestate value. 
- assertEq(pdg.absolutePrestate().raw(), prestate.raw(), "pdg prestate mismatch"); + // Add a SuperPermissionedDisputeGame implementation via addGameType. + IOPContractsManager.AddGameInput memory input1 = newGameInputFactory(GameTypes.SUPER_PERMISSIONED_CANNON); + addGameType(input1); + vm.clearMockedCalls(); - // Ensure that the WETH contract is not reverting. - pdg.weth().balanceOf(address(0)); - } + // Add a SuperFaultDisputeGame implementation via addGameType. + IOPContractsManager.AddGameInput memory input2 = newGameInputFactory(GameTypes.SUPER_CANNON); + addGameType(input2); - /// @notice Tests that we can update the prestate when both the PermissionedDisputeGame and - /// FaultDisputeGame exist. - function test_updatePrestate_bothGamesWithValidInput_succeeds() public { - // Add a FaultDisputeGame implementation via addGameType. - IOPContractsManager.AddGameInput memory input = newGameInputFactory({ permissioned: false }); - input.disputeGameType = GameTypes.CANNON; - addGameType(input); + // Clear out the PermissionedDisputeGame implementation. + address owner = chainDeployOutput1.disputeGameFactoryProxy.owner(); + vm.prank(owner); + chainDeployOutput1.disputeGameFactoryProxy.setImplementation( + GameTypes.PERMISSIONED_CANNON, IDisputeGame(payable(address(0))) + ); // Create the input for the function call. Claim prestate = Claim.wrap(bytes32(hex"ABBA")); - IOPContractsManager.OpChainConfig[] memory inputs = new IOPContractsManager.OpChainConfig[](1); - inputs[0] = IOPContractsManager.OpChainConfig( - chainDeployOutput1.systemConfigProxy, chainDeployOutput1.opChainProxyAdmin, prestate + IOPContractsManager.UpdatePrestateInput[] memory inputs = new IOPContractsManager.UpdatePrestateInput[](1); + inputs[0] = IOPContractsManager.UpdatePrestateInput( + chainDeployOutput1.systemConfigProxy, prestate, Claim.wrap(bytes32(0)) ); // Turn the ProxyAdmin owner into a DelegateCaller. 
@@ -1157,20 +956,20 @@ contract OPContractsManager_UpdatePrestate_Test is OPContractsManager_TestInit { address(prestateUpdater), abi.encodeCall(IOPContractsManager.updatePrestate, (inputs)) ); - // Grab the PermissionedDisputeGame. + // Grab the SuperPermissionedDisputeGame. IPermissionedDisputeGame pdg = IPermissionedDisputeGame( address( IDisputeGameFactory(chainDeployOutput1.systemConfigProxy.disputeGameFactory()).gameImpls( - GameTypes.PERMISSIONED_CANNON + GameTypes.SUPER_PERMISSIONED_CANNON ) ) ); - // Grab the FaultDisputeGame. + // Grab the SuperFaultDisputeGame. IPermissionedDisputeGame fdg = IPermissionedDisputeGame( address( IDisputeGameFactory(chainDeployOutput1.systemConfigProxy.disputeGameFactory()).gameImpls( - GameTypes.CANNON + GameTypes.SUPER_CANNON ) ) ); @@ -1184,13 +983,74 @@ contract OPContractsManager_UpdatePrestate_Test is OPContractsManager_TestInit { fdg.weth().balanceOf(address(0)); } - /// @notice Tests that we can update the prestate when a SuperFaultDisputeGame exists. Note - /// that this test isn't ideal because the system starts with a PermissionedDisputeGame - /// and then adds a SuperPermissionedDisputeGame and SuperFaultDisputeGame. In the real - /// system we wouldn't have that PermissionedDisputeGame to start with, but it - /// shouldn't matter because the function is independent of other game types that - /// exist. - function test_updatePrestate_withSuperGame_succeeds() public { + /// @notice Tests that the updatePrestate function will revert if the provided prestate is for + /// mixed game types (i.e. CANNON and SUPER_CANNON). + function test_updatePrestate_mixedGameTypes_reverts() public { + // Add a SuperFaultDisputeGame implementation via addGameType. 
+ IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.SUPER_CANNON); + addGameType(input); + + // nosemgrep: sol-style-use-abi-encodecall + _runUpdatePrestateAndChecks( + IOPContractsManager.UpdatePrestateInput({ + systemConfigProxy: chainDeployOutput1.systemConfigProxy, + cannonPrestate: Claim.wrap(bytes32(hex"ABBA")), + cannonKonaPrestate: Claim.wrap(bytes32(0)) + }), + abi.encodeWithSelector( + IOPContractsManagerGameTypeAdder.OPContractsManagerGameTypeAdder_MixedGameTypes.selector + ) + ); + } + + /// @notice Tests that the updatePrestate function will revert if the provided prestate is the + /// zero hash. + function test_updatePrestate_whenPDGPrestateIsZero_reverts() public { + // nosemgrep: sol-style-use-abi-encodecall + _runUpdatePrestateAndChecks( + IOPContractsManager.UpdatePrestateInput({ + systemConfigProxy: chainDeployOutput1.systemConfigProxy, + cannonPrestate: Claim.wrap(bytes32(0)), + cannonKonaPrestate: Claim.wrap(bytes32(0)) + }), + abi.encodeWithSelector(IOPContractsManager.PrestateRequired.selector) + ); + } + + function test_updatePrestate_whenOnlyCannonPrestateIsZeroAndCannonGameTypeDisabled_reverts() public { + // nosemgrep: sol-style-use-abi-encodecall + _runUpdatePrestateAndChecks( + IOPContractsManager.UpdatePrestateInput({ + systemConfigProxy: chainDeployOutput1.systemConfigProxy, + cannonPrestate: Claim.wrap(bytes32(0)), + cannonKonaPrestate: Claim.wrap(bytes32(hex"ABBA")) + }), + abi.encodeWithSelector(IOPContractsManager.PrestateRequired.selector) + ); + } + + /// @notice Tests that we can update the prestate for both CANNON and CANNON_KONA game types. + function test_updatePrestate_bothGamesAndCannonKonaWithValidInput_succeeds() public { + skipIfDevFeatureDisabled(DevFeatures.CANNON_KONA); + // Add a FaultDisputeGame implementation via addGameType. 
+ IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.CANNON); + addGameType(input); + input = newGameInputFactory(GameTypes.CANNON_KONA); + addGameType(input); + + Claim cannonPrestate = Claim.wrap(bytes32(hex"ABBA")); + Claim cannonKonaPrestate = Claim.wrap(bytes32(hex"ADDA")); + _runUpdatePrestateAndChecks( + IOPContractsManager.UpdatePrestateInput({ + systemConfigProxy: chainDeployOutput1.systemConfigProxy, + cannonPrestate: cannonPrestate, + cannonKonaPrestate: cannonKonaPrestate + }) + ); + } + + function test_updatePrestate_cannonKonaWithSuperGame_succeeds() public { + skipIfDevFeatureDisabled(DevFeatures.CANNON_KONA); // Mock out the existence of a previous SuperPermissionedDisputeGame so we can add a real // SuperPermissionedDisputeGame implementation. vm.mockCall( @@ -1205,15 +1065,15 @@ contract OPContractsManager_UpdatePrestate_Test is OPContractsManager_TestInit { ); // Add a SuperPermissionedDisputeGame implementation via addGameType. - IOPContractsManager.AddGameInput memory input1 = newGameInputFactory({ permissioned: true }); - input1.disputeGameType = GameTypes.SUPER_PERMISSIONED_CANNON; + IOPContractsManager.AddGameInput memory input1 = newGameInputFactory(GameTypes.SUPER_PERMISSIONED_CANNON); addGameType(input1); vm.clearMockedCalls(); // Add a SuperFaultDisputeGame implementation via addGameType. - IOPContractsManager.AddGameInput memory input2 = newGameInputFactory({ permissioned: false }); - input2.disputeGameType = GameTypes.SUPER_CANNON; + IOPContractsManager.AddGameInput memory input2 = newGameInputFactory(GameTypes.SUPER_CANNON); addGameType(input2); + IOPContractsManager.AddGameInput memory input3 = newGameInputFactory(GameTypes.SUPER_CANNON_KONA); + addGameType(input3); // Clear out the PermissionedDisputeGame implementation. 
address owner = chainDeployOutput1.disputeGameFactoryProxy.owner(); @@ -1223,11 +1083,14 @@ contract OPContractsManager_UpdatePrestate_Test is OPContractsManager_TestInit { ); // Create the input for the function call. - Claim prestate = Claim.wrap(bytes32(hex"ABBA")); - IOPContractsManager.OpChainConfig[] memory inputs = new IOPContractsManager.OpChainConfig[](1); - inputs[0] = IOPContractsManager.OpChainConfig( - chainDeployOutput1.systemConfigProxy, chainDeployOutput1.opChainProxyAdmin, prestate - ); + Claim cannonPrestate = Claim.wrap(bytes32(hex"ABBA")); + Claim cannonKonaPrestate = Claim.wrap(bytes32(hex"ABBA")); + IOPContractsManager.UpdatePrestateInput[] memory inputs = new IOPContractsManager.UpdatePrestateInput[](1); + inputs[0] = IOPContractsManager.UpdatePrestateInput({ + systemConfigProxy: chainDeployOutput1.systemConfigProxy, + cannonPrestate: cannonPrestate, + cannonKonaPrestate: cannonKonaPrestate + }); // Turn the ProxyAdmin owner into a DelegateCaller. address proxyAdminOwner = chainDeployOutput1.opChainProxyAdmin.owner(); @@ -1238,7 +1101,6 @@ contract OPContractsManager_UpdatePrestate_Test is OPContractsManager_TestInit { address(prestateUpdater), abi.encodeCall(IOPContractsManager.updatePrestate, (inputs)) ); - // Grab the SuperPermissionedDisputeGame. IPermissionedDisputeGame pdg = IPermissionedDisputeGame( address( IDisputeGameFactory(chainDeployOutput1.systemConfigProxy.disputeGameFactory()).gameImpls( @@ -1247,102 +1109,105 @@ contract OPContractsManager_UpdatePrestate_Test is OPContractsManager_TestInit { ) ); - // Grab the SuperFaultDisputeGame. 
- IPermissionedDisputeGame fdg = IPermissionedDisputeGame( + IFaultDisputeGame fdg = IFaultDisputeGame( address( IDisputeGameFactory(chainDeployOutput1.systemConfigProxy.disputeGameFactory()).gameImpls( GameTypes.SUPER_CANNON ) ) ); + IFaultDisputeGame fdgKona = IFaultDisputeGame( + address( + IDisputeGameFactory(chainDeployOutput1.systemConfigProxy.disputeGameFactory()).gameImpls( + GameTypes.SUPER_CANNON_KONA + ) + ) + ); // Check the prestate values. - assertEq(pdg.absolutePrestate().raw(), prestate.raw(), "pdg prestate mismatch"); - assertEq(fdg.absolutePrestate().raw(), prestate.raw(), "fdg prestate mismatch"); + assertEq(pdg.absolutePrestate().raw(), cannonPrestate.raw(), "pdg prestate mismatch"); + assertEq(fdg.absolutePrestate().raw(), cannonPrestate.raw(), "fdg prestate mismatch"); + assertEq(fdgKona.absolutePrestate().raw(), cannonKonaPrestate.raw(), "fdgKona prestate mismatch"); // Ensure that the WETH contracts are not reverting pdg.weth().balanceOf(address(0)); fdg.weth().balanceOf(address(0)); + fdgKona.weth().balanceOf(address(0)); } - function test_updatePrestate_mixedGameTypes_reverts() public { - // Add a SuperFaultDisputeGame implementation via addGameType. - IOPContractsManager.AddGameInput memory input = newGameInputFactory({ permissioned: false }); - input.disputeGameType = GameTypes.SUPER_CANNON; + /// @notice Tests that we can update the prestate when both the PermissionedDisputeGame and + /// FaultDisputeGame exist, and the FaultDisputeGame is of type CANNON_KONA. + function test_updatePrestate_pdgAndCannonKonaOnly_succeeds() public { + skipIfDevFeatureDisabled(DevFeatures.CANNON_KONA); + IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.CANNON_KONA); addGameType(input); - // Create the input for the function call. 
- Claim prestate = Claim.wrap(bytes32(hex"ABBA")); - IOPContractsManager.OpChainConfig[] memory inputs = new IOPContractsManager.OpChainConfig[](1); - inputs[0] = IOPContractsManager.OpChainConfig( - chainDeployOutput1.systemConfigProxy, chainDeployOutput1.opChainProxyAdmin, prestate + _runUpdatePrestateAndChecks( + IOPContractsManager.UpdatePrestateInput({ + systemConfigProxy: chainDeployOutput1.systemConfigProxy, + cannonPrestate: Claim.wrap(bytes32(hex"ABBA")), + cannonKonaPrestate: Claim.wrap(bytes32(hex"ADDA")) + }) ); + } - // Turn the ProxyAdmin owner into a DelegateCaller. - address proxyAdminOwner = chainDeployOutput1.opChainProxyAdmin.owner(); - vm.etch(address(proxyAdminOwner), vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); + /// @notice Tests that the updatePrestate function will revert if the provided prestate is for + /// mixed game types (i.e. CANNON and SUPER_CANNON_KONA). + function test_updatePrestate_cannonKonaMixedGameTypes_reverts() public { + skipIfDevFeatureDisabled(DevFeatures.CANNON_KONA); + // Add a SuperFaultDisputeGame implementation via addGameType. + IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.SUPER_CANNON_KONA); + addGameType(input); - // Trigger the updatePrestate function, should revert. 
- vm.expectRevert(IOPContractsManagerGameTypeAdder.OPContractsManagerGameTypeAdder_MixedGameTypes.selector); - DelegateCaller(proxyAdminOwner).dcForward( - address(prestateUpdater), abi.encodeCall(IOPContractsManager.updatePrestate, (inputs)) + // nosemgrep: sol-style-use-abi-encodecall + _runUpdatePrestateAndChecks( + IOPContractsManager.UpdatePrestateInput({ + systemConfigProxy: chainDeployOutput1.systemConfigProxy, + cannonPrestate: Claim.wrap(bytes32(hex"ABBA")), + cannonKonaPrestate: Claim.wrap(hex"ADDA") + }), + abi.encodeWithSelector( + IOPContractsManagerGameTypeAdder.OPContractsManagerGameTypeAdder_MixedGameTypes.selector + ) ); } /// @notice Tests that the updatePrestate function will revert if the provided prestate is the /// zero hash. - function test_updatePrestate_whenPDGPrestateIsZero_reverts() public { - // Create the input for the function call. - IOPContractsManager.OpChainConfig[] memory inputs = new IOPContractsManager.OpChainConfig[](1); - inputs[0] = IOPContractsManager.OpChainConfig({ - systemConfigProxy: chainDeployOutput1.systemConfigProxy, - proxyAdmin: chainDeployOutput1.opChainProxyAdmin, - absolutePrestate: Claim.wrap(bytes32(0)) - }); - - // Turn the ProxyAdmin owner into a DelegateCaller. - address proxyAdminOwner = chainDeployOutput1.opChainProxyAdmin.owner(); - vm.etch(address(proxyAdminOwner), vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); + function test_updatePrestate_presetCannonKonaWhenOnlyCannonPrestateIsZeroAndCannonGameTypeDisabled_reverts() + public + { + skipIfDevFeatureDisabled(DevFeatures.CANNON_KONA); + IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.CANNON_KONA); + addGameType(input); - // Trigger the updatePrestate function, should revert. 
- vm.expectRevert(IOPContractsManager.PrestateRequired.selector); - DelegateCaller(proxyAdminOwner).dcForward( - address(prestateUpdater), abi.encodeCall(IOPContractsManager.updatePrestate, (inputs)) + // nosemgrep: sol-style-use-abi-encodecall + _runUpdatePrestateAndChecks( + IOPContractsManager.UpdatePrestateInput({ + systemConfigProxy: chainDeployOutput1.systemConfigProxy, + cannonPrestate: Claim.wrap(bytes32(0)), + cannonKonaPrestate: Claim.wrap(bytes32(hex"ABBA")) + }), + abi.encodeWithSelector(IOPContractsManager.PrestateRequired.selector) ); } - function addGameType(IOPContractsManager.AddGameInput memory input) - internal - returns (IOPContractsManager.AddGameOutput memory) - { - IOPContractsManager.AddGameInput[] memory inputs = new IOPContractsManager.AddGameInput[](1); - inputs[0] = input; - - (bool success, bytes memory rawGameOut) = - address(opcm).delegatecall(abi.encodeCall(IOPContractsManager.addGameType, (inputs))); - assertTrue(success, "addGameType failed"); - - IOPContractsManager.AddGameOutput[] memory addGameOutAll = - abi.decode(rawGameOut, (IOPContractsManager.AddGameOutput[])); - return addGameOutAll[0]; - } + /// @notice Tests that the updatePrestate function will revert if the provided prestate is the + /// zero hash. + function test_updatePrestate_whenCannonKonaPrestateIsZero_reverts() public { + skipIfDevFeatureDisabled(DevFeatures.CANNON_KONA); + IOPContractsManager.AddGameInput memory input = newGameInputFactory(GameTypes.CANNON_KONA); + addGameType(input); - function newGameInputFactory(bool permissioned) internal view returns (IOPContractsManager.AddGameInput memory) { - return IOPContractsManager.AddGameInput({ - saltMixer: "hello", - systemConfig: chainDeployOutput1.systemConfigProxy, - proxyAdmin: chainDeployOutput1.opChainProxyAdmin, - delayedWETH: IDelayedWETH(payable(address(0))), - disputeGameType: GameType.wrap(permissioned ? 
1 : 0), - disputeAbsolutePrestate: Claim.wrap(bytes32(hex"deadbeef1234")), - disputeMaxGameDepth: 73, - disputeSplitDepth: 30, - disputeClockExtension: Duration.wrap(10800), - disputeMaxClockDuration: Duration.wrap(302400), - initialBond: 1 ether, - vm: IBigStepper(address(opcm.implementations().mipsImpl)), - permissioned: permissioned - }); + // nosemgrep: sol-style-use-abi-encodecall + _runUpdatePrestateAndChecks( + IOPContractsManager.UpdatePrestateInput({ + systemConfigProxy: chainDeployOutput1.systemConfigProxy, + cannonPrestate: Claim.wrap(bytes32(hex"ABBA")), + cannonKonaPrestate: Claim.wrap(bytes32(0)) + }), + abi.encodeWithSelector(IOPContractsManager.PrestateRequired.selector) + ); } } @@ -1352,70 +1217,34 @@ contract OPContractsManager_Upgrade_Test is OPContractsManager_Upgrade_Harness { function setUp() public override { skipIfNotOpFork("OPContractsManager_Upgrade_Test"); super.setUp(); + + // Run all past upgrades. + runPastUpgrades(upgrader); } function test_upgradeOPChainOnly_succeeds() public { // Run the upgrade test and checks - runUpgradeTestAndChecks(upgrader); + runCurrentUpgrade(upgrader); } function test_verifyOpcmCorrectness_succeeds() public { skipIfCoverage(); // Coverage changes bytecode and breaks the verification script. + // Set up environment variables with the actual OPCM addresses for tests that need them + vm.setEnv("EXPECTED_SUPERCHAIN_CONFIG", vm.toString(address(opcm.superchainConfig()))); + vm.setEnv("EXPECTED_PROTOCOL_VERSIONS", vm.toString(address(opcm.protocolVersions()))); + vm.setEnv("EXPECTED_SUPERCHAIN_PROXY_ADMIN", vm.toString(address(opcm.superchainProxyAdmin()))); + // Run the upgrade test and checks - runUpgradeTestAndChecks(upgrader); + runCurrentUpgrade(upgrader); - // Run the verification script without etherscan verificatin. Hard to run with etherscan + // Run the verification script without etherscan verification. 
Hard to run with etherscan // verification in these tests, can do it but means we add even more dependencies to the // test environment. VerifyOPCM verify = new VerifyOPCM(); verify.run(address(opcm), true); } - function test_isRcFalseAfterCalledByUpgrader_works() public { - assertTrue(opcm.isRC()); - bytes memory releaseBytes = bytes(opcm.l1ContractsRelease()); - assertEq(Bytes.slice(releaseBytes, releaseBytes.length - 3, 3), "-rc", "release should end with '-rc'"); - - runUpgradeTestAndChecks(upgrader); - - assertFalse(opcm.isRC(), "isRC should be false"); - releaseBytes = bytes(opcm.l1ContractsRelease()); - assertNotEq(Bytes.slice(releaseBytes, releaseBytes.length - 3, 3), "-rc", "release should not end with '-rc'"); - } - - function testFuzz_upgrade_nonUpgradeControllerDelegatecallerShouldNotSetIsRCToFalse_works( - address _nonUpgradeController - ) - public - { - if ( - _nonUpgradeController == upgrader || _nonUpgradeController == address(0) - || _nonUpgradeController < address(0x4200000000000000000000000000000000000000) - || _nonUpgradeController > address(0x4200000000000000000000000000000000000800) - || _nonUpgradeController == address(vm) - || _nonUpgradeController == 0x000000000000000000636F6e736F6c652e6c6f67 - || _nonUpgradeController == 0x4e59b44847b379578588920cA78FbF26c0B4956C - ) { - _nonUpgradeController = makeAddr("nonUpgradeController"); - } - - // Set the proxy admin owner to be the non-upgrade controller - vm.store( - address(proxyAdmin), - bytes32(ForgeArtifacts.getSlot("ProxyAdmin", "_owner").slot), - bytes32(uint256(uint160(_nonUpgradeController))) - ); - vm.store( - address(disputeGameFactory), - bytes32(ForgeArtifacts.getSlot("DisputeGameFactory", "_owner").slot), - bytes32(uint256(uint160(_nonUpgradeController))) - ); - - // Run the upgrade test and checks - runUpgradeTestAndChecks(_nonUpgradeController); - } - function test_upgrade_duplicateL2ChainId_succeeds() public { // Deploy a new OPChain with the same L2 chain ID as the current 
OPChain Deploy deploy = Deploy(address(uint160(uint256(keccak256(abi.encode("optimism.deploy")))))); @@ -1425,16 +1254,11 @@ contract OPContractsManager_Upgrade_Test is OPContractsManager_Upgrade_Harness { opcm.deploy(deployInput); // Try to upgrade the current OPChain - runUpgradeTestAndChecks(upgrader); + runCurrentUpgrade(upgrader); } /// @notice Tests that the absolute prestate can be overridden using the upgrade config. function test_upgrade_absolutePrestateOverride_succeeds() public { - // Run Upgrade 13 and 14 to get us to a state where we can run Upgrade 15. - // Can remove these two calls as Upgrade 13 and 14 are executed in prod. - runUpgrade13UpgradeAndChecks(upgrader); - runUpgrade14UpgradeAndChecks(upgrader); - // Get the pdg and fdg before the upgrade Claim pdgPrestateBefore = IPermissionedDisputeGame( address(disputeGameFactory.gameImpls(GameTypes.PERMISSIONED_CANNON)) @@ -1449,8 +1273,8 @@ contract OPContractsManager_Upgrade_Test is OPContractsManager_Upgrade_Harness { // Set the absolute prestate input to something non-zero. opChainConfigs[0].absolutePrestate = Claim.wrap(bytes32(uint256(1))); - // Now run Upgrade 15. - runUpgrade15UpgradeAndChecks(upgrader); + // Run the upgrade. + runCurrentUpgrade(upgrader); // Get the absolute prestate after the upgrade Claim pdgPrestateAfter = IPermissionedDisputeGame( @@ -1467,11 +1291,6 @@ contract OPContractsManager_Upgrade_Test is OPContractsManager_Upgrade_Harness { /// @notice Tests that the old absolute prestate is used if the upgrade config does not set an /// absolute prestate. function test_upgrade_absolutePrestateNotSet_succeeds() public { - // Run Upgrade 13 and 14 to get us to a state where we can run Upgrade 15. - // Can remove these two calls as Upgrade 13 and 14 are executed in prod. 
- runUpgrade13UpgradeAndChecks(upgrader); - runUpgrade14UpgradeAndChecks(upgrader); - // Get the pdg and fdg before the upgrade Claim pdgPrestateBefore = IPermissionedDisputeGame( address(disputeGameFactory.gameImpls(GameTypes.PERMISSIONED_CANNON)) @@ -1486,8 +1305,8 @@ contract OPContractsManager_Upgrade_Test is OPContractsManager_Upgrade_Harness { // Set the absolute prestate input to zero. opChainConfigs[0].absolutePrestate = Claim.wrap(bytes32(0)); - // Now run Upgrade 15. - runUpgrade15UpgradeAndChecks(upgrader); + // Run the upgrade. + runCurrentUpgrade(upgrader); // Get the absolute prestate after the upgrade Claim pdgPrestateAfter = IPermissionedDisputeGame( @@ -1502,33 +1321,24 @@ contract OPContractsManager_Upgrade_Test is OPContractsManager_Upgrade_Harness { } function test_upgrade_notDelegateCalled_reverts() public { - runUpgrade13UpgradeAndChecks(upgrader); - vm.prank(upgrader); vm.expectRevert(IOPContractsManager.OnlyDelegatecall.selector); opcm.upgrade(opChainConfigs); } function test_upgrade_notProxyAdminOwner_reverts() public { - runUpgrade13UpgradeAndChecks(upgrader); - address delegateCaller = makeAddr("delegateCaller"); vm.etch(delegateCaller, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); assertNotEq(superchainProxyAdmin.owner(), delegateCaller); assertNotEq(proxyAdmin.owner(), delegateCaller); - vm.expectRevert("Ownable: caller is not the owner"); - DelegateCaller(delegateCaller).dcForward( - address(opcm), abi.encodeCall(IOPContractsManager.upgrade, (opChainConfigs)) - ); + runCurrentUpgrade(delegateCaller, bytes("Ownable: caller is not the owner")); } /// @notice Tests that upgrade reverts when absolutePrestate is zero and the existing game also /// has an absolute prestate of zero. function test_upgrade_absolutePrestateNotSet_reverts() public { - runUpgrade13UpgradeAndChecks(upgrader); - // Set the config to try to update the absolutePrestate to zero. 
opChainConfigs[0].absolutePrestate = Claim.wrap(bytes32(0)); @@ -1544,8 +1354,96 @@ contract OPContractsManager_Upgrade_Test is OPContractsManager_Upgrade_Harness { ); // Expect the upgrade to revert with PrestateNotSet. - vm.expectRevert(IOPContractsManager.PrestateNotSet.selector); - DelegateCaller(upgrader).dcForward(address(opcm), abi.encodeCall(IOPContractsManager.upgrade, (opChainConfigs))); + // nosemgrep: sol-style-use-abi-encodecall + runCurrentUpgrade(upgrader, abi.encodeWithSelector(IOPContractsManager.PrestateNotSet.selector)); + } + + /// @notice Tests that the upgrade function reverts when the superchainConfig is not at the expected target version. + function test_upgrade_superchainConfigNeedsUpgrade_reverts() public { + // Force the SuperchainConfig to return an obviously outdated version. + vm.mockCall(address(superchainConfig), abi.encodeCall(ISuperchainConfig.version, ()), abi.encode("0.0.0")); + + // Try upgrading an OPChain without upgrading its superchainConfig. + // nosemgrep: sol-style-use-abi-encodecall + runCurrentUpgrade( + upgrader, + abi.encodeWithSelector( + IOPContractsManagerUpgrader.OPContractsManagerUpgrader_SuperchainConfigNeedsUpgrade.selector, (0) + ) + ); + } +} + +contract OPContractsManager_UpgradeSuperchainConfig_Test is OPContractsManager_Upgrade_Harness { + function setUp() public override { + super.setUp(); + + // The superchainConfig is already at the expected version so we mock this call here to bypass that check and + // get our expected error. + vm.mockCall(address(superchainConfig), abi.encodeCall(ISuperchainConfig.version, ()), abi.encode("2.2.0")); + } + + /// @notice Tests that the upgradeSuperchainConfig function succeeds when the superchainConfig is at the expected + /// version and the delegate caller is the superchainProxyAdmin owner. 
+ function test_upgradeSuperchainConfig_succeeds() public { + IOPContractsManager.Implementations memory impls = opcm.implementations(); + + ISuperchainConfig superchainConfig = ISuperchainConfig(artifacts.mustGetAddress("SuperchainConfigProxy")); + + address superchainPAO = IProxyAdmin(EIP1967Helper.getAdmin(address(superchainConfig))).owner(); + vm.etch(superchainPAO, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); + + vm.expectEmit(address(superchainConfig)); + emit Upgraded(impls.superchainConfigImpl); + DelegateCaller(superchainPAO).dcForward( + address(opcm), + abi.encodeCall(IOPContractsManager.upgradeSuperchainConfig, (superchainConfig, superchainProxyAdmin)) + ); + } + + /// @notice Tests that the upgradeSuperchainConfig function reverts when it is not called via delegatecall. + function test_upgradeSuperchainConfig_notDelegateCalled_reverts() public { + ISuperchainConfig superchainConfig = ISuperchainConfig(artifacts.mustGetAddress("SuperchainConfigProxy")); + + vm.expectRevert(IOPContractsManager.OnlyDelegatecall.selector); + opcm.upgradeSuperchainConfig(superchainConfig, superchainProxyAdmin); + } + + /// @notice Tests that the upgradeSuperchainConfig function reverts when the delegate caller is not the + /// superchainProxyAdmin owner. 
+ function test_upgradeSuperchainConfig_notProxyAdminOwner_reverts() public { + ISuperchainConfig superchainConfig = ISuperchainConfig(artifacts.mustGetAddress("SuperchainConfigProxy")); + + address delegateCaller = makeAddr("delegateCaller"); + vm.etch(delegateCaller, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); + + assertNotEq(superchainProxyAdmin.owner(), delegateCaller); + assertNotEq(proxyAdmin.owner(), delegateCaller); + + vm.expectRevert("Ownable: caller is not the owner"); + DelegateCaller(delegateCaller).dcForward( + address(opcm), + abi.encodeCall(IOPContractsManager.upgradeSuperchainConfig, (superchainConfig, superchainProxyAdmin)) + ); + } + + /// @notice Tests that the upgradeSuperchainConfig function reverts when the superchainConfig version is the same or + /// newer than the target version. + function test_upgradeSuperchainConfig_superchainConfigAlreadyUpToDate_reverts() public { + ISuperchainConfig superchainConfig = ISuperchainConfig(artifacts.mustGetAddress("SuperchainConfigProxy")); + + // Set the version of the superchain config to a version that is the target version. + vm.clearMockedCalls(); + + // Mock the SuperchainConfig to return a very large version. + vm.mockCall(address(superchainConfig), abi.encodeCall(ISuperchainConfig.version, ()), abi.encode("99.99.99")); + + // Try to upgrade the SuperchainConfig contract again, should fail. + vm.expectRevert(IOPContractsManagerUpgrader.OPContractsManagerUpgrader_SuperchainConfigAlreadyUpToDate.selector); + DelegateCaller(upgrader).dcForward( + address(opcm), + abi.encodeCall(IOPContractsManager.upgradeSuperchainConfig, (superchainConfig, superchainProxyAdmin)) + ); } } @@ -1555,6 +1453,12 @@ contract OPContractsManager_Migrate_Test is OPContractsManager_TestInit { Claim absolutePrestate1 = Claim.wrap(bytes32(hex"ABBA")); Claim absolutePrestate2 = Claim.wrap(bytes32(hex"DEAD")); + /// @notice Function requires interop portal. 
+ function setUp() public virtual override { + super.setUp(); + skipIfDevFeatureDisabled(DevFeatures.OPTIMISM_PORTAL_INTEROP); + } + /// @notice Helper function to create the default migration input. function _getDefaultInput() internal view returns (IOPContractsManagerInteropMigrator.MigrateInput memory) { IOPContractsManagerInteropMigrator.GameParameters memory gameParameters = IOPContractsManagerInteropMigrator @@ -1625,6 +1529,11 @@ contract OPContractsManager_Migrate_Test is OPContractsManager_TestInit { assertEq(address(_disputeGameFactory.gameImpls(GameTypes.SUPER_CANNON)), address(0)); assertEq(address(_disputeGameFactory.gameImpls(GameTypes.PERMISSIONED_CANNON)), address(0)); assertEq(address(_disputeGameFactory.gameImpls(GameTypes.SUPER_PERMISSIONED_CANNON)), address(0)); + if (isDevFeatureEnabled(DevFeatures.CANNON_KONA)) { + // Only explicitly zeroed out if feature is enabled. Otherwise left unchanged (which may still be 0). + assertEq(address(_disputeGameFactory.gameImpls(GameTypes.CANNON_KONA)), address(0)); + assertEq(address(_disputeGameFactory.gameImpls(GameTypes.SUPER_CANNON_KONA)), address(0)); + } } /// @notice Tests that the migration function succeeds when requesting to use the @@ -1915,6 +1824,29 @@ contract OPContractsManager_Migrate_Test is OPContractsManager_TestInit { input, OPContractsManagerInteropMigrator.OPContractsManagerInteropMigrator_SuperchainConfigMismatch.selector ); } + + function test_migrate_zerosOutCannonKonaGameTypes_succeeds() public { + skipIfDevFeatureDisabled(DevFeatures.CANNON_KONA); + IOPContractsManagerInteropMigrator.MigrateInput memory input = _getDefaultInput(); + + // Grab the existing DisputeGameFactory for each chain. 
+ IDisputeGameFactory oldDisputeGameFactory1 = + IDisputeGameFactory(payable(chainDeployOutput1.systemConfigProxy.disputeGameFactory())); + IDisputeGameFactory oldDisputeGameFactory2 = + IDisputeGameFactory(payable(chainDeployOutput2.systemConfigProxy.disputeGameFactory())); + // Ensure cannon kona games have implementations + oldDisputeGameFactory1.setImplementation(GameTypes.CANNON_KONA, IDisputeGame(address(1))); + oldDisputeGameFactory2.setImplementation(GameTypes.CANNON_KONA, IDisputeGame(address(1))); + oldDisputeGameFactory1.setImplementation(GameTypes.SUPER_CANNON_KONA, IDisputeGame(address(2))); + oldDisputeGameFactory2.setImplementation(GameTypes.SUPER_CANNON_KONA, IDisputeGame(address(2))); + + // Execute the migration. + _doMigration(input); + + // Assert that the old game implementations are now zeroed out. + _assertOldGamesZeroed(oldDisputeGameFactory1); + _assertOldGamesZeroed(oldDisputeGameFactory2); + } } /// @title OPContractsManager_Deploy_Test @@ -1930,79 +1862,57 @@ contract OPContractsManager_Deploy_Test is DeployOPChain_TestBase { event Deployed(uint256 indexed l2ChainId, address indexed deployer, bytes deployOutput); - function setUp() public override { - DeployOPChain_TestBase.setUp(); - - doi.set(doi.opChainProxyAdminOwner.selector, opChainProxyAdminOwner); - doi.set(doi.systemConfigOwner.selector, systemConfigOwner); - doi.set(doi.batcher.selector, batcher); - doi.set(doi.unsafeBlockSigner.selector, unsafeBlockSigner); - doi.set(doi.proposer.selector, proposer); - doi.set(doi.challenger.selector, challenger); - doi.set(doi.basefeeScalar.selector, basefeeScalar); - doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); - doi.set(doi.l2ChainId.selector, l2ChainId); - doi.set(doi.opcm.selector, address(opcm)); - doi.set(doi.gasLimit.selector, gasLimit); - - doi.set(doi.disputeGameType.selector, disputeGameType); - doi.set(doi.disputeAbsolutePrestate.selector, disputeAbsolutePrestate); - doi.set(doi.disputeMaxGameDepth.selector, 
disputeMaxGameDepth); - doi.set(doi.disputeSplitDepth.selector, disputeSplitDepth); - doi.set(doi.disputeClockExtension.selector, disputeClockExtension); - doi.set(doi.disputeMaxClockDuration.selector, disputeMaxClockDuration); - } - // This helper function is used to convert the input struct type defined in DeployOPChain.s.sol // to the input struct type defined in OPContractsManager.sol. - function toOPCMDeployInput(DeployOPChainInput _doi) + function toOPCMDeployInput(Types.DeployOPChainInput memory _doi) internal - view returns (IOPContractsManager.DeployInput memory) { + bytes memory startingAnchorRoot = new DeployOPChain().startingAnchorRoot(); return IOPContractsManager.DeployInput({ roles: IOPContractsManager.Roles({ - opChainProxyAdminOwner: _doi.opChainProxyAdminOwner(), - systemConfigOwner: _doi.systemConfigOwner(), - batcher: _doi.batcher(), - unsafeBlockSigner: _doi.unsafeBlockSigner(), - proposer: _doi.proposer(), - challenger: _doi.challenger() + opChainProxyAdminOwner: _doi.opChainProxyAdminOwner, + systemConfigOwner: _doi.systemConfigOwner, + batcher: _doi.batcher, + unsafeBlockSigner: _doi.unsafeBlockSigner, + proposer: _doi.proposer, + challenger: _doi.challenger }), - basefeeScalar: _doi.basefeeScalar(), - blobBasefeeScalar: _doi.blobBaseFeeScalar(), - l2ChainId: _doi.l2ChainId(), - startingAnchorRoot: _doi.startingAnchorRoot(), - saltMixer: _doi.saltMixer(), - gasLimit: _doi.gasLimit(), - disputeGameType: _doi.disputeGameType(), - disputeAbsolutePrestate: _doi.disputeAbsolutePrestate(), - disputeMaxGameDepth: _doi.disputeMaxGameDepth(), - disputeSplitDepth: _doi.disputeSplitDepth(), - disputeClockExtension: _doi.disputeClockExtension(), - disputeMaxClockDuration: _doi.disputeMaxClockDuration() + basefeeScalar: _doi.basefeeScalar, + blobBasefeeScalar: _doi.blobBaseFeeScalar, + l2ChainId: _doi.l2ChainId, + startingAnchorRoot: startingAnchorRoot, + saltMixer: _doi.saltMixer, + gasLimit: _doi.gasLimit, + disputeGameType: _doi.disputeGameType, + 
disputeAbsolutePrestate: _doi.disputeAbsolutePrestate, + disputeMaxGameDepth: _doi.disputeMaxGameDepth, + disputeSplitDepth: _doi.disputeSplitDepth, + disputeClockExtension: _doi.disputeClockExtension, + disputeMaxClockDuration: _doi.disputeMaxClockDuration }); } function test_deploy_l2ChainIdEqualsZero_reverts() public { - IOPContractsManager.DeployInput memory deployInput = toOPCMDeployInput(doi); - deployInput.l2ChainId = 0; + IOPContractsManager.DeployInput memory input = toOPCMDeployInput(deployOPChainInput); + input.l2ChainId = 0; + vm.expectRevert(IOPContractsManager.InvalidChainId.selector); - opcm.deploy(deployInput); + opcm.deploy(input); } function test_deploy_l2ChainIdEqualsCurrentChainId_reverts() public { - IOPContractsManager.DeployInput memory deployInput = toOPCMDeployInput(doi); - deployInput.l2ChainId = block.chainid; + IOPContractsManager.DeployInput memory input = toOPCMDeployInput(deployOPChainInput); + input.l2ChainId = block.chainid; vm.expectRevert(IOPContractsManager.InvalidChainId.selector); - opcm.deploy(deployInput); + opcm.deploy(input); } function test_deploy_succeeds() public { vm.expectEmit(true, true, true, false); // TODO precompute the expected `deployOutput`. - emit Deployed(doi.l2ChainId(), address(this), bytes("")); - opcm.deploy(toOPCMDeployInput(doi)); + emit Deployed(deployOPChainInput.l2ChainId, address(this), bytes("")); + opcm.deploy(toOPCMDeployInput(deployOPChainInput)); } } @@ -2021,51 +1931,3 @@ contract OPContractsManager_Version_Test is OPContractsManager_TestInit { assertNotEq(abi.encode(prestateUpdater.version()), abi.encode(0)); } } - -/// @title OPContractsManager_SetRC_Test -/// @notice Tests the `setRC` function of the `OPContractsManager` contract. -contract OPContractsManager_SetRC_Test is OPContractsManager_Upgrade_Harness { - event Released(bool _isRC); - - /// @notice Tests the setRC function can be set by the upgrade controller. 
- function test_setRC_succeeds(bool _isRC) public { - skipIfNotOpFork("test_setRC_succeeds"); - - vm.prank(upgrader); - - vm.expectEmit(true, true, true, true); - emit Released(_isRC); - - opcm.setRC(_isRC); - assertTrue(opcm.isRC() == _isRC, "isRC should be true"); - bytes memory releaseBytes = bytes(opcm.l1ContractsRelease()); - if (_isRC) { - assertEq(Bytes.slice(releaseBytes, releaseBytes.length - 3, 3), "-rc", "release should end with '-rc'"); - } else { - assertNotEq( - Bytes.slice(releaseBytes, releaseBytes.length - 3, 3), "-rc", "release should not end with '-rc'" - ); - } - } - - /// @notice Tests the setRC function can not be set by non-upgrade controller. - function test_setRC_nonUpgradeController_reverts(address _nonUpgradeController) public { - // Disallow the upgrade controller to have code, or be a 'special' address. - if ( - _nonUpgradeController == upgrader || _nonUpgradeController == address(0) - || _nonUpgradeController < address(0x4200000000000000000000000000000000000000) - || _nonUpgradeController > address(0x4200000000000000000000000000000000000800) - || _nonUpgradeController == address(vm) - || _nonUpgradeController == 0x000000000000000000636F6e736F6c652e6c6f67 - || _nonUpgradeController == 0x4e59b44847b379578588920cA78FbF26c0B4956C - || _nonUpgradeController.code.length > 0 - ) { - _nonUpgradeController = makeAddr("nonUpgradeController"); - } - - vm.prank(_nonUpgradeController); - - vm.expectRevert(IOPContractsManager.OnlyUpgradeController.selector); - opcm.setRC(true); - } -} diff --git a/packages/contracts-bedrock/test/L1/OPContractsManagerContractsContainer.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManagerContractsContainer.t.sol new file mode 100644 index 0000000000000..0028b6db8139d --- /dev/null +++ b/packages/contracts-bedrock/test/L1/OPContractsManagerContractsContainer.t.sol @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Testing +import { OPContractsManager_TestInit } from 
"test/L1/OPContractsManager.t.sol"; + +// Contracts +import { OPContractsManager, OPContractsManagerContractsContainer } from "src/L1/OPContractsManager.sol"; + +/// @title OPContractsManagerContractsContainer_Constructor_Test +/// @notice Tests the constructor of the `OPContractsManagerContractsContainer` contract. +contract OPContractsManagerContractsContainer_Constructor_Test is OPContractsManager_TestInit { + /// @notice Tests that the constructor succeeds when the devFeatureBitmap is in dev. + /// @param _devFeatureBitmap The devFeatureBitmap to use. + function testFuzz_constructor_devBitmapInDev_succeeds(bytes32 _devFeatureBitmap) public { + // Etch into the magic testing address. + vm.etch(address(0xbeefcafe), hex"01"); + + // Convert to proper OPCM type for construction. + OPContractsManager opcm2 = OPContractsManager(address(opcm)); + + // Should not revert. + OPContractsManagerContractsContainer container = new OPContractsManagerContractsContainer({ + _blueprints: opcm2.blueprints(), + _implementations: opcm2.implementations(), + _devFeatureBitmap: _devFeatureBitmap + }); + + // Should have the correct devFeatureBitmap. + assertEq(container.devFeatureBitmap(), _devFeatureBitmap); + } + + /// @notice Tests that the constructor reverts when the devFeatureBitmap is in prod. + /// @param _devFeatureBitmap The devFeatureBitmap to use. + function testFuzz_constructor_devBitmapInProd_reverts(bytes32 _devFeatureBitmap) public { + // Anything but zero! + _devFeatureBitmap = bytes32(bound(uint256(_devFeatureBitmap), 1, type(uint256).max)); + + // Make sure magic address has no code. + vm.etch(address(0xbeefcafe), bytes("")); + + // Convert to proper OPCM type for construction. + OPContractsManager opcm2 = OPContractsManager(address(opcm)); + + // Set the chain ID to 1. + vm.chainId(1); + + // Fetch ahead of time to avoid expectRevert applying to these functions by accident. 
+ OPContractsManager.Blueprints memory blueprints = opcm2.blueprints(); + OPContractsManager.Implementations memory implementations = opcm2.implementations(); + + // Should revert. + vm.expectRevert( + OPContractsManagerContractsContainer.OPContractsManagerContractsContainer_DevFeatureInProd.selector + ); + OPContractsManagerContractsContainer container = new OPContractsManagerContractsContainer({ + _blueprints: blueprints, + _implementations: implementations, + _devFeatureBitmap: _devFeatureBitmap + }); + + // Constructor shouldn't have worked, foundry makes this return address(1). + assertEq(address(container), address(1)); + } + + /// @notice Tests that the constructor succeeds when the devFeatureBitmap is used on the + /// mainnet chain ID but this is actually a test environment as shown by the magic + /// address having code. + /// @param _devFeatureBitmap The devFeatureBitmap to use. + function test_constructor_devBitmapMainnetButTestEnv_succeeds(bytes32 _devFeatureBitmap) public { + // Make sure magic address has code. + vm.etch(address(0xbeefcafe), hex"01"); + + // Convert to proper OPCM type for construction. + OPContractsManager opcm2 = OPContractsManager(address(opcm)); + + // Set the chain ID to 1. + vm.chainId(1); + + // Should not revert. + OPContractsManagerContractsContainer container = new OPContractsManagerContractsContainer({ + _blueprints: opcm2.blueprints(), + _implementations: opcm2.implementations(), + _devFeatureBitmap: _devFeatureBitmap + }); + + // Should have the correct devFeatureBitmap. 
+ assertEq(container.devFeatureBitmap(), _devFeatureBitmap); + } +} diff --git a/packages/contracts-bedrock/test/L1/OPContractsManagerStandardValidator.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManagerStandardValidator.t.sol index bbda893e41294..8bbbd9e8c2cd7 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManagerStandardValidator.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManagerStandardValidator.t.sol @@ -3,11 +3,13 @@ pragma solidity 0.8.15; // Testing import { CommonTest } from "test/setup/CommonTest.sol"; +import { StandardConstants } from "scripts/deploy/StandardConstants.sol"; // Libraries import { GameTypes, Duration, Claim } from "src/dispute/lib/Types.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { ForgeArtifacts } from "scripts/libraries/ForgeArtifacts.sol"; +import { Features } from "src/libraries/Features.sol"; // Interfaces import { IOPContractsManager } from "interfaces/L1/IOPContractsManager.sol"; @@ -134,7 +136,7 @@ contract OPContractsManagerStandardValidator_TestInit is CommonTest { ); vm.mockCall( address(delayedWeth), - abi.encodeCall(IDelayedWETH.proxyAdminOwner, ()), + abi.encodeCall(IProxyAdminOwnedBase.proxyAdminOwner, ()), abi.encode(opcm.opcmStandardValidator().l1PAOMultisig()) ); // Use vm.store so that the .setImplementation call below works. 
@@ -280,7 +282,9 @@ contract OPContractsManagerStandardValidator_GeneralOverride_Test is OPContracts IOPContractsManagerStandardValidator.ValidationOverrides memory overrides = IOPContractsManagerStandardValidator .ValidationOverrides({ l1PAOMultisig: address(0xbad), challenger: address(0xc0ffee) }); vm.mockCall( - address(delayedWeth), abi.encodeCall(IDelayedWETH.proxyAdminOwner, ()), abi.encode(overrides.l1PAOMultisig) + address(delayedWeth), + abi.encodeCall(IProxyAdminOwnedBase.proxyAdminOwner, ()), + abi.encode(overrides.l1PAOMultisig) ); vm.mockCall(address(proxyAdmin), abi.encodeCall(IProxyAdmin.owner, ()), abi.encode(overrides.l1PAOMultisig)); vm.mockCall( @@ -332,7 +336,9 @@ contract OPContractsManagerStandardValidator_ProxyAdmin_Test is OPContractsManag /// ProxyAdmin owner is not correct. function test_validate_invalidProxyAdminOwner_succeeds() public { vm.mockCall(address(proxyAdmin), abi.encodeCall(IProxyAdmin.owner, ()), abi.encode(address(0xbad))); - vm.mockCall(address(delayedWeth), abi.encodeCall(IDelayedWETH.proxyAdminOwner, ()), abi.encode(address(0xbad))); + vm.mockCall( + address(delayedWeth), abi.encodeCall(IProxyAdminOwnedBase.proxyAdminOwner, ()), abi.encode(address(0xbad)) + ); assertEq("PROXYA-10,PDDG-DWETH-30,PLDG-DWETH-30", _validate(true)); } @@ -341,7 +347,7 @@ contract OPContractsManagerStandardValidator_ProxyAdmin_Test is OPContractsManag function test_validate_overridenProxyAdminOwner_succeeds() public { IOPContractsManagerStandardValidator.ValidationOverrides memory overrides = _defaultValidationOverrides(); overrides.l1PAOMultisig = address(0xbad); - vm.mockCall(address(delayedWeth), abi.encodeCall(IDelayedWETH.proxyAdminOwner, ()), abi.encode(0xbad)); + vm.mockCall(address(delayedWeth), abi.encodeCall(IProxyAdminOwnedBase.proxyAdminOwner, ()), abi.encode(0xbad)); vm.mockCall(address(proxyAdmin), abi.encodeCall(IProxyAdmin.owner, ()), abi.encode(address(0xbad))); vm.mockCall( address(disputeGameFactory), @@ -754,7 +760,12 @@ 
contract OPContractsManagerStandardValidator_ETHLockbox_Test is OPContractsManag /// ETHLockbox version is invalid. function test_validate_ethLockboxInvalidVersion_succeeds() public { vm.mockCall(address(ethLockbox), abi.encodeCall(ISemver.version, ()), abi.encode("0.0.0")); - assertEq("LOCKBOX-10", _validate(true)); + + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + assertEq("LOCKBOX-10", _validate(true)); + } else { + assertEq("", _validate(true)); + } } /// @notice Tests that the validate function successfully returns the right error when the @@ -765,7 +776,12 @@ contract OPContractsManagerStandardValidator_ETHLockbox_Test is OPContractsManag abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(ethLockbox))), abi.encode(address(0xbad)) ); - assertEq("LOCKBOX-20", _validate(true)); + + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + assertEq("LOCKBOX-20", _validate(true)); + } else { + assertEq("", _validate(true)); + } } /// @notice Tests that the validate function successfully returns the right error when the @@ -774,14 +790,24 @@ contract OPContractsManagerStandardValidator_ETHLockbox_Test is OPContractsManag vm.mockCall( address(ethLockbox), abi.encodeCall(IProxyAdminOwnedBase.proxyAdmin, ()), abi.encode(address(0xbad)) ); - assertEq("LOCKBOX-30", _validate(true)); + + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + assertEq("LOCKBOX-30", _validate(true)); + } else { + assertEq("", _validate(true)); + } } /// @notice Tests that the validate function successfully returns the right error when the /// ETHLockbox systemConfig is invalid. 
function test_validate_ethLockboxInvalidSystemConfig_succeeds() public { vm.mockCall(address(ethLockbox), abi.encodeCall(IETHLockbox.systemConfig, ()), abi.encode(address(0xbad))); - assertEq("LOCKBOX-40", _validate(true)); + + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + assertEq("LOCKBOX-40", _validate(true)); + } else { + assertEq("", _validate(true)); + } } /// @notice Tests that the validate function successfully returns the right error when the @@ -790,7 +816,12 @@ contract OPContractsManagerStandardValidator_ETHLockbox_Test is OPContractsManag vm.mockCall( address(ethLockbox), abi.encodeCall(IETHLockbox.authorizedPortals, (optimismPortal2)), abi.encode(false) ); - assertEq("LOCKBOX-50", _validate(true)); + + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + assertEq("LOCKBOX-50", _validate(true)); + } else { + assertEq("", _validate(true)); + } } } @@ -871,7 +902,9 @@ contract OPContractsManagerStandardValidator_PermissionedDisputeGame_Test is function test_validate_permissionedDisputeGameInvalidVM_succeeds() public { vm.mockCall(address(pdg), abi.encodeCall(IPermissionedDisputeGame.vm, ()), abi.encode(address(0xbad))); vm.mockCall(address(0xbad), abi.encodeCall(ISemver.version, ()), abi.encode("0.0.0")); - vm.mockCall(address(0xbad), abi.encodeCall(IMIPS64.stateVersion, ()), abi.encode(7)); + vm.mockCall( + address(0xbad), abi.encodeCall(IMIPS64.stateVersion, ()), abi.encode(StandardConstants.MIPS_VERSION) + ); assertEq("PDDG-VM-10,PDDG-VM-20", _validate(true)); } @@ -1187,7 +1220,9 @@ contract OPContractsManagerStandardValidator_FaultDisputeGame_Test is OPContract function test_validate_faultDisputeGameInvalidVM_succeeds() public { vm.mockCall(address(fdg), abi.encodeCall(IFaultDisputeGame.vm, ()), abi.encode(address(0xbad))); vm.mockCall(address(0xbad), abi.encodeCall(ISemver.version, ()), abi.encode("0.0.0")); - vm.mockCall(address(0xbad), abi.encodeCall(IMIPS64.stateVersion, ()), abi.encode(7)); + vm.mockCall( + address(0xbad), 
abi.encodeCall(IMIPS64.stateVersion, ()), abi.encode(StandardConstants.MIPS_VERSION) + ); assertEq("PLDG-VM-10,PLDG-VM-20", _validate(true)); } @@ -1316,34 +1351,43 @@ contract OPContractsManagerStandardValidator_Versions_Test is OPContractsManager /// @notice Tests that the version getter functions on `OPContractsManagerStandardValidator` return non-empty /// strings. function test_versions_succeeds() public view { - assertTrue(bytes(opcm.opcmStandardValidator().systemConfigVersion()).length > 0, "systemConfigVersion empty"); assertTrue( - bytes(opcm.opcmStandardValidator().optimismPortalVersion()).length > 0, "optimismPortalVersion empty" + bytes(ISemver(opcm.opcmStandardValidator().systemConfigImpl()).version()).length > 0, + "systemConfigVersion empty" + ); + assertTrue( + bytes(ISemver(opcm.opcmStandardValidator().optimismPortalImpl()).version()).length > 0, + "optimismPortalVersion empty" ); assertTrue( - bytes(opcm.opcmStandardValidator().l1CrossDomainMessengerVersion()).length > 0, + bytes(ISemver(opcm.opcmStandardValidator().l1CrossDomainMessengerImpl()).version()).length > 0, "l1CrossDomainMessengerVersion empty" ); assertTrue( - bytes(opcm.opcmStandardValidator().l1ERC721BridgeVersion()).length > 0, "l1ERC721BridgeVersion empty" + bytes(ISemver(opcm.opcmStandardValidator().l1ERC721BridgeImpl()).version()).length > 0, + "l1ERC721BridgeVersion empty" ); assertTrue( - bytes(opcm.opcmStandardValidator().l1StandardBridgeVersion()).length > 0, "l1StandardBridgeVersion empty" + bytes(ISemver(opcm.opcmStandardValidator().l1StandardBridgeImpl()).version()).length > 0, + "l1StandardBridgeVersion empty" ); - assertTrue(bytes(opcm.opcmStandardValidator().mipsVersion()).length > 0, "mipsVersion empty"); + assertTrue(bytes(ISemver(opcm.opcmStandardValidator().mipsImpl()).version()).length > 0, "mipsVersion empty"); assertTrue( - bytes(opcm.opcmStandardValidator().optimismMintableERC20FactoryVersion()).length > 0, + 
bytes(ISemver(opcm.opcmStandardValidator().optimismMintableERC20FactoryImpl()).version()).length > 0, "optimismMintableERC20FactoryVersion empty" ); assertTrue( - bytes(opcm.opcmStandardValidator().disputeGameFactoryVersion()).length > 0, + bytes(ISemver(opcm.opcmStandardValidator().disputeGameFactoryImpl()).version()).length > 0, "disputeGameFactoryVersion empty" ); assertTrue( - bytes(opcm.opcmStandardValidator().anchorStateRegistryVersion()).length > 0, + bytes(ISemver(opcm.opcmStandardValidator().anchorStateRegistryImpl()).version()).length > 0, "anchorStateRegistryVersion empty" ); - assertTrue(bytes(opcm.opcmStandardValidator().delayedWETHVersion()).length > 0, "delayedWETHVersion empty"); + assertTrue( + bytes(ISemver(opcm.opcmStandardValidator().delayedWETHImpl()).version()).length > 0, + "delayedWETHVersion empty" + ); assertTrue( bytes(opcm.opcmStandardValidator().permissionedDisputeGameVersion()).length > 0, "permissionedDisputeGameVersion empty" @@ -1351,6 +1395,9 @@ contract OPContractsManagerStandardValidator_Versions_Test is OPContractsManager assertTrue( bytes(opcm.opcmStandardValidator().preimageOracleVersion()).length > 0, "preimageOracleVersion empty" ); - assertTrue(bytes(opcm.opcmStandardValidator().ethLockboxVersion()).length > 0, "ethLockboxVersion empty"); + assertTrue( + bytes(ISemver(opcm.opcmStandardValidator().ethLockboxImpl()).version()).length > 0, + "ethLockboxVersion empty" + ); } } diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol index a99bdde4fe2f1..90691b95afa8f 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol @@ -19,11 +19,14 @@ import { Hashing } from "src/libraries/Hashing.sol"; import { Constants } from "src/libraries/Constants.sol"; import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; 
+import { DevFeatures } from "src/libraries/DevFeatures.sol"; +import { Features } from "src/libraries/Features.sol"; import "src/dispute/lib/Types.sol"; // Interfaces import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; import { IOptimismPortal2 as IOptimismPortal } from "interfaces/L1/IOptimismPortal2.sol"; +import { IOptimismPortalInterop } from "interfaces/L1/IOptimismPortalInterop.sol"; import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; import { IProxy } from "interfaces/universal/IProxy.sol"; @@ -45,8 +48,8 @@ contract OptimismPortal2_TestInit is DisputeGameFactory_TestInit { bytes[] _withdrawalProof; Types.OutputRootProof internal _outputRootProof; GameType internal respectedGameType; - // Use a constructor to set the storage vars above, so as to minimize the number of ffi calls. + // Use a constructor to set the storage vars above, so as to minimize the number of ffi calls. constructor() { super.setUp(); @@ -115,7 +118,10 @@ contract OptimismPortal2_TestInit is DisputeGameFactory_TestInit { vm.warp(block.timestamp + game.maxClockDuration().raw() + 1 seconds); // Fund the portal so that we can withdraw ETH. - vm.deal(address(ethLockbox), 0xFFFFFFFF); + vm.deal(address(optimismPortal2), 0xFFFFFFFF); + if (isUsingLockbox()) { + vm.deal(address(ethLockbox), 0xFFFFFFFF); + } } /// @notice Asserts that the reentrant call will revert. @@ -134,7 +140,7 @@ contract OptimismPortal2_TestInit is DisputeGameFactory_TestInit { /// @param _superRootsActive The value to set the superRootsActive variable to. function setSuperRootsActive(bool _superRootsActive) public { // Get the slot for superRootsActive. - StorageSlot memory slot = ForgeArtifacts.getSlot("OptimismPortal2", "superRootsActive"); + StorageSlot memory slot = ForgeArtifacts.getSlot("OptimismPortalInterop", "superRootsActive"); // Load the existing storage slot value. 
bytes32 existingValue = vm.load(address(optimismPortal2), bytes32(slot.slot)); @@ -147,6 +153,31 @@ contract OptimismPortal2_TestInit is DisputeGameFactory_TestInit { // Store the new value at the correct slot/offset. vm.store(address(optimismPortal2), bytes32(slot.slot), newValue); } + + /// @notice Checks if the ETHLockbox feature is enabled. + /// @return bool True if the ETHLockbox feature is enabled. + function isUsingLockbox() public view returns (bool) { + return + systemConfig.isFeatureEnabled(Features.ETH_LOCKBOX) && address(optimismPortal2.ethLockbox()) != address(0); + } + + /// @notice Enables the ETHLockbox feature if not enabled. + /// @param _lockbox Address of the lockbox to enable. + function forceEnableLockbox(address _lockbox) public { + if (!isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + vm.prank(address(proxyAdmin)); + systemConfig.setFeature(Features.ETH_LOCKBOX, true); + } + + // Overwrite the lockbox either way. + StorageSlot memory slot = ForgeArtifacts.getSlot("OptimismPortal2", "ethLockbox"); + vm.store(address(optimismPortal2), bytes32(slot.slot), bytes32(uint256(uint160(address(_lockbox))))); + + // If the recipient address has no code, store STOP so we don't get reverts. 
+ if (address(_lockbox).code.length == 0) { + vm.etch(address(_lockbox), hex"00"); + } + } } /// @title OptimismPortal2_Version_Test @@ -188,7 +219,12 @@ contract OptimismPortal2_Initialize_Test is OptimismPortal2_TestInit { assertEq(optimismPortal2.l2Sender(), Constants.DEFAULT_L2_SENDER); assertEq(optimismPortal2.paused(), false); assertEq(address(optimismPortal2.systemConfig()), address(systemConfig)); - assertEq(address(optimismPortal2.ethLockbox()), address(ethLockbox)); + + if (isUsingLockbox()) { + assertEq(address(optimismPortal2.ethLockbox()), address(ethLockbox)); + } else { + assertEq(address(optimismPortal2.ethLockbox()), address(0)); + } returnIfForkTest( "OptimismPortal2_Initialize_Test: Do not check guardian and respectedGameType on forked networks" @@ -220,6 +256,30 @@ contract OptimismPortal2_Initialize_Test is OptimismPortal2_TestInit { /// @notice Tests that the initialize function reverts if called by a non-proxy admin or owner. /// @param _sender The address of the sender to test. function testFuzz_initialize_notProxyAdminOrProxyAdminOwner_reverts(address _sender) public { + skipIfDevFeatureEnabled(DevFeatures.OPTIMISM_PORTAL_INTEROP); + + // Prank as the not ProxyAdmin or ProxyAdmin owner. + vm.assume(_sender != address(proxyAdmin) && _sender != proxyAdminOwner); + + // Get the slot for _initialized. + StorageSlot memory slot = ForgeArtifacts.getSlot("OptimismPortal2", "_initialized"); + + // Set the initialized slot to 0. + vm.store(address(optimismPortal2), bytes32(slot.slot), bytes32(0)); + + // Expect the revert with `ProxyAdminOwnedBase_NotProxyAdminOrProxyAdminOwner` selector. + vm.expectRevert(IProxyAdminOwnedBase.ProxyAdminOwnedBase_NotProxyAdminOrProxyAdminOwner.selector); + + // Call the `initialize` function with the sender + vm.prank(_sender); + optimismPortal2.initialize(systemConfig, anchorStateRegistry); + } + + /// @notice Tests that the initialize function reverts if called by a non-proxy admin or owner. 
+ /// @param _sender The address of the sender to test. + function testFuzz_initialize_interopNotProxyAdminOrProxyAdminOwner_reverts(address _sender) public { + skipIfDevFeatureDisabled(DevFeatures.OPTIMISM_PORTAL_INTEROP); + // Prank as the not ProxyAdmin or ProxyAdmin owner. vm.assume(_sender != address(proxyAdmin) && _sender != proxyAdminOwner); @@ -234,19 +294,52 @@ contract OptimismPortal2_Initialize_Test is OptimismPortal2_TestInit { // Call the `initialize` function with the sender vm.prank(_sender); - optimismPortal2.initialize(systemConfig, anchorStateRegistry, ethLockbox); + IOptimismPortalInterop(payable(optimismPortal2)).initialize(systemConfig, anchorStateRegistry, ethLockbox); + } + + /// @notice Tests that the initialize function reverts when lockbox state is invalid. + function test_initialize_invalidLockboxState_reverts() external { + skipIfDevFeatureEnabled(DevFeatures.OPTIMISM_PORTAL_INTEROP); + + // Get the slot for _initialized. + StorageSlot memory slot = ForgeArtifacts.getSlot("OptimismPortal2", "_initialized"); + + // Set the initialized slot to 0. + vm.store(address(optimismPortal2), bytes32(slot.slot), bytes32(0)); + + // Enable ETH_LOCKBOX feature but clear the lockbox address to create invalid state. + if (!systemConfig.isFeatureEnabled(Features.ETH_LOCKBOX)) { + vm.prank(address(proxyAdmin)); + systemConfig.setFeature(Features.ETH_LOCKBOX, true); + } + + // Clear the lockbox address. + StorageSlot memory lockboxSlot = ForgeArtifacts.getSlot("OptimismPortal2", "ethLockbox"); + vm.store(address(optimismPortal2), bytes32(lockboxSlot.slot), bytes32(0)); + + // Expect the revert with `OptimismPortal_InvalidLockboxState` selector. 
+ vm.expectRevert(IOptimismPortal.OptimismPortal_InvalidLockboxState.selector); + + // Call the `initialize` function + vm.prank(address(proxyAdmin)); + optimismPortal2.initialize(systemConfig, anchorStateRegistry); } } -/// @title OptimismPortal2_Upgrade_Test +/// @title OptimismPortal2_UpgradeInterop_Test /// @notice Reusable test for the current upgrade() function in the OptimismPortal2 contract. If /// the upgrade() function is changed, tests inside of this contract should be updated to /// reflect the new function. If the upgrade() function is removed, remove the /// corresponding tests but leave this contract in place so it's easy to add tests back /// in the future. -contract OptimismPortal2_Upgrade_Test is CommonTest { +contract OptimismPortal2_UpgradeInterop_Test is CommonTest { + function setUp() public virtual override { + super.setUp(); + skipIfDevFeatureDisabled(DevFeatures.OPTIMISM_PORTAL_INTEROP); + } + /// @notice Tests that the upgrade() function succeeds. - function testFuzz_upgrade_succeeds(address _newAnchorStateRegistry, uint256 _balance) external { + function testFuzz_upgrade_interop_succeeds(address _newAnchorStateRegistry, uint256 _balance) external { // Prevent overflow on an upgrade context _balance = bound(_balance, 0, type(uint256).max - address(ethLockbox).balance); @@ -265,11 +358,13 @@ contract OptimismPortal2_Upgrade_Test is CommonTest { // Call the upgrade function. vm.prank(address(optimismPortal2.proxyAdmin())); - optimismPortal2.upgrade(IAnchorStateRegistry(_newAnchorStateRegistry), IETHLockbox(ethLockbox)); + IOptimismPortalInterop(payable(optimismPortal2)).upgrade( + IAnchorStateRegistry(_newAnchorStateRegistry), IETHLockbox(ethLockbox) + ); // Verify that the initialized slot was updated. 
bytes32 initializedSlotAfter = vm.load(address(optimismPortal2), bytes32(slot.slot)); - assertEq(initializedSlotAfter, bytes32(uint256(2))); + assertEq(initializedSlotAfter, bytes32(uint256(optimismPortal2.initVersion()))); // Assert the portal is properly upgraded. assertEq(address(optimismPortal2.ethLockbox()), address(ethLockbox)); @@ -281,7 +376,7 @@ contract OptimismPortal2_Upgrade_Test is CommonTest { // Now we migrate liquidity. vm.prank(proxyAdminOwner); - optimismPortal2.migrateLiquidity(); + IOptimismPortalInterop(payable(optimismPortal2)).migrateLiquidity(); // Balance has been updated. assertEq(address(optimismPortal2).balance, 0); @@ -298,12 +393,16 @@ contract OptimismPortal2_Upgrade_Test is CommonTest { // Trigger first upgrade. vm.prank(address(optimismPortal2.proxyAdmin())); - optimismPortal2.upgrade(IAnchorStateRegistry(address(0xdeadbeef)), IETHLockbox(ethLockbox)); + IOptimismPortalInterop(payable(optimismPortal2)).upgrade( + IAnchorStateRegistry(address(0xdeadbeef)), IETHLockbox(ethLockbox) + ); // Try to trigger second upgrade. vm.prank(address(optimismPortal2.proxyAdmin())); vm.expectRevert("Initializable: contract is already initialized"); - optimismPortal2.upgrade(IAnchorStateRegistry(address(0xdeadbeef)), IETHLockbox(ethLockbox)); + IOptimismPortalInterop(payable(optimismPortal2)).upgrade( + IAnchorStateRegistry(address(0xdeadbeef)), IETHLockbox(ethLockbox) + ); } /// @notice Tests that the upgrade() function reverts if called after initialization. @@ -311,9 +410,9 @@ contract OptimismPortal2_Upgrade_Test is CommonTest { // Get the slot for _initialized. StorageSlot memory slot = ForgeArtifacts.getSlot("OptimismPortal2", "_initialized"); - // Slot value should be set to 2 (already initialized). + // Slot value should be set to already initialized. 
bytes32 initializedSlotBefore = vm.load(address(optimismPortal2), bytes32(slot.slot)); - assertEq(initializedSlotBefore, bytes32(uint256(2))); + assertEq(initializedSlotBefore, bytes32(uint256(optimismPortal2.initVersion()))); // AnchorStateRegistry address should be non-zero. assertNotEq(address(optimismPortal2.anchorStateRegistry()), address(0)); @@ -323,7 +422,9 @@ contract OptimismPortal2_Upgrade_Test is CommonTest { // Try to trigger upgrade(). vm.expectRevert("Initializable: contract is already initialized"); - optimismPortal2.upgrade(IAnchorStateRegistry(address(0xdeadbeef)), IETHLockbox(ethLockbox)); + IOptimismPortalInterop(payable(optimismPortal2)).upgrade( + IAnchorStateRegistry(address(0xdeadbeef)), IETHLockbox(ethLockbox) + ); } /// @notice Tests that the upgrade() function reverts if called by a non-proxy admin or owner. @@ -343,20 +444,175 @@ contract OptimismPortal2_Upgrade_Test is CommonTest { // Call the `upgrade` function with the sender vm.prank(_sender); - optimismPortal2.upgrade(IAnchorStateRegistry(address(0xdeadbeef)), IETHLockbox(ethLockbox)); + IOptimismPortalInterop(payable(optimismPortal2)).upgrade( + IAnchorStateRegistry(address(0xdeadbeef)), IETHLockbox(ethLockbox) + ); } } /// @title OptimismPortal2_MinimumGasLimit_Test /// @notice Test contract for OptimismPortal2 `minimumGasLimit` function. contract OptimismPortal2_MinimumGasLimit_Test is OptimismPortal2_TestInit { - /// @notice Tests that `minimumGasLimit` succeeds for small calldata sizes. + /// @notice Tests that `minimumGasLimit` succeeds for various calldata sizes. /// @dev The gas limit should be 21k for 0 calldata and increase linearly for larger calldata /// sizes. 
- function test_minimumGasLimit_succeeds() external view { + function test_minimumGasLimit_zeroCalldata_succeeds() external view { assertEq(optimismPortal2.minimumGasLimit(0), 21_000); - assertTrue(optimismPortal2.minimumGasLimit(2) > optimismPortal2.minimumGasLimit(1)); - assertTrue(optimismPortal2.minimumGasLimit(3) > optimismPortal2.minimumGasLimit(2)); + } + + /// @notice Tests that `minimumGasLimit` increases linearly with calldata size. + function testFuzz_minimumGasLimit_increasesLinearly_succeeds(uint64 _byteCount) external view { + // Bound to prevent overflow: ensure _byteCount * 40 + 21000 fits in uint64 + // Max safe value: (type(uint64).max - 21000) / 40 + _byteCount = uint64(bound(_byteCount, 1, (type(uint64).max - 21_000) / 40 - 1)); + + uint64 gasLimit1 = optimismPortal2.minimumGasLimit(_byteCount); + uint64 gasLimit2 = optimismPortal2.minimumGasLimit(_byteCount + 1); + + // Should increase by exactly 40 gas per byte + assertEq(gasLimit2, gasLimit1 + 40); + + // Should always be at least 21k base cost + linear increase + assertEq(gasLimit1, 21_000 + (_byteCount * 40)); + } +} + +/// @title OptimismPortal2_Paused_Test +/// @notice Test contract for OptimismPortal2 `paused` function. +contract OptimismPortal2_Paused_Test is OptimismPortal2_TestInit { + /// @notice Tests that `paused` returns the correct paused status. + function test_paused_succeeds() external view { + assertEq(optimismPortal2.paused(), systemConfig.paused()); + } +} + +/// @title OptimismPortal2_ProofMaturityDelaySeconds_Test +/// @notice Test contract for OptimismPortal2 `proofMaturityDelaySeconds` function. +contract OptimismPortal2_ProofMaturityDelaySeconds_Test is OptimismPortal2_TestInit { + /// @notice Tests that `proofMaturityDelaySeconds` returns the correct delay. 
+ function test_proofMaturityDelaySeconds_succeeds() external view { + assertTrue(optimismPortal2.proofMaturityDelaySeconds() > 0); + } +} + +/// @title OptimismPortal2_DisputeGameFactory_Test +/// @notice Test contract for OptimismPortal2 `disputeGameFactory` function. +contract OptimismPortal2_DisputeGameFactory_Test is OptimismPortal2_TestInit { + /// @notice Tests that `disputeGameFactory` returns the correct address. + function test_disputeGameFactory_succeeds() external view { + assertEq(address(optimismPortal2.disputeGameFactory()), address(disputeGameFactory)); + } +} + +/// @title OptimismPortal2_SuperchainConfig_Test +/// @notice Test contract for OptimismPortal2 `superchainConfig` function. +contract OptimismPortal2_SuperchainConfig_Test is OptimismPortal2_TestInit { + /// @notice Tests that `superchainConfig` returns the correct address. + function test_superchainConfig_succeeds() external view { + assertEq(address(optimismPortal2.superchainConfig()), address(superchainConfig)); + } +} + +/// @title OptimismPortal2_Guardian_Test +/// @notice Test contract for OptimismPortal2 `guardian` function. +contract OptimismPortal2_Guardian_Test is OptimismPortal2_TestInit { + /// @notice Tests that `guardian` returns the correct address. + function test_guardian_succeeds() external view { + assertEq(optimismPortal2.guardian(), systemConfig.guardian()); + } +} + +/// @title OptimismPortal2_DisputeGameFinalityDelaySeconds_Test +/// @notice Test contract for OptimismPortal2 `disputeGameFinalityDelaySeconds` function. +contract OptimismPortal2_DisputeGameFinalityDelaySeconds_Test is OptimismPortal2_TestInit { + /// @notice Tests that `disputeGameFinalityDelaySeconds` returns the correct delay. 
+ function test_disputeGameFinalityDelaySeconds_succeeds() external view { + assertEq( + optimismPortal2.disputeGameFinalityDelaySeconds(), anchorStateRegistry.disputeGameFinalityDelaySeconds() + ); + } +} + +/// @title OptimismPortal2_RespectedGameType_Test +/// @notice Test contract for OptimismPortal2 `respectedGameType` function. +contract OptimismPortal2_RespectedGameType_Test is OptimismPortal2_TestInit { + /// @notice Tests that `respectedGameType` returns the correct game type. + function test_respectedGameType_succeeds() external view { + assertEq(optimismPortal2.respectedGameType().raw(), anchorStateRegistry.respectedGameType().raw()); + } +} + +/// @title OptimismPortal2_RespectedGameTypeUpdatedAt_Test +/// @notice Test contract for OptimismPortal2 `respectedGameTypeUpdatedAt` function. +contract OptimismPortal2_RespectedGameTypeUpdatedAt_Test is OptimismPortal2_TestInit { + /// @notice Tests that `respectedGameTypeUpdatedAt` returns the correct timestamp. + function test_respectedGameTypeUpdatedAt_succeeds() external view { + assertEq(optimismPortal2.respectedGameTypeUpdatedAt(), anchorStateRegistry.retirementTimestamp()); + } +} + +/// @title OptimismPortal2_DisputeGameBlacklist_Test +/// @notice Test contract for OptimismPortal2 `disputeGameBlacklist` function. +contract OptimismPortal2_DisputeGameBlacklist_Test is OptimismPortal2_TestInit { + /// @notice Tests that `disputeGameBlacklist` returns false for non-blacklisted games. + function test_disputeGameBlacklist_nonBlacklisted_succeeds() external view { + assertFalse(optimismPortal2.disputeGameBlacklist(game)); + } + + /// @notice Tests that `disputeGameBlacklist` returns the correct status for any game. 
+ function testFuzz_disputeGameBlacklist_succeeds(IDisputeGame _game) external view { + bool expected = anchorStateRegistry.disputeGameBlacklist(_game); + assertEq(optimismPortal2.disputeGameBlacklist(_game), expected); + } +} + +/// @title OptimismPortal2_NumProofSubmitters_Test +/// @notice Test contract for OptimismPortal2 `numProofSubmitters` function. +contract OptimismPortal2_NumProofSubmitters_Test is OptimismPortal2_TestInit { + /// @notice Tests that `numProofSubmitters` returns zero for unproven withdrawals. + function test_numProofSubmitters_unprovenWithdrawal_succeeds() external view { + bytes32 withdrawalHash = Hashing.hashWithdrawal(_defaultTx); + assertEq(optimismPortal2.numProofSubmitters(withdrawalHash), 0); + } + + /// @notice Tests that `numProofSubmitters` returns the correct count after proving. + function test_numProofSubmitters_provenWithdrawal_succeeds() external { + bytes32 withdrawalHash = Hashing.hashWithdrawal(_defaultTx); + + // Prove the withdrawal + optimismPortal2.proveWithdrawalTransaction({ + _tx: _defaultTx, + _disputeGameIndex: _proposedGameIndex, + _outputRootProof: _outputRootProof, + _withdrawalProof: _withdrawalProof + }); + + assertEq(optimismPortal2.numProofSubmitters(withdrawalHash), 1); + } + + /// @notice Tests that `numProofSubmitters` increases with multiple proofs. 
+ function testFuzz_numProofSubmitters_multipleProofs_succeeds(address _prover) external { + vm.assume(_prover != address(0) && _prover != address(this)); + bytes32 withdrawalHash = Hashing.hashWithdrawal(_defaultTx); + + // First proof by this contract + optimismPortal2.proveWithdrawalTransaction({ + _tx: _defaultTx, + _disputeGameIndex: _proposedGameIndex, + _outputRootProof: _outputRootProof, + _withdrawalProof: _withdrawalProof + }); + + // Second proof by different prover + vm.prank(_prover); + optimismPortal2.proveWithdrawalTransaction({ + _tx: _defaultTx, + _disputeGameIndex: _proposedGameIndex, + _outputRootProof: _outputRootProof, + _withdrawalProof: _withdrawalProof + }); + + assertEq(optimismPortal2.numProofSubmitters(withdrawalHash), 2); } } @@ -382,8 +638,52 @@ contract OptimismPortal2_Receive_Test is OptimismPortal2_TestInit { _data: hex"" }); + if (isUsingLockbox()) { + // Expect call to the ETHLockbox to lock the funds only if the value is greater than 0. + vm.expectCall(address(ethLockbox), _value, abi.encodeCall(ethLockbox.lockETH, ()), _value > 0 ? 1 : 0); + } + + // give alice money and send as an eoa + vm.deal(alice, _value); + vm.prank(alice, alice); + (bool s,) = address(optimismPortal2).call{ value: _value }(hex""); + + assertTrue(s); + + if (isUsingLockbox()) { + assertEq(address(optimismPortal2).balance, balanceBefore); + assertEq(address(ethLockbox).balance, lockboxBalanceBefore + _value); + } else { + assertEq(address(optimismPortal2).balance, balanceBefore + _value); + } + } + + function testFuzz_receive_withLockbox_succeeds(uint256 _value) external { + // Prevent overflow on an upgrade context. + // We use a dummy lockbox here because the real one won't work for upgrade tests. 
+ address dummyLockbox = address(0xdeadbeef); + _value = bound(_value, 0, type(uint256).max - address(dummyLockbox).balance); + uint256 balanceBefore = address(optimismPortal2).balance; + uint256 lockboxBalanceBefore = address(dummyLockbox).balance; + _value = bound(_value, 0, type(uint256).max - balanceBefore); + + // Enable the lockbox. + forceEnableLockbox(dummyLockbox); + + // Expect the transaction deposited event. + vm.expectEmit(address(optimismPortal2)); + emitTransactionDeposited({ + _from: alice, + _to: alice, + _value: _value, + _mint: _value, + _gasLimit: 100_000, + _isCreation: false, + _data: hex"" + }); + // Expect call to the ETHLockbox to lock the funds only if the value is greater than 0. - vm.expectCall(address(ethLockbox), _value, abi.encodeCall(ethLockbox.lockETH, ()), _value > 0 ? 1 : 0); + vm.expectCall(address(dummyLockbox), _value, abi.encodeCall(ethLockbox.lockETH, ()), _value > 0 ? 1 : 0); // give alice money and send as an eoa vm.deal(alice, _value); @@ -392,7 +692,7 @@ contract OptimismPortal2_Receive_Test is OptimismPortal2_TestInit { assertTrue(s); assertEq(address(optimismPortal2).balance, balanceBefore); - assertEq(address(ethLockbox).balance, lockboxBalanceBefore + _value); + assertEq(address(dummyLockbox).balance, lockboxBalanceBefore + _value); } } @@ -457,13 +757,18 @@ contract OptimismPortal2_DonateETH_Test is OptimismPortal2_TestInit { /// @title OptimismPortal2_MigrateLiquidity_Test /// @notice Test contract for OptimismPortal2 `migrateLiquidity` function. contract OptimismPortal2_MigrateLiquidity_Test is CommonTest { + function setUp() public virtual override { + super.setUp(); + skipIfDevFeatureDisabled(DevFeatures.OPTIMISM_PORTAL_INTEROP); + } + /// @notice Tests the liquidity migration from the portal to the lockbox reverts if not called /// by the admin owner. 
function testFuzz_migrateLiquidity_notProxyAdminOwner_reverts(address _caller) external { vm.assume(_caller != optimismPortal2.proxyAdminOwner()); vm.expectRevert(IProxyAdminOwnedBase.ProxyAdminOwnedBase_NotProxyAdminOwner.selector); vm.prank(_caller); - optimismPortal2.migrateLiquidity(); + IOptimismPortalInterop(payable(optimismPortal2)).migrateLiquidity(); } /// @notice Tests that the liquidity migration from the portal to the lockbox succeeds. @@ -480,7 +785,7 @@ contract OptimismPortal2_MigrateLiquidity_Test is CommonTest { emit ETHMigrated(address(ethLockbox), _portalBalance); vm.prank(proxyAdminOwner); - optimismPortal2.migrateLiquidity(); + IOptimismPortalInterop(payable(optimismPortal2)).migrateLiquidity(); assertEq(address(optimismPortal2).balance, 0); assertEq(address(ethLockbox).balance, lockboxBalanceBefore + _portalBalance); @@ -490,6 +795,11 @@ contract OptimismPortal2_MigrateLiquidity_Test is CommonTest { /// @title OptimismPortal2_MigrateToSuperRoots_Test /// @notice Test contract for OptimismPortal2 `migrateToSuperRoots` function. contract OptimismPortal2_MigrateToSuperRoots_Test is OptimismPortal2_TestInit { + function setUp() public override { + super.setUp(); + skipIfDevFeatureDisabled(DevFeatures.OPTIMISM_PORTAL_INTEROP); + } + /// @notice Tests that `migrateToSuperRoots` reverts if the caller is not the proxy admin /// owner. 
function testFuzz_migrateToSuperRoots_notProxyAdminOwner_reverts(address _caller) external { @@ -497,7 +807,9 @@ contract OptimismPortal2_MigrateToSuperRoots_Test is OptimismPortal2_TestInit { vm.expectRevert(IProxyAdminOwnedBase.ProxyAdminOwnedBase_NotProxyAdminOwner.selector); vm.prank(_caller); - optimismPortal2.migrateToSuperRoots(IETHLockbox(address(1)), IAnchorStateRegistry(address(1))); + IOptimismPortalInterop(payable(optimismPortal2)).migrateToSuperRoots( + IETHLockbox(address(1)), IAnchorStateRegistry(address(1)) + ); } /// @notice Tests that `migrateToSuperRoots` reverts if the new registry is the same as the @@ -513,9 +825,11 @@ contract OptimismPortal2_MigrateToSuperRoots_Test is OptimismPortal2_TestInit { address caller = optimismPortal2.proxyAdminOwner(); // Expect the migration to revert. - vm.expectRevert(IOptimismPortal.OptimismPortal_MigratingToSameRegistry.selector); + vm.expectRevert(IOptimismPortalInterop.OptimismPortal_MigratingToSameRegistry.selector); vm.prank(caller); - optimismPortal2.migrateToSuperRoots(IETHLockbox(_newLockbox), newAnchorStateRegistry); + IOptimismPortalInterop(payable(optimismPortal2)).migrateToSuperRoots( + IETHLockbox(_newLockbox), newAnchorStateRegistry + ); } /// @notice Tests that `migrateToSuperRoots` updates the ETHLockbox contract, updates the @@ -532,11 +846,13 @@ contract OptimismPortal2_MigrateToSuperRoots_Test is OptimismPortal2_TestInit { emit PortalMigrated(oldLockbox, _newLockbox, oldAnchorStateRegistry, _newAnchorStateRegistry); vm.prank(optimismPortal2.proxyAdminOwner()); - optimismPortal2.migrateToSuperRoots(IETHLockbox(_newLockbox), IAnchorStateRegistry(_newAnchorStateRegistry)); + IOptimismPortalInterop(payable(optimismPortal2)).migrateToSuperRoots( + IETHLockbox(_newLockbox), IAnchorStateRegistry(_newAnchorStateRegistry) + ); assertEq(address(optimismPortal2.ethLockbox()), _newLockbox); assertEq(address(optimismPortal2.anchorStateRegistry()), _newAnchorStateRegistry); - 
assertTrue(optimismPortal2.superRootsActive()); + assertTrue(IOptimismPortalInterop(payable(optimismPortal2)).superRootsActive()); } /// @notice Tests that `migrateToSuperRoots` reverts when the system is paused. @@ -548,7 +864,9 @@ contract OptimismPortal2_MigrateToSuperRoots_Test is OptimismPortal2_TestInit { address caller = optimismPortal2.proxyAdminOwner(); vm.expectRevert(IOptimismPortal.OptimismPortal_CallPaused.selector); vm.prank(caller); - optimismPortal2.migrateToSuperRoots(IETHLockbox(address(1)), IAnchorStateRegistry(address(1))); + IOptimismPortalInterop(payable(optimismPortal2)).migrateToSuperRoots( + IETHLockbox(address(1)), IAnchorStateRegistry(address(1)) + ); } } @@ -582,14 +900,16 @@ contract OptimismPortal2_ProveWithdrawalTransaction_Test is OptimismPortal2_Test _withdrawalProof: _withdrawalProof }); - _defaultTx.target = address(ethLockbox); - vm.expectRevert(IOptimismPortal.OptimismPortal_BadTarget.selector); - optimismPortal2.proveWithdrawalTransaction({ - _tx: _defaultTx, - _disputeGameIndex: _proposedGameIndex, - _outputRootProof: _outputRootProof, - _withdrawalProof: _withdrawalProof - }); + if (isUsingLockbox()) { + _defaultTx.target = address(ethLockbox); + vm.expectRevert(IOptimismPortal.OptimismPortal_BadTarget.selector); + optimismPortal2.proveWithdrawalTransaction({ + _tx: _defaultTx, + _disputeGameIndex: _proposedGameIndex, + _outputRootProof: _outputRootProof, + _withdrawalProof: _withdrawalProof + }); + } } /// @notice Tests that `proveWithdrawalTransaction` reverts when the current timestamp is less @@ -813,12 +1133,14 @@ contract OptimismPortal2_ProveWithdrawalTransaction_Test is OptimismPortal2_Test /// @notice Tests that `proveWithdrawalTransaction` reverts when using the Output Roots version /// of `proveWithdrawalTransaction` when `superRootsActive` is true. 
function test_proveWithdrawalTransaction_outputRootVersionWhenSuperRootsActive_reverts() external { + skipIfDevFeatureDisabled(DevFeatures.OPTIMISM_PORTAL_INTEROP); + // Set superRootsActive to true. setSuperRootsActive(true); // Should revert. - vm.expectRevert(IOptimismPortal.OptimismPortal_WrongProofMethod.selector); - optimismPortal2.proveWithdrawalTransaction({ + vm.expectRevert(IOptimismPortalInterop.OptimismPortal_WrongProofMethod.selector); + IOptimismPortalInterop(payable(optimismPortal2)).proveWithdrawalTransaction({ _tx: _defaultTx, _disputeGameIndex: _proposedGameIndex, _outputRootProof: _outputRootProof, @@ -829,6 +1151,8 @@ contract OptimismPortal2_ProveWithdrawalTransaction_Test is OptimismPortal2_Test /// @notice Tests that `proveWithdrawalTransaction` reverts when using the Super Roots version /// of `proveWithdrawalTransaction` when `superRootsActive` is false. function test_proveWithdrawalTransaction_superRootsVersionWhenSuperRootsInactive_reverts() external { + skipIfDevFeatureDisabled(DevFeatures.OPTIMISM_PORTAL_INTEROP); + // Set up a dummy super root proof. Types.OutputRootWithChainId[] memory outputRootWithChainIdArr = new Types.OutputRootWithChainId[](1); outputRootWithChainIdArr[0] = @@ -840,8 +1164,8 @@ contract OptimismPortal2_ProveWithdrawalTransaction_Test is OptimismPortal2_Test }); // Should revert. - vm.expectRevert(IOptimismPortal.OptimismPortal_WrongProofMethod.selector); - optimismPortal2.proveWithdrawalTransaction({ + vm.expectRevert(IOptimismPortalInterop.OptimismPortal_WrongProofMethod.selector); + IOptimismPortalInterop(payable(optimismPortal2)).proveWithdrawalTransaction({ _tx: _defaultTx, _disputeGameProxy: game, _outputRootIndex: 0, @@ -854,6 +1178,8 @@ contract OptimismPortal2_ProveWithdrawalTransaction_Test is OptimismPortal2_Test /// @notice Tests that `proveWithdrawalTransaction` reverts when using the Super Roots version /// of `proveWithdrawalTransaction` when the provided proof is invalid. 
function test_proveWithdrawalTransaction_superRootsVersionBadProof_reverts() external { + skipIfDevFeatureDisabled(DevFeatures.OPTIMISM_PORTAL_INTEROP); + // Enable super roots. setSuperRootsActive(true); @@ -868,8 +1194,8 @@ contract OptimismPortal2_ProveWithdrawalTransaction_Test is OptimismPortal2_Test }); // Should revert because the proof is wrong. - vm.expectRevert(IOptimismPortal.OptimismPortal_InvalidSuperRootProof.selector); - optimismPortal2.proveWithdrawalTransaction({ + vm.expectRevert(IOptimismPortalInterop.OptimismPortal_InvalidSuperRootProof.selector); + IOptimismPortalInterop(payable(optimismPortal2)).proveWithdrawalTransaction({ _tx: _defaultTx, _disputeGameProxy: game, _outputRootIndex: 0, @@ -883,6 +1209,8 @@ contract OptimismPortal2_ProveWithdrawalTransaction_Test is OptimismPortal2_Test /// of `proveWithdrawalTransaction` when the provided proof is valid but the index is /// out of bounds. function test_proveWithdrawalTransaction_superRootsVersionBadIndex_reverts() external { + skipIfDevFeatureDisabled(DevFeatures.OPTIMISM_PORTAL_INTEROP); + // Enable super roots. setSuperRootsActive(true); @@ -903,8 +1231,8 @@ contract OptimismPortal2_ProveWithdrawalTransaction_Test is OptimismPortal2_Test vm.mockCall(address(game), abi.encodeCall(game.rootClaim, ()), abi.encode(expectedSuperRoot)); // Should revert because the proof is wrong. 
- vm.expectRevert(IOptimismPortal.OptimismPortal_InvalidOutputRootIndex.selector); - optimismPortal2.proveWithdrawalTransaction({ + vm.expectRevert(IOptimismPortalInterop.OptimismPortal_InvalidOutputRootIndex.selector); + IOptimismPortalInterop(payable(optimismPortal2)).proveWithdrawalTransaction({ _tx: _defaultTx, _disputeGameProxy: game, _outputRootIndex: outputRootWithChainIdArr.length, // out of bounds @@ -918,6 +1246,8 @@ contract OptimismPortal2_ProveWithdrawalTransaction_Test is OptimismPortal2_Test /// of `proveWithdrawalTransaction` when the provided proof is valid, index is correct, /// but the output root has the wrong chain id. function test_proveWithdrawalTransaction_superRootsVersionBadChainId_reverts() external { + skipIfDevFeatureDisabled(DevFeatures.OPTIMISM_PORTAL_INTEROP); + // Enable super roots. setSuperRootsActive(true); @@ -940,8 +1270,8 @@ contract OptimismPortal2_ProveWithdrawalTransaction_Test is OptimismPortal2_Test vm.mockCall(address(game), abi.encodeCall(game.rootClaim, ()), abi.encode(expectedSuperRoot)); // Should revert because the proof is wrong. - vm.expectRevert(IOptimismPortal.OptimismPortal_InvalidOutputRootChainId.selector); - optimismPortal2.proveWithdrawalTransaction({ + vm.expectRevert(IOptimismPortalInterop.OptimismPortal_InvalidOutputRootChainId.selector); + IOptimismPortalInterop(payable(optimismPortal2)).proveWithdrawalTransaction({ _tx: _defaultTx, _disputeGameProxy: game, _outputRootIndex: 0, @@ -955,6 +1285,8 @@ contract OptimismPortal2_ProveWithdrawalTransaction_Test is OptimismPortal2_Test /// of `proveWithdrawalTransaction` when the provided proof is valid, index is correct, /// chain id is correct, but the output root proof is invalid. function test_proveWithdrawalTransaction_superRootsVersionBadOutputRootProof_reverts() external { + skipIfDevFeatureDisabled(DevFeatures.OPTIMISM_PORTAL_INTEROP); + // Enable super roots. 
setSuperRootsActive(true); @@ -977,8 +1309,8 @@ contract OptimismPortal2_ProveWithdrawalTransaction_Test is OptimismPortal2_Test vm.mockCall(address(game), abi.encodeCall(game.rootClaim, ()), abi.encode(expectedSuperRoot)); // Should revert because the proof is wrong. - vm.expectRevert(IOptimismPortal.OptimismPortal_InvalidOutputRootProof.selector); - optimismPortal2.proveWithdrawalTransaction({ + vm.expectRevert(IOptimismPortalInterop.OptimismPortal_InvalidOutputRootProof.selector); + IOptimismPortalInterop(payable(optimismPortal2)).proveWithdrawalTransaction({ _tx: _defaultTx, _disputeGameProxy: game, _outputRootIndex: 0, @@ -990,6 +1322,8 @@ contract OptimismPortal2_ProveWithdrawalTransaction_Test is OptimismPortal2_Test /// @notice Tests that `proveWithdrawalTransaction` succeeds when all parameters are valid. function test_proveWithdrawalTransaction_superRootsVersion_succeeds() external { + skipIfDevFeatureDisabled(DevFeatures.OPTIMISM_PORTAL_INTEROP); + // Enable super roots. setSuperRootsActive(true); @@ -1010,7 +1344,7 @@ contract OptimismPortal2_ProveWithdrawalTransaction_Test is OptimismPortal2_Test vm.mockCall(address(game), abi.encodeCall(game.rootClaim, ()), abi.encode(expectedSuperRoot)); // Should succeed. 
- optimismPortal2.proveWithdrawalTransaction({ + IOptimismPortalInterop(payable(optimismPortal2)).proveWithdrawalTransaction({ _tx: _defaultTx, _disputeGameProxy: game, _outputRootIndex: 0, @@ -1045,9 +1379,11 @@ contract OptimismPortal2_FinalizeWithdrawalTransaction_Test is OptimismPortal2_T vm.expectRevert(IOptimismPortal.OptimismPortal_BadTarget.selector); optimismPortal2.finalizeWithdrawalTransaction(_defaultTx); - _defaultTx.target = address(ethLockbox); - vm.expectRevert(IOptimismPortal.OptimismPortal_BadTarget.selector); - optimismPortal2.finalizeWithdrawalTransaction(_defaultTx); + if (isUsingLockbox()) { + _defaultTx.target = address(ethLockbox); + vm.expectRevert(IOptimismPortal.OptimismPortal_BadTarget.selector); + optimismPortal2.finalizeWithdrawalTransaction(_defaultTx); + } } /// @notice Tests that `finalizeWithdrawalTransaction` reverts if the target reverts and caller @@ -1115,7 +1451,10 @@ contract OptimismPortal2_FinalizeWithdrawalTransaction_Test is OptimismPortal2_T // Fund the portal so that we can withdraw ETH. vm.store(address(optimismPortal2), bytes32(uint256(61)), bytes32(uint256(0xFFFFFFFF))); - vm.deal(address(ethLockbox), 0xFFFFFFFF); + vm.deal(address(optimismPortal2), 0xFFFFFFFF); + if (isUsingLockbox()) { + vm.deal(address(ethLockbox), 0xFFFFFFFF); + } uint256 bobBalanceBefore = bob.balance; @@ -1333,6 +1672,51 @@ contract OptimismPortal2_FinalizeWithdrawalTransaction_Test is OptimismPortal2_T /// @notice Tests that `finalizeWithdrawalTransaction` reverts if the target reverts. function test_finalizeWithdrawalTransaction_targetFails_fails() external { + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + vm.deal(address(optimismPortal2), 0); // no balance + } + + uint256 bobBalanceBefore = address(bob).balance; + vm.etch(bob, hex"fe"); // Contract with just the invalid opcode. 
+ + vm.expectEmit(true, true, true, true); + emit WithdrawalProven(_withdrawalHash, alice, bob); + vm.expectEmit(true, true, true, true); + emit WithdrawalProvenExtension1(_withdrawalHash, address(this)); + optimismPortal2.proveWithdrawalTransaction({ + _tx: _defaultTx, + _disputeGameIndex: _proposedGameIndex, + _outputRootProof: _outputRootProof, + _withdrawalProof: _withdrawalProof + }); + + // Resolve the dispute game. + game.resolveClaim(0, 0); + game.resolve(); + + vm.warp(block.timestamp + optimismPortal2.proofMaturityDelaySeconds() + 1); + vm.expectEmit(true, true, true, true); + emit WithdrawalFinalized(_withdrawalHash, false); + optimismPortal2.finalizeWithdrawalTransaction(_defaultTx); + + // Bob's balance should not have changed. + assertEq(address(bob).balance, bobBalanceBefore); + + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + // OptimismPortal2 should not have any stuck ETH. + assertEq(address(optimismPortal2).balance, 0); + } + } + + /// @notice Tests that `finalizeWithdrawalTransaction` reverts if the target reverts when + /// using the ETHLockbox. + function test_finalizeWithdrawalTransaction_lockboxAndTargetFails_fails() external { + // Enable the ETHLockbox. + address dummyLockbox = address(0xdeadbeef); + forceEnableLockbox(dummyLockbox); + vm.deal(address(dummyLockbox), 0xFFFFFFFF); + vm.deal(address(optimismPortal2), _defaultTx.value); + uint256 bobBalanceBefore = address(bob).balance; vm.etch(bob, hex"fe"); // Contract with just the invalid opcode. @@ -1504,7 +1888,10 @@ contract OptimismPortal2_FinalizeWithdrawalTransaction_Test is OptimismPortal2_T // Total ETH supply is currently about 120M ETH. 
uint256 value = bound(_value, 0, 200_000_000 ether); - vm.deal(address(ethLockbox), value); + vm.deal(address(optimismPortal2), value); + if (isUsingLockbox()) { + vm.deal(address(ethLockbox), value); + } uint256 gasLimit = bound(_gasLimit, 0, 50_000_000); uint256 nonce = l2ToL1MessagePasser.messageNonce(); @@ -1585,7 +1972,10 @@ contract OptimismPortal2_FinalizeWithdrawalTransaction_Test is OptimismPortal2_T // Total ETH supply is currently about 120M ETH. uint256 value = bound(_value, 0, 200_000_000 ether); - vm.deal(address(ethLockbox), value); + vm.deal(address(optimismPortal2), value); + if (isUsingLockbox()) { + vm.deal(address(ethLockbox), value); + } uint256 gasLimit = bound(_gasLimit, 0, 50_000_000); uint256 nonce = l2ToL1MessagePasser.messageNonce(); @@ -2117,8 +2507,10 @@ contract OptimismPortal2_DepositTransaction_Test is OptimismPortal2_TestInit { _data: _data }); - // Expect call to the ETHLockbox to lock the funds only if the value is greater than 0. - vm.expectCall(address(ethLockbox), _mint, abi.encodeCall(ethLockbox.lockETH, ()), _mint > 0 ? 1 : 0); + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + // Expect call to the ETHLockbox to lock the funds only if the value is greater than 0. + vm.expectCall(address(ethLockbox), _mint, abi.encodeCall(ethLockbox.lockETH, ()), _mint > 0 ? 1 : 0); + } vm.deal(depositor, _mint); vm.prank(depositor, depositor); @@ -2130,8 +2522,12 @@ contract OptimismPortal2_DepositTransaction_Test is OptimismPortal2_TestInit { _data: _data }); - assertEq(address(optimismPortal2).balance, balanceBefore); - assertEq(address(ethLockbox).balance, lockboxBalanceBefore + _mint); + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + assertEq(address(optimismPortal2).balance, balanceBefore); + assertEq(address(ethLockbox).balance, lockboxBalanceBefore + _mint); + } else { + assertEq(address(optimismPortal2).balance, balanceBefore + _mint); + } } /// @notice Tests that `depositTransaction` succeeds for an EOA using 7702 delegation. 
@@ -2188,8 +2584,13 @@ contract OptimismPortal2_DepositTransaction_Test is OptimismPortal2_TestInit { _isCreation: _isCreation, _data: _data }); - assertEq(address(optimismPortal2).balance, portalBalanceBefore); - assertEq(address(ethLockbox).balance, lockboxBalanceBefore + _mint); + + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + assertEq(address(optimismPortal2).balance, portalBalanceBefore); + assertEq(address(ethLockbox).balance, lockboxBalanceBefore + _mint); + } else { + assertEq(address(optimismPortal2).balance, portalBalanceBefore + _mint); + } } /// @notice Tests that `depositTransaction` succeeds for a contract. @@ -2229,8 +2630,10 @@ contract OptimismPortal2_DepositTransaction_Test is OptimismPortal2_TestInit { _data: _data }); - // Expect call to the ETHLockbox to lock the funds only if the value is greater than 0. - vm.expectCall(address(ethLockbox), _mint, abi.encodeCall(ethLockbox.lockETH, ()), _mint > 0 ? 1 : 0); + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + // Expect call to the ETHLockbox to lock the funds only if the value is greater than 0. + vm.expectCall(address(ethLockbox), _mint, abi.encodeCall(ethLockbox.lockETH, ()), _mint > 0 ? 1 : 0); + } vm.deal(address(this), _mint); vm.prank(address(this)); @@ -2241,8 +2644,13 @@ contract OptimismPortal2_DepositTransaction_Test is OptimismPortal2_TestInit { _isCreation: _isCreation, _data: _data }); - assertEq(address(optimismPortal2).balance, balanceBefore); - assertEq(address(ethLockbox).balance, lockboxBalanceBefore + _mint); + + if (isSysFeatureEnabled(Features.ETH_LOCKBOX)) { + assertEq(address(optimismPortal2).balance, balanceBefore); + assertEq(address(ethLockbox).balance, lockboxBalanceBefore + _mint); + } else { + assertEq(address(optimismPortal2).balance, balanceBefore + _mint); + } } } @@ -2369,7 +2777,7 @@ contract OptimismPortal2_Params_Test is CommonTest { // The value passed to the initialize must be larger than the last value // that initialize was called with. 
IProxy(payable(address(optimismPortal2))).upgradeToAndCall( - address(nextImpl), abi.encodeCall(NextImpl.initialize, (3)) + address(nextImpl), abi.encodeCall(NextImpl.initialize, (optimismPortal2.initVersion() + 1)) ); assertEq(IProxy(payable(address(optimismPortal2))).implementation(), address(nextImpl)); diff --git a/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol b/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol index 185eed02a6eeb..ec5c1ba228632 100644 --- a/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol +++ b/packages/contracts-bedrock/test/L1/ProtocolVersions.t.sol @@ -72,6 +72,16 @@ contract ProtocolVersions_Initialize_Test is ProtocolVersions_TestInit { } } +/// @title ProtocolVersions_Version_Test +/// @notice Test contract for ProtocolVersions `version` constant. +contract ProtocolVersions_Version_Test is ProtocolVersions_TestInit { + /// @notice Tests that the version function returns a valid string. We avoid testing + /// the specific value as it changes frequently. + function test_version_succeeds() external view { + assertGt(bytes(protocolVersions.version()).length, 0); + } +} + /// @title ProtocolVersions_SetRequired_Test /// @notice Test contract for ProtocolVersions `setRequired` function. 
contract ProtocolVersions_SetRequired_Test is ProtocolVersions_TestInit { diff --git a/packages/contracts-bedrock/test/L1/SuperchainConfig.t.sol b/packages/contracts-bedrock/test/L1/SuperchainConfig.t.sol index 2753d9182d60c..ce63b4a503756 100644 --- a/packages/contracts-bedrock/test/L1/SuperchainConfig.t.sol +++ b/packages/contracts-bedrock/test/L1/SuperchainConfig.t.sol @@ -7,7 +7,6 @@ import { CommonTest } from "test/setup/CommonTest.sol"; // Libraries import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { ForgeArtifacts, StorageSlot } from "scripts/libraries/ForgeArtifacts.sol"; -import { Constants } from "src/libraries/Constants.sol"; // Interfaces import { IProxy } from "interfaces/universal/IProxy.sol"; @@ -76,8 +75,8 @@ contract SuperchainConfig_Initialize_Test is SuperchainConfig_TestInit { /// owner. /// @param _sender The address of the sender to test. function testFuzz_initialize_notProxyAdminOrProxyAdminOwner_reverts(address _sender) public { - // Prank as the not ProxyAdmin or ProxyAdmin owner. - vm.assume(_sender != address(proxyAdmin) && _sender != proxyAdminOwner); + // Prank as not the superchain ProxyAdmin or ProxyAdmin owner. + vm.assume(_sender != address(superchainProxyAdmin) && _sender != superchainProxyAdminOwner); // Get the slot for _initialized. StorageSlot memory slot = ForgeArtifacts.getSlot("SuperchainConfig", "_initialized"); @@ -94,77 +93,6 @@ contract SuperchainConfig_Initialize_Test is SuperchainConfig_TestInit { } } -/// @title SuperchainConfig_Upgrade_Test -/// @notice Test contract for SuperchainConfig `upgrade` function. -contract SuperchainConfig_Upgrade_Test is SuperchainConfig_TestInit { - /// @notice Tests that `upgrade` successfully upgrades the contract. - function test_upgrade_succeeds() external { - // Get the slot for _initialized. - StorageSlot memory slot = ForgeArtifacts.getSlot("SuperchainConfig", "_initialized"); - - // Set the initialized slot to 0. 
- vm.store(address(superchainConfig), bytes32(slot.slot), bytes32(0)); - - // Get the slot for the SuperchainConfig's ProxyAdmin. - address proxyAdminAddress = - address(uint160(uint256(vm.load(address(superchainConfig), Constants.PROXY_OWNER_ADDRESS)))); - - // Upgrade the contract. - vm.prank(proxyAdminAddress); - superchainConfig.upgrade(); - - // Check that the guardian slot was updated. - bytes32 guardianSlot = bytes32(uint256(keccak256("superchainConfig.guardian")) - 1); - assertEq(vm.load(address(superchainConfig), guardianSlot), bytes32(0)); - - // Check that the paused slot was cleared. - bytes32 pausedSlot = bytes32(uint256(keccak256("superchainConfig.paused")) - 1); - assertEq(vm.load(address(superchainConfig), pausedSlot), bytes32(0)); - } - - /// @notice Tests that `upgrade` reverts when called a second time. - function test_upgrade_upgradeTwice_reverts() external { - // Get the slot for _initialized. - StorageSlot memory slot = ForgeArtifacts.getSlot("SuperchainConfig", "_initialized"); - - // Set the initialized slot to 0. - vm.store(address(superchainConfig), bytes32(slot.slot), bytes32(0)); - - // Get the slot for the SuperchainConfig's ProxyAdmin. - address proxyAdminAddress = - address(uint160(uint256(vm.load(address(superchainConfig), Constants.PROXY_OWNER_ADDRESS)))); - - // Trigger first upgrade. - vm.prank(proxyAdminAddress); - superchainConfig.upgrade(); - - // Trigger second upgrade. - vm.prank(proxyAdminAddress); - vm.expectRevert("Initializable: contract is already initialized"); - superchainConfig.upgrade(); - } - - /// @notice Tests that `upgrade` reverts when called by a non-proxy admin or owner. - /// @param _sender The address of the sender to test. - function testFuzz_upgrade_notProxyAdminOrProxyAdminOwner_reverts(address _sender) public { - // Prank as the not ProxyAdmin or ProxyAdmin owner. - vm.assume(_sender != address(proxyAdmin) && _sender != proxyAdminOwner); - - // Get the slot for _initialized. 
- StorageSlot memory slot = ForgeArtifacts.getSlot("SuperchainConfig", "_initialized"); - - // Set the initialized slot to 0. - vm.store(address(superchainConfig), bytes32(slot.slot), bytes32(0)); - - // Expect the revert with `ProxyAdminOwnedBase_NotProxyAdminOrProxyAdminOwner` selector. - vm.expectRevert(IProxyAdminOwnedBase.ProxyAdminOwnedBase_NotProxyAdminOrProxyAdminOwner.selector); - - // Call the `upgrade` function with the sender - vm.prank(_sender); - superchainConfig.upgrade(); - } -} - /// @title SuperchainConfig_PauseExpiry_Test /// @notice Test contract for SuperchainConfig `pauseExpiry` function. contract SuperchainConfig_PauseExpiry_Test is SuperchainConfig_TestInit { @@ -434,9 +362,9 @@ contract SuperchainConfig_PauseTimestamps_Test is SuperchainConfig_TestInit { /// @title SuperchainConfig_Version_Test /// @notice Test contract for SuperchainConfig `version` getter function. contract SuperchainConfig_Version_Test is SuperchainConfig_TestInit { - /// @notice Tests that `version` returns the correct version string. + /// @notice Tests that `version` returns a version string. 
function test_version_succeeds() external view { - assertEq(superchainConfig.version(), "2.3.0"); + assert(bytes(superchainConfig.version()).length > 0); } } diff --git a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol index 9b1e1c3d813e5..cfa090517b154 100644 --- a/packages/contracts-bedrock/test/L1/SystemConfig.t.sol +++ b/packages/contracts-bedrock/test/L1/SystemConfig.t.sol @@ -10,6 +10,7 @@ import { ForgeArtifacts, StorageSlot } from "scripts/libraries/ForgeArtifacts.so // Libraries import { Constants } from "src/libraries/Constants.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; +import { Features } from "src/libraries/Features.sol"; // Interfaces import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; @@ -22,6 +23,8 @@ import { IProxyAdminOwnedBase } from "interfaces/L1/IProxyAdminOwnedBase.sol"; contract SystemConfig_TestInit is CommonTest { event ConfigUpdate(uint256 indexed version, ISystemConfig.UpdateType indexed updateType, bytes data); + bytes32 public constant EXAMPLE_FEATURE = "EXAMPLE_FEATURE"; + address batchInbox; address owner; bytes32 batcherHash; @@ -34,7 +37,6 @@ contract SystemConfig_TestInit is CommonTest { function setUp() public virtual override { super.setUp(); - skipIfForkTest("SystemConfig_Initialize_Test: cannot test initialization on forked network"); batchInbox = deploy.cfg().batchInboxAddress(); owner = deploy.cfg().finalSystemOwner(); basefeeScalar = deploy.cfg().basefeeScalar(); @@ -92,6 +94,12 @@ contract SystemConfig_Constructor_Test is SystemConfig_TestInit { /// @title SystemConfig_Initialize_Test /// @notice Test contract for SystemConfig `initialize` function. contract SystemConfig_Initialize_Test is SystemConfig_TestInit { + /// @notice Skips the test if it's running on a forked network. 
+ function setUp() public override { + super.setUp(); + skipIfForkTest("SystemConfig_Initialize_Test: cannot test initialization on forked network"); + } + /// @notice Tests that initialization sets the correct values. function test_initialize_succeeds() external view { assertEq(systemConfig.owner(), owner); @@ -220,99 +228,6 @@ contract SystemConfig_Initialize_Test is SystemConfig_TestInit { } } -/// @title SystemConfig_upgrade_Test -/// @notice Reusable test for the current upgrade() function in the SystemConfig contract. If -/// the upgrade() function is changed, tests inside of this contract should be updated to -/// reflect the new function. If the upgrade() function is removed, remove the -/// corresponding tests but leave this contract in place so it's easy to add tests back -/// in the future. -contract SystemConfig_Upgrade_Test is SystemConfig_TestInit { - /// @notice Tests that the upgrade() function succeeds. - function test_upgrade_succeeds() external { - // Get the slot for _initialized. - StorageSlot memory slot = ForgeArtifacts.getSlot("SystemConfig", "_initialized"); - - // Set the initialized slot to 0. - vm.store(address(systemConfig), bytes32(slot.slot), bytes32(0)); - - // Verify the initial dispute game factory slot is non-zero. - // We set a value here since it seems this defaults to zero. - bytes32 disputeGameFactorySlot = bytes32(uint256(keccak256("systemconfig.disputegamefactory")) - 1); - vm.store(address(systemConfig), disputeGameFactorySlot, bytes32(uint256(1))); - assertNotEq(systemConfig.disputeGameFactory(), address(0)); - assertNotEq(vm.load(address(systemConfig), disputeGameFactorySlot), bytes32(0)); - - // Trigger upgrade(). - vm.prank(address(systemConfig.proxyAdmin())); - systemConfig.upgrade(1234, ISuperchainConfig(address(0xdeadbeef))); - - // Verify that the initialized slot was updated. 
- bytes32 initializedSlotAfter = vm.load(address(systemConfig), bytes32(slot.slot)); - assertEq(initializedSlotAfter, bytes32(uint256(2))); - - // Verify that the l2ChainId was updated. - assertEq(systemConfig.l2ChainId(), 1234); - - // Verify that the dispute game factory address was cleared. - assertEq(vm.load(address(systemConfig), disputeGameFactorySlot), bytes32(0)); - } - - /// @notice Tests that the upgrade() function reverts if called a second time. - function test_upgrade_upgradeTwice_reverts() external { - // Get the slot for _initialized. - StorageSlot memory slot = ForgeArtifacts.getSlot("SystemConfig", "_initialized"); - - // Set the initialized slot to 0. - vm.store(address(systemConfig), bytes32(slot.slot), bytes32(0)); - - // Trigger first upgrade. - vm.prank(address(systemConfig.proxyAdmin())); - systemConfig.upgrade(1234, ISuperchainConfig(address(0xdeadbeef))); - - // Try to trigger second upgrade. - vm.prank(address(systemConfig.proxyAdmin())); - vm.expectRevert("Initializable: contract is already initialized"); - systemConfig.upgrade(1234, ISuperchainConfig(address(0xdeadbeef))); - } - - /// @notice Tests that the upgrade() function reverts if called after initialization. - function test_upgrade_afterInitialization_reverts() external { - // Get the slot for _initialized. - StorageSlot memory slot = ForgeArtifacts.getSlot("SystemConfig", "_initialized"); - - // Slot value should be set to 2 (already initialized). - bytes32 initializedSlotBefore = vm.load(address(systemConfig), bytes32(slot.slot)); - assertEq(initializedSlotBefore, bytes32(uint256(2))); - - // l2ChainId should be non-zero. - assertNotEq(systemConfig.l2ChainId(), 0); - - // Try to trigger upgrade(). - vm.expectRevert("Initializable: contract is already initialized"); - systemConfig.upgrade(1234, ISuperchainConfig(address(0xdeadbeef))); - } - - /// @notice Tests that the upgrade() function reverts if called by a non-proxy admin or owner. 
- /// @param _sender The address of the sender to test. - function testFuzz_upgrade_notProxyAdminOrProxyAdminOwner_reverts(address _sender) public { - // Prank as the not ProxyAdmin or ProxyAdmin owner. - vm.assume(_sender != address(systemConfig.proxyAdmin()) && _sender != systemConfig.proxyAdminOwner()); - - // Get the slot for _initialized. - StorageSlot memory slot = ForgeArtifacts.getSlot("SystemConfig", "_initialized"); - - // Set the initialized slot to 0. - vm.store(address(systemConfig), bytes32(slot.slot), bytes32(0)); - - // Expect the revert with `ProxyAdminOwnedBase_NotProxyAdminOrProxyAdminOwner` selector. - vm.expectRevert(IProxyAdminOwnedBase.ProxyAdminOwnedBase_NotProxyAdminOrProxyAdminOwner.selector); - - // Call the `upgrade` function with the sender - vm.prank(_sender); - systemConfig.upgrade(1234, ISuperchainConfig(address(0xdeadbeef))); - } -} - /// @title SystemConfig_StartBlock_Test /// @notice Test contract for SystemConfig `startBlock` function. contract SystemConfig_StartBlock_Test is SystemConfig_TestInit { @@ -678,28 +593,45 @@ contract SystemConfig_SetResourceConfig_Test is SystemConfig_TestInit { /// @title SystemConfig_Paused_Test /// @notice Test contract for SystemConfig `paused` function. contract SystemConfig_Paused_Test is SystemConfig_TestInit { - /// @notice Tests that `paused()` returns the correct value. - function test_paused_succeeds() external view { - assertEq(systemConfig.paused(), superchainConfig.paused(address(0))); + /// @notice Tests that `paused()` returns false when no pauses are active. + function test_paused_noPauses_succeeds() external view { + assertFalse(systemConfig.paused()); } - /// @notice Tests that `paused()` returns the correct value after pausing. - function test_paused_afterPause_succeeds() external { + /// @notice Tests that `paused()` returns true when global pause is active. 
+ function test_paused_globalPause_succeeds() external { // Initially not paused assertFalse(systemConfig.paused()); - assertEq(systemConfig.paused(), superchainConfig.paused(address(0))); - // Pause the system + // Pause the system globally vm.prank(superchainConfig.guardian()); superchainConfig.pause(address(0)); // Verify paused state assertTrue(systemConfig.paused()); - assertEq(systemConfig.paused(), superchainConfig.paused(address(0))); } - /// @notice Tests that `paused()` returns true when the ETHLockbox identifier is set. + /// @notice Tests that `paused()` returns true when OptimismPortal identifier is paused and + /// the ETH_LOCKBOX feature is disabled. + function test_paused_optimismPortalIdentifier_succeeds() external { + skipIfSysFeatureEnabled(Features.ETH_LOCKBOX); + + // Initially not paused + assertFalse(systemConfig.paused()); + + // Pause the system with OptimismPortal identifier + vm.prank(superchainConfig.guardian()); + superchainConfig.pause(address(optimismPortal2)); + + // Verify paused state + assertTrue(systemConfig.paused()); + } + + /// @notice Tests that `paused()` returns true when ETHLockbox identifier is paused and + /// ETH_LOCKBOX feature is enabled. function test_paused_ethLockboxIdentifier_succeeds() external { + skipIfSysFeatureDisabled(Features.ETH_LOCKBOX); + // Initially not paused assertFalse(systemConfig.paused()); @@ -711,20 +643,232 @@ contract SystemConfig_Paused_Test is SystemConfig_TestInit { assertTrue(systemConfig.paused()); } - /// @notice Tests that `paused()` returns false when any other address is set. - function test_paused_otherAddress_works() external { + /// @notice Tests that `paused()` returns true when both pauses are active. 
+ function test_paused_bothPausesActive_succeeds() external { + assertFalse(systemConfig.paused()); + + // Pause both globally and with identifier + vm.startPrank(superchainConfig.guardian()); + superchainConfig.pause(address(0)); + superchainConfig.pause(address(optimismPortal2)); + vm.stopPrank(); + + // Verify paused state + assertTrue(systemConfig.paused()); + } + + /// @notice Tests that `paused()` returns false when any other address is paused. + /// @param _address The address to pause. + function testFuzz_paused_otherAddress_succeeds(address _address) external { + vm.assume(_address != address(0)); + vm.assume(_address != address(optimismPortal2)); + vm.assume(_address != address(ethLockbox)); + // Initially not paused assertFalse(systemConfig.paused()); - // Pause the system with a different address + // Pause the system with a different address that's not global or identifier vm.prank(superchainConfig.guardian()); - superchainConfig.pause(address(0x1234)); + superchainConfig.pause(_address); // Verify still not paused assertFalse(systemConfig.paused()); } } +/// @title SystemConfig_SetFeature_Test +/// @notice Test contract for SystemConfig `setFeature` function. +contract SystemConfig_SetFeature_Test is SystemConfig_TestInit { + event FeatureSet(bytes32 indexed feature, bool indexed enabled); + + /// @notice Tests that `setFeature` reverts if the caller is not ProxyAdmin or ProxyAdmin owner. + /// @param _sender The address to test. 
+ function testFuzz_setFeature_notProxyAdminOrProxyAdminOwner_reverts(address _sender) external { + // Ensure sender is not ProxyAdmin or ProxyAdmin owner + vm.assume(_sender != address(systemConfig.proxyAdmin()) && _sender != systemConfig.proxyAdminOwner()); + + vm.expectRevert(IProxyAdminOwnedBase.ProxyAdminOwnedBase_NotProxyAdminOrProxyAdminOwner.selector); + vm.prank(_sender); + systemConfig.setFeature(EXAMPLE_FEATURE, true); + } + + /// @notice Tests that `setFeature` enables a feature successfully when called by ProxyAdmin. + function test_setFeature_enableFeatureByProxyAdmin_succeeds() external { + vm.expectEmit(address(systemConfig)); + emit FeatureSet(EXAMPLE_FEATURE, true); + + vm.prank(address(systemConfig.proxyAdmin())); + systemConfig.setFeature(EXAMPLE_FEATURE, true); + + // Verify feature is now enabled + assertTrue(systemConfig.isFeatureEnabled(EXAMPLE_FEATURE)); + } + + /// @notice Tests that `setFeature` disables a feature successfully when called by ProxyAdmin. + function test_setFeature_disableFeatureByProxyAdmin_succeeds() external { + // First enable the feature + vm.prank(address(systemConfig.proxyAdmin())); + systemConfig.setFeature(EXAMPLE_FEATURE, true); + assertTrue(systemConfig.isFeatureEnabled(EXAMPLE_FEATURE)); + + vm.expectEmit(address(systemConfig)); + emit FeatureSet(EXAMPLE_FEATURE, false); + + vm.prank(address(systemConfig.proxyAdmin())); + systemConfig.setFeature(EXAMPLE_FEATURE, false); + + // Verify feature is now disabled + assertFalse(systemConfig.isFeatureEnabled(EXAMPLE_FEATURE)); + } + + /// @notice Tests that `setFeature` enables a feature successfully when called by ProxyAdmin owner. 
+ function test_setFeature_enableFeatureByProxyAdminOwner_succeeds() external { + vm.expectEmit(address(systemConfig)); + emit FeatureSet(EXAMPLE_FEATURE, true); + + vm.prank(systemConfig.proxyAdminOwner()); + systemConfig.setFeature(EXAMPLE_FEATURE, true); + + // Verify feature is now enabled + assertTrue(systemConfig.isFeatureEnabled(EXAMPLE_FEATURE)); + } + + /// @notice Tests that `setFeature` can toggle the same feature multiple times. + function test_setFeature_multipleToggles_succeeds() external { + address proxyAdmin = address(systemConfig.proxyAdmin()); + + // Enable feature + vm.prank(proxyAdmin); + systemConfig.setFeature(EXAMPLE_FEATURE, true); + assertTrue(systemConfig.isFeatureEnabled(EXAMPLE_FEATURE)); + + // Disable feature + vm.prank(proxyAdmin); + systemConfig.setFeature(EXAMPLE_FEATURE, false); + assertFalse(systemConfig.isFeatureEnabled(EXAMPLE_FEATURE)); + + // Enable again + vm.prank(proxyAdmin); + systemConfig.setFeature(EXAMPLE_FEATURE, true); + assertTrue(systemConfig.isFeatureEnabled(EXAMPLE_FEATURE)); + } + + /// @notice Tests that `setFeature` reverts when trying to enable a feature that is already + /// enabled. + function test_setFeature_alreadyEnabled_reverts() external { + vm.prank(address(systemConfig.proxyAdmin())); + systemConfig.setFeature(EXAMPLE_FEATURE, true); + assertTrue(systemConfig.isFeatureEnabled(EXAMPLE_FEATURE)); + + vm.prank(address(systemConfig.proxyAdmin())); + vm.expectRevert(ISystemConfig.SystemConfig_InvalidFeatureState.selector); + systemConfig.setFeature(EXAMPLE_FEATURE, true); + } + + /// @notice Tests that `setFeature` reverts when trying to disable a feature that is already + /// disabled. 
+ function test_setFeature_alreadyDisabled_reverts() external { + vm.prank(address(systemConfig.proxyAdmin())); + vm.expectRevert(ISystemConfig.SystemConfig_InvalidFeatureState.selector); + systemConfig.setFeature("EXAMPLE FEATURE", false); + } + + /// @notice Tests that disabling ETH_LOCKBOX reverts if the OptimismPortal has a non-zero + /// ETHLockbox configured. + function test_setFeature_ethLockboxDisableWhileConfigured_reverts() external { + address proxyAdmin = address(systemConfig.proxyAdmin()); + + // Ensure ETH_LOCKBOX is enabled first (no pause active in fresh setup). + if (!systemConfig.isFeatureEnabled(Features.ETH_LOCKBOX)) { + vm.prank(proxyAdmin); + systemConfig.setFeature(Features.ETH_LOCKBOX, true); + assertTrue(systemConfig.isFeatureEnabled(Features.ETH_LOCKBOX)); + } + + // Force the portal to have a configured ETHLockbox address. + StorageSlot memory slot = ForgeArtifacts.getSlot("OptimismPortal2", "ethLockbox"); + vm.store(address(optimismPortal2), bytes32(slot.slot), bytes32(uint256(uint160(address(1))))); + + // Disabling should revert due to safety check while lockbox is configured. + vm.expectRevert(ISystemConfig.SystemConfig_InvalidFeatureState.selector); + vm.prank(proxyAdmin); + systemConfig.setFeature(Features.ETH_LOCKBOX, false); + } + + /// @notice Tests that enabling ETH_LOCKBOX while the system is paused (global) reverts. + function test_setFeature_ethLockboxEnableWhilePaused_reverts() external { + address proxyAdmin = address(systemConfig.proxyAdmin()); + + // Ensure ETH_LOCKBOX is enabled first (no pause active in fresh setup). + if (!systemConfig.isFeatureEnabled(Features.ETH_LOCKBOX)) { + vm.prank(proxyAdmin); + systemConfig.setFeature(Features.ETH_LOCKBOX, true); + assertTrue(systemConfig.isFeatureEnabled(Features.ETH_LOCKBOX)); + } + + // Pause globally. + vm.prank(superchainConfig.guardian()); + superchainConfig.pause(address(0)); + + // Enabling while paused should revert. 
+ vm.expectRevert(ISystemConfig.SystemConfig_InvalidFeatureState.selector); + vm.prank(proxyAdmin); + systemConfig.setFeature(Features.ETH_LOCKBOX, true); + } + + /// @notice Tests that disabling ETH_LOCKBOX while the system is paused (global) reverts. + function test_setFeature_ethLockboxDisableWhilePaused_reverts() external { + address proxyAdmin = address(systemConfig.proxyAdmin()); + + // Ensure ETH_LOCKBOX is enabled first. + if (!systemConfig.isFeatureEnabled(Features.ETH_LOCKBOX)) { + vm.prank(proxyAdmin); + systemConfig.setFeature(Features.ETH_LOCKBOX, true); + assertTrue(systemConfig.isFeatureEnabled(Features.ETH_LOCKBOX)); + } + + // Pause globally. + vm.prank(superchainConfig.guardian()); + superchainConfig.pause(address(0)); + + // Disabling while paused should revert. + vm.expectRevert(ISystemConfig.SystemConfig_InvalidFeatureState.selector); + vm.prank(proxyAdmin); + systemConfig.setFeature(Features.ETH_LOCKBOX, false); + } +} + +/// @title SystemConfig_IsFeatureEnabled_Test +/// @notice Test contract for SystemConfig `isFeatureEnabled` function. +contract SystemConfig_IsFeatureEnabled_Test is SystemConfig_TestInit { + /// @notice Tests that `isFeatureEnabled` returns false for unset features. + /// @param _feature The feature to check. + function testFuzz_isFeatureEnabled_unsetFeature_succeeds(bytes32 _feature) external view { + assertFalse(systemConfig.isFeatureEnabled(_feature)); + } + + /// @notice Tests that `isFeatureEnabled` returns correct value after feature is enabled. + function test_isFeatureEnabled_afterEnable_succeeds() external { + vm.prank(address(systemConfig.proxyAdmin())); + systemConfig.setFeature(EXAMPLE_FEATURE, true); + + assertTrue(systemConfig.isFeatureEnabled(EXAMPLE_FEATURE)); + } + + /// @notice Tests that `isFeatureEnabled` returns correct value after feature is disabled. 
+ function test_isFeatureEnabled_afterDisable_succeeds() external { + // First enable the feature + vm.prank(address(systemConfig.proxyAdmin())); + systemConfig.setFeature(EXAMPLE_FEATURE, true); + assertTrue(systemConfig.isFeatureEnabled(EXAMPLE_FEATURE)); + + // Then disable it + vm.prank(address(systemConfig.proxyAdmin())); + systemConfig.setFeature(EXAMPLE_FEATURE, false); + assertFalse(systemConfig.isFeatureEnabled(EXAMPLE_FEATURE)); + } +} + /// @title SystemConfig_Guardian_Test /// @notice Test contract for SystemConfig `guardian` function. contract SystemConfig_Guardian_Test is SystemConfig_TestInit { @@ -743,3 +887,23 @@ contract SystemConfig_SuperchainConfig_Test is SystemConfig_TestInit { assertEq(address(systemConfig.superchainConfig()), address(superchainConfig)); } } + +/// @title SystemConfig_SetMinBaseFee_Test +/// @notice Test contract for SystemConfig `setMinBaseFee` function. +contract SystemConfig_SetMinBaseFee_Test is SystemConfig_TestInit { + /// @notice Tests that `setMinBaseFee` reverts if the caller is not the owner. + function test_setMinBaseFee_notOwner_reverts() external { + vm.expectRevert("Ownable: caller is not the owner"); + systemConfig.setMinBaseFee(0); + } + + /// @notice Tests that `setMinBaseFee` updates the min base fee successfully. 
+ function testFuzz_setMinBaseFee_succeeds(uint64 newMinBaseFee) external { + vm.expectEmit(address(systemConfig)); + emit ConfigUpdate(0, ISystemConfig.UpdateType.MIN_BASE_FEE, abi.encode(newMinBaseFee)); + + vm.prank(systemConfig.owner()); + systemConfig.setMinBaseFee(newMinBaseFee); + assertEq(systemConfig.minBaseFee(), newMinBaseFee); + } +} diff --git a/packages/contracts-bedrock/test/L2/GasPriceOracle.t.sol b/packages/contracts-bedrock/test/L2/GasPriceOracle.t.sol index 8b92a7dbd5e14..95053e369da11 100644 --- a/packages/contracts-bedrock/test/L2/GasPriceOracle.t.sol +++ b/packages/contracts-bedrock/test/L2/GasPriceOracle.t.sol @@ -9,10 +9,6 @@ import { Fork } from "scripts/libraries/Config.sol"; import { Encoding } from "src/libraries/Encoding.sol"; contract GasPriceOracle_Test is CommonTest { - event OverheadUpdated(uint256); - event ScalarUpdated(uint256); - event DecimalsUpdated(uint256); - address depositor; // The initial L1 context values diff --git a/packages/contracts-bedrock/test/L2/L2CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/L2/L2CrossDomainMessenger.t.sol index ea223778ec541..61ce9d0969688 100644 --- a/packages/contracts-bedrock/test/L2/L2CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/L2/L2CrossDomainMessenger.t.sol @@ -35,6 +35,8 @@ contract L2CrossDomainMessenger_Constructor_Test is L2CrossDomainMessenger_TestI assertEq(address(impl.OTHER_MESSENGER()), address(0)); assertEq(address(impl.otherMessenger()), address(0)); assertEq(address(impl.l1CrossDomainMessenger()), address(0)); + assertGt(bytes(impl.version()).length, 0); + assertEq(impl.MESSAGE_VERSION(), 1); } } @@ -46,13 +48,29 @@ contract L2CrossDomainMessenger_Initialize_Test is L2CrossDomainMessenger_TestIn assertEq(address(l2CrossDomainMessenger.OTHER_MESSENGER()), address(l1CrossDomainMessenger)); assertEq(address(l2CrossDomainMessenger.otherMessenger()), address(l1CrossDomainMessenger)); 
assertEq(address(l2CrossDomainMessenger.l1CrossDomainMessenger()), address(l1CrossDomainMessenger)); + assertGt(bytes(l2CrossDomainMessenger.version()).length, 0); + assertEq(l2CrossDomainMessenger.MESSAGE_VERSION(), 1); + assertGt(l2CrossDomainMessenger.messageNonce(), 0); } } /// @title L2CrossDomainMessenger_SendMessage_Test /// @notice Tests the `sendMessage` function of the `L2CrossDomainMessenger` contract. contract L2CrossDomainMessenger_SendMessage_Test is L2CrossDomainMessenger_TestInit { - /// @notice Tests that `sendMessage` executes successfully. + /// @notice Tests that `sendMessage` executes successfully with various target addresses and gas limits. + function testFuzz_sendMessage_withValidTargetAndGasLimit_succeeds(address _target, uint32 _minGasLimit) external { + vm.assume(_target != address(0)); + _minGasLimit = uint32(bound(_minGasLimit, 21000, 30_000_000)); + + uint256 initialNonce = l2CrossDomainMessenger.messageNonce(); + + vm.prank(alice); + l2CrossDomainMessenger.sendMessage(_target, hex"1234", _minGasLimit); + + assertEq(l2CrossDomainMessenger.messageNonce(), initialNonce + 1); + } + + /// @notice Tests that `sendMessage` executes successfully with the original test case. function test_sendMessage_succeeds() external { bytes memory xDomainCallData = Encoding.encodeCrossDomainMessage(l2CrossDomainMessenger.messageNonce(), alice, recipient, 0, 100, hex"ff"); @@ -64,7 +82,6 @@ contract L2CrossDomainMessenger_SendMessage_Test is L2CrossDomainMessenger_TestI ) ); - // MessagePassed event vm.expectEmit(true, true, true, true); emit MessagePassed( l2ToL1MessagePasser.messageNonce(), @@ -89,21 +106,19 @@ contract L2CrossDomainMessenger_SendMessage_Test is L2CrossDomainMessenger_TestI l2CrossDomainMessenger.sendMessage(recipient, hex"ff", uint32(100)); } - /// @notice Tests that `sendMessage` can be called twice and that the nonce increments - /// correctly. 
+ /// @notice Tests that `sendMessage` can be called twice and that the nonce increments correctly. function test_sendMessage_twice_succeeds() external { uint256 nonce = l2CrossDomainMessenger.messageNonce(); l2CrossDomainMessenger.sendMessage(recipient, hex"aa", uint32(500_000)); l2CrossDomainMessenger.sendMessage(recipient, hex"aa", uint32(500_000)); - // the nonce increments for each message sent assertEq(nonce + 2, l2CrossDomainMessenger.messageNonce()); } } -/// @title L2CrossDomainMessenger_Unclassified_Test +/// @title L2CrossDomainMessenger_Uncategorized_Test /// @notice General tests that are not testing any function directly of the /// `L2CrossDomainMessenger` contract. -contract L2CrossDomainMessenger_Unclassified_Test is L2CrossDomainMessenger_TestInit { +contract L2CrossDomainMessenger_Uncategorized_Test is L2CrossDomainMessenger_TestInit { /// @notice Tests that `messageNonce` can be decoded correctly. function test_messageVersion_succeeds() external view { (, uint16 version) = Encoding.decodeVersionedNonce(l2CrossDomainMessenger.messageNonce()); diff --git a/packages/contracts-bedrock/test/L2/L2StandardBridge.t.sol b/packages/contracts-bedrock/test/L2/L2StandardBridge.t.sol index b7aedc1ac6713..a3f7297426ba1 100644 --- a/packages/contracts-bedrock/test/L2/L2StandardBridge.t.sol +++ b/packages/contracts-bedrock/test/L2/L2StandardBridge.t.sol @@ -397,10 +397,10 @@ contract L2StandardBridge_WithdrawTo_Test is L2StandardBridge_TestInit { } } -/// @title L2StandardBridge_Unclassified_Test +/// @title L2StandardBridge_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `L2StandardBridge` /// contract. -contract L2StandardBridge_Unclassified_Test is L2StandardBridge_TestInit { +contract L2StandardBridge_Uncategorized_Test is L2StandardBridge_TestInit { /// @notice Ensures that the L2StandardBridge is always not paused. The pausability happens /// on L1 and not L2. 
function test_paused_succeeds() external view { diff --git a/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol index cb1ebc336f83c..1fb3d2386907c 100644 --- a/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/L2/L2ToL2CrossDomainMessenger.t.sol @@ -775,8 +775,12 @@ contract L2ToL2CrossDomainMessenger_RelayMessage_Test is L2ToL2CrossDomainMessen ) external { - // Ensure that the target contract is not CrossL2Inbox or L2ToL2CrossDomainMessenger - vm.assume(_target != Predeploys.CROSS_L2_INBOX && _target != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); + // Ensure that the target contract is not CrossL2Inbox or L2ToL2CrossDomainMessenger or the + // foundry VM + vm.assume( + _target != Predeploys.CROSS_L2_INBOX && _target != Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER + && _target != foundryVMAddress + ); // Ensure that the target call is payable if value is sent if (_value > 0) assumePayable(_target); @@ -785,7 +789,6 @@ contract L2ToL2CrossDomainMessenger_RelayMessage_Test is L2ToL2CrossDomainMessen vm.mockCallRevert({ callee: _target, msgValue: _value, data: _message, revertData: _revertData }); // Construct the identifier -- using some hardcoded values for the block number, log index, - // and time to avoid stack too deep errors. 
Identifier memory id = Identifier(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, 1, 1, 1, _source); diff --git a/packages/contracts-bedrock/test/L2/OptimismMintableERC721.t.sol b/packages/contracts-bedrock/test/L2/OptimismMintableERC721.t.sol index aa503d0a96e59..42420046af546 100644 --- a/packages/contracts-bedrock/test/L2/OptimismMintableERC721.t.sol +++ b/packages/contracts-bedrock/test/L2/OptimismMintableERC721.t.sol @@ -156,10 +156,10 @@ contract OptimismMintableERC721_SupportsInterface_Test is OptimismMintableERC721 } } -/// @title OptimismMintableERC721_Unclassified_Test +/// @title OptimismMintableERC721_Uncategorized_Test /// @notice General tests that are not testing any function directly of the /// `OptimismMintableERC721` contract. -contract OptimismMintableERC721_Unclassified_Test is OptimismMintableERC721_TestInit { +contract OptimismMintableERC721_Uncategorized_Test is OptimismMintableERC721_TestInit { /// @notice Tests that the `tokenURI` function returns the correct URI for a minted token. function test_tokenURI_succeeds() external { // Mint the token first. diff --git a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol index 80f8d7590a220..b59bd897feeb7 100644 --- a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol +++ b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol @@ -300,10 +300,10 @@ contract OptimismSuperchainERC20_SupportsInterface_Test is OptimismSuperchainERC } } -/// @title OptimismSuperchainERC20_Unclassified_Test +/// @title OptimismSuperchainERC20_Uncategorized_Test /// @notice General tests that are not testing any function directly of the /// `OptimismSuperchainERC20` contract. 
-contract OptimismSuperchainERC20_Unclassified_Test is OptimismSuperchainERC20_TestInit { +contract OptimismSuperchainERC20_Uncategorized_Test is OptimismSuperchainERC20_TestInit { /// @notice Tests that the allowance function returns the max uint256 value when the spender is /// Permit. /// @param _randomCaller The address that will call the function - used to fuzz better since diff --git a/packages/contracts-bedrock/test/cannon/MIPS64.t.sol b/packages/contracts-bedrock/test/cannon/MIPS64.t.sol index 043363dae8f1b..fdcf0671ab222 100644 --- a/packages/contracts-bedrock/test/cannon/MIPS64.t.sol +++ b/packages/contracts-bedrock/test/cannon/MIPS64.t.sol @@ -6,7 +6,7 @@ import { Test } from "forge-std/Test.sol"; import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { UnsupportedStateVersion } from "src/cannon/libraries/CannonErrors.sol"; import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; -import { IMIPS2 } from "interfaces/cannon/IMIPS2.sol"; +import { IMIPS64 } from "interfaces/cannon/IMIPS64.sol"; /// @title MIPS64_TestInit /// @notice Reusable test initialization for `MIPS64` tests. @@ -39,25 +39,25 @@ contract MIPS64_TestInit is Test { } /// @notice Deploys new MIPS64 contract with the given version parameter. - function deployVm(uint256 version) internal returns (IMIPS2) { - return IMIPS2( + function deployVm(uint256 version) internal returns (IMIPS64) { + return IMIPS64( DeployUtils.create1({ _name: "MIPS64", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IMIPS2.__constructor__, (oracle, version))) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IMIPS64.__constructor__, (oracle, version))) }) ); } } -/// @title MIPS64_Unclassified_Test +/// @title MIPS64_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `MIPS64` contract or /// are testing multiple functions at once. 
-contract MIPS64_Unclassified_Test is MIPS64_TestInit { +contract MIPS64_Uncategorized_Test is MIPS64_TestInit { /// @notice Test the we can deploy MIPS64 with a valid version parameter. function test_deploy_supportedVersions_succeeds() external { for (uint256 i = 0; i < validVersions.length; i++) { uint256 version = validVersions[i]; - IMIPS2 mips = deployVm(version); + IMIPS64 mips = deployVm(version); assertNotEq(address(mips), address(0)); } } diff --git a/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol b/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol index 5edb5daffbef4..cc487bc92eea7 100644 --- a/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol +++ b/packages/contracts-bedrock/test/cannon/PreimageOracle.t.sol @@ -1544,10 +1544,10 @@ contract PreimageOracle_SqueezeLPP_Test is PreimageOracle_TestInit { } } -/// @title PreimageOracle_Unclassified_Test +/// @title PreimageOracle_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `PreimageOracle` /// contract or are testing multiple functions at once. -contract PreimageOracle_Unclassified_Test is PreimageOracle_TestInit { +contract PreimageOracle_Uncategorized_Test is PreimageOracle_TestInit { /// @notice Test the pre-image key computation with a known pre-image. 
function test_keccak256PreimageKey_succeeds() public pure { bytes memory preimage = hex"deadbeef"; diff --git a/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol b/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol index 186761a185578..58b4e6e624a22 100644 --- a/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol +++ b/packages/contracts-bedrock/test/dispute/DisputeGameFactory.t.sol @@ -18,8 +18,10 @@ import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; import { IProxyAdminOwnedBase } from "interfaces/L1/IProxyAdminOwnedBase.sol"; import { IPreimageOracle } from "interfaces/cannon/IPreimageOracle.sol"; import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { IFaultDisputeGameV2 } from "interfaces/dispute/v2/IFaultDisputeGameV2.sol"; import { ISuperFaultDisputeGame } from "interfaces/dispute/ISuperFaultDisputeGame.sol"; import { IPermissionedDisputeGame } from "interfaces/dispute/IPermissionedDisputeGame.sol"; +import { IPermissionedDisputeGameV2 } from "interfaces/dispute/v2/IPermissionedDisputeGameV2.sol"; import { ISuperPermissionedDisputeGame } from "interfaces/dispute/ISuperPermissionedDisputeGame.sol"; // Mocks import { AlphabetVM } from "test/mocks/AlphabetVM.sol"; @@ -50,8 +52,11 @@ contract DisputeGameFactory_TestInit is CommonTest { event DisputeGameCreated(address indexed disputeProxy, GameType indexed gameType, Claim indexed rootClaim); event ImplementationSet(address indexed impl, GameType indexed gameType); + event ImplementationArgsSet(GameType indexed gameType, bytes args); event InitBondUpdated(GameType indexed gameType, uint256 indexed newBond); + uint256 l2ChainId = 111; + function setUp() public virtual override { super.setUp(); fakeClone = new DisputeGameFactory_FakeClone_Harness(); @@ -97,6 +102,20 @@ contract DisputeGameFactory_TestInit is CommonTest { }); } + function _getGameConstructorParamsV2(GameType _gameType) + internal + pure + returns 
(IFaultDisputeGameV2.GameConstructorParams memory params_) + { + return IFaultDisputeGameV2.GameConstructorParams({ + gameType: _gameType, + maxGameDepth: 2 ** 3, + splitDepth: 2 ** 2, + clockExtension: Duration.wrap(3 hours), + maxClockDuration: Duration.wrap(3.5 days) + }); + } + function _getSuperGameConstructorParams( Claim _absolutePrestate, AlphabetVM _vm, @@ -111,8 +130,20 @@ contract DisputeGameFactory_TestInit is CommonTest { } function _setGame(address _gameImpl, GameType _gameType) internal { + _setGame(_gameImpl, _gameType, false, ""); + } + + function _setGame(address _gameImpl, GameType _gameType, bytes memory _implArgs) internal { + _setGame(_gameImpl, _gameType, true, _implArgs); + } + + function _setGame(address _gameImpl, GameType _gameType, bool _hasImplArgs, bytes memory _implArgs) internal { vm.startPrank(disputeGameFactory.owner()); - disputeGameFactory.setImplementation(_gameType, IDisputeGame(_gameImpl)); + if (_hasImplArgs) { + disputeGameFactory.setImplementation(_gameType, IDisputeGame(_gameImpl), _implArgs); + } else { + disputeGameFactory.setImplementation(_gameType, IDisputeGame(_gameImpl)); + } disputeGameFactory.setInitBond(_gameType, 0.08 ether); vm.stopPrank(); } @@ -183,6 +214,43 @@ contract DisputeGameFactory_TestInit is CommonTest { _setGame(gameImpl_, GameTypes.CANNON); } + /// @notice Sets up immutable data for fault game v2 implementation + function getFaultDisputeGameV2ImmutableArgs(Claim _absolutePrestate) + internal + returns (bytes memory immutableArgs_, AlphabetVM vm_, IPreimageOracle preimageOracle_) + { + (vm_, preimageOracle_) = _createVM(_absolutePrestate); + // Encode the implementation args for CWIA (tightly packed) + immutableArgs_ = abi.encodePacked( + _absolutePrestate, // 32 bytes + vm_, // 20 bytes + anchorStateRegistry, // 20 bytes + delayedWeth, // 20 bytes + l2ChainId // 32 bytes (l2ChainId) + ); + } + + /// @notice Sets up a fault game v2 implementation + function setupFaultDisputeGameV2(Claim 
_absolutePrestate) + internal + returns (address gameImpl_, AlphabetVM vm_, IPreimageOracle preimageOracle_) + { + bytes memory immutableArgs; + (immutableArgs, vm_, preimageOracle_) = getFaultDisputeGameV2ImmutableArgs(_absolutePrestate); + gameImpl_ = setupFaultDisputeGameV2(immutableArgs); + } + + function setupFaultDisputeGameV2(bytes memory immutableArgs) internal returns (address gameImpl_) { + gameImpl_ = DeployUtils.create1({ + _name: "FaultDisputeGameV2", + _args: DeployUtils.encodeConstructor( + abi.encodeCall(IFaultDisputeGameV2.__constructor__, (_getGameConstructorParamsV2(GameTypes.CANNON))) + ) + }); + + _setGame(gameImpl_, GameTypes.CANNON, immutableArgs); + } + function setupPermissionedDisputeGame( Claim _absolutePrestate, address _proposer, @@ -208,6 +276,65 @@ contract DisputeGameFactory_TestInit is CommonTest { _setGame(gameImpl_, GameTypes.PERMISSIONED_CANNON); } + + function changeClaimStatus(Claim _claim, VMStatus _status) public pure returns (Claim out_) { + assembly { + out_ := or(and(not(shl(248, 0xFF)), _claim), shl(248, _status)) + } + } + + /// @notice Sets up immutable args for PDG v2 implementation + function getPermissionedDisputeGameV2ImmutableArgs( + Claim _absolutePrestate, + address _proposer, + address _challenger + ) + internal + returns (bytes memory implArgs_, AlphabetVM vm_, IPreimageOracle preimageOracle_) + { + (vm_, preimageOracle_) = _createVM(_absolutePrestate); + + // Encode the implementation args for CWIA (tightly packed) + implArgs_ = abi.encodePacked( + _absolutePrestate, // 32 bytes + vm_, // 20 bytes + anchorStateRegistry, // 20 bytes + delayedWeth, // 20 bytes + l2ChainId, // 32 bytes (l2ChainId), + _proposer, // 20 bytes + _challenger // 20 bytes + ); + } + + /// @notice Deploys PDG v2 implementation and sets it on the DGF + function setupPermissionedDisputeGameV2( + Claim _absolutePrestate, + address _proposer, + address _challenger + ) + internal + returns (address gameImpl_, AlphabetVM vm_, IPreimageOracle 
preimageOracle_) + { + bytes memory implArgs; + (implArgs, vm_, preimageOracle_) = + getPermissionedDisputeGameV2ImmutableArgs(_absolutePrestate, _proposer, _challenger); + + gameImpl_ = setupPermissionedDisputeGameV2(implArgs); + } + + /// @notice Deploys PDG v2 implementation and sets it on the DGF + function setupPermissionedDisputeGameV2(bytes memory _implArgs) internal returns (address gameImpl_) { + gameImpl_ = DeployUtils.create1({ + _name: "PermissionedDisputeGameV2", + _args: DeployUtils.encodeConstructor( + abi.encodeCall( + IPermissionedDisputeGameV2.__constructor__, (_getGameConstructorParamsV2(GameTypes.PERMISSIONED_CANNON)) + ) + ) + }); + + _setGame(gameImpl_, GameTypes.PERMISSIONED_CANNON, _implArgs); + } } /// @title DisputeGameFactory_Initialize_Test @@ -375,10 +502,45 @@ contract DisputeGameFactory_Create_Test is DisputeGameFactory_TestInit { disputeGameFactory.create{ value: bondAmount }(gt, rootClaim, extraData); } - function changeClaimStatus(Claim _claim, VMStatus _status) public pure returns (Claim out_) { - assembly { - out_ := or(and(not(shl(248, 0xFF)), _claim), shl(248, _status)) - } + function test_create_implArgs_succeeds() public { + Claim absolutePrestate = Claim.wrap(bytes32(hex"dead")); + (, AlphabetVM vm_,) = setupFaultDisputeGameV2(absolutePrestate); + + Claim rootClaim = changeClaimStatus(Claim.wrap(bytes32(hex"beef")), VMStatuses.INVALID); + // extraData should contain the l2BlockNumber as first 32 bytes + bytes memory extraData = bytes.concat(bytes32(uint256(type(uint32).max))); + + uint256 bondAmount = disputeGameFactory.initBonds(GameTypes.CANNON); + vm.deal(address(this), bondAmount); + + // Create the game + IDisputeGame proxy = disputeGameFactory.create{ value: bondAmount }(GameTypes.CANNON, rootClaim, extraData); + + // Verify the game was created and stored + (IDisputeGame game, Timestamp timestamp) = disputeGameFactory.games(GameTypes.CANNON, rootClaim, extraData); + + assertEq(address(game), address(proxy)); + 
assertEq(Timestamp.unwrap(timestamp), block.timestamp); + + // Verify the game has the correct parameters via CWIA + IFaultDisputeGameV2 gameV2 = IFaultDisputeGameV2(address(proxy)); + + // Test CWIA getters + assertEq(Claim.unwrap(gameV2.absolutePrestate()), Claim.unwrap(absolutePrestate)); + assertEq(Claim.unwrap(gameV2.rootClaim()), Claim.unwrap(rootClaim)); + assertEq(gameV2.extraData(), extraData); + assertEq(gameV2.l2ChainId(), l2ChainId); + assertEq(address(gameV2.gameCreator()), address(this)); + assertEq(gameV2.l2BlockNumber(), uint256(type(uint32).max)); + assertEq(address(gameV2.vm()), address(vm_)); + assertEq(address(gameV2.weth()), address(delayedWeth)); + assertEq(address(gameV2.anchorStateRegistry()), address(anchorStateRegistry)); + // Test Constructor args + assertEq(GameType.unwrap(gameV2.gameType()), GameType.unwrap(GameTypes.CANNON)); + assertEq(gameV2.maxGameDepth(), 2 ** 3); + assertEq(gameV2.splitDepth(), 2 ** 2); + assertEq(Duration.unwrap(gameV2.clockExtension()), Duration.unwrap(Duration.wrap(3 hours))); + assertEq(Duration.unwrap(gameV2.maxClockDuration()), Duration.unwrap(Duration.wrap(3.5 days))); } } @@ -405,6 +567,48 @@ contract DisputeGameFactory_SetImplementation_Test is DisputeGameFactory_TestIni vm.expectRevert("Ownable: caller is not the owner"); disputeGameFactory.setImplementation(GameTypes.CANNON, IDisputeGame(address(1))); } + + /// @notice Tests that the `setImplementation` function with args properly sets the implementation + /// and args for a given `GameType`. 
+ function test_setImplementation_withArgs_succeeds() public { + address fakeGame = address(1); + Claim absolutePrestate = Claim.wrap(bytes32(hex"dead")); + AlphabetVM vm_; + IPreimageOracle preimageOracle_; + (vm_, preimageOracle_) = _createVM(absolutePrestate); + uint256 l2ChainId = 111; + + bytes memory args = abi.encodePacked( + absolutePrestate, // 32 bytes + vm_, // 20 bytes + anchorStateRegistry, // 20 bytes + delayedWeth, // 20 bytes + l2ChainId // 32 bytes (l2ChainId) + ); + + vm.expectEmit(true, true, true, true, address(disputeGameFactory)); + emit ImplementationSet(address(1), GameTypes.CANNON); + vm.expectEmit(true, true, true, true, address(disputeGameFactory)); + emit ImplementationArgsSet(GameTypes.CANNON, args); + + // Set the implementation and args for the `GameTypes.CANNON` enum value. + disputeGameFactory.setImplementation(GameTypes.CANNON, IDisputeGame(fakeGame), args); + + // Ensure that the implementation for the `GameTypes.CANNON` enum value is set. + assertEq(address(disputeGameFactory.gameImpls(GameTypes.CANNON)), address(1)); + // Ensure that the args for the `GameTypes.CANNON` enum value are set. + assertEq(disputeGameFactory.gameArgs(GameTypes.CANNON), args); + } + + /// @notice Tests that the `setImplementation` function with args reverts when called by a non-owner. + function test_setImplementationArgs_notOwner_reverts() public { + bytes memory args = abi.encode(uint256(123), address(0xdead)); + + // Ensure that the `setImplementation` function reverts when called by a non-owner. 
+ vm.prank(address(0)); + vm.expectRevert("Ownable: caller is not the owner"); + disputeGameFactory.setImplementation(GameTypes.CANNON, IDisputeGame(address(1)), args); + } } /// @title DisputeGameFactory_SetInitBond_Test @@ -602,10 +806,10 @@ contract DisputeGameFactory_FindLatestGames_Test is DisputeGameFactory_TestInit } } -/// @title DisputeGameFactory_Unclassified_Test +/// @title DisputeGameFactory_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `DisputeGameFactory` /// contract or are testing multiple functions at once. -contract DisputeGameFactory_Unclassified_Test is DisputeGameFactory_TestInit { +contract DisputeGameFactory_Uncategorized_Test is DisputeGameFactory_TestInit { /// @notice Tests that the `owner` function returns the correct address after deployment. function test_owner_succeeds() public view { assertEq(disputeGameFactory.owner(), address(this)); diff --git a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol index 22680bd0840d9..686b4489f5dad 100644 --- a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol @@ -2641,10 +2641,10 @@ contract FaultDisputeGame_GetChallengerDuration_Test is FaultDisputeGame_TestIni } } -/// @title FaultDisputeGame_Unclassified_Test +/// @title FaultDisputeGame_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `FaultDisputeGame` /// contract or are testing multiple functions at once. -contract FaultDisputeGame_Unclassified_Test is FaultDisputeGame_TestInit { +contract FaultDisputeGame_Uncategorized_Test is FaultDisputeGame_TestInit { /// @notice Tests that the game's starting timestamp is set correctly. 
function test_createdAt_succeeds() public view { assertEq(gameProxy.createdAt().raw(), block.timestamp); diff --git a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol index d1a4458456cdf..30c0e547a2981 100644 --- a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol @@ -86,7 +86,7 @@ contract PermissionedDisputeGame_TestInit is DisputeGameFactory_TestInit { assertEq(address(gameProxy.vm()), address(_vm)); // Label the proxy - vm.label(address(gameProxy), "FaultDisputeGame_Clone"); + vm.label(address(gameProxy), "PermissionedDisputeGame_Clone"); } function setUp() public override { @@ -189,10 +189,10 @@ contract PermissionedDisputeGame_Step_Test is PermissionedDisputeGame_TestInit { } } -/// @title PermissionedDisputeGame_Unclassified_Test +/// @title PermissionedDisputeGame_Uncategorized_Test /// @notice General tests that are not testing any function directly of the /// `PermissionedDisputeGame` contract or are testing multiple functions at once. -contract PermissionedDisputeGame_Unclassified_Test is PermissionedDisputeGame_TestInit { +contract PermissionedDisputeGame_Uncategorized_Test is PermissionedDisputeGame_TestInit { /// @notice Tests that the proposer can create a permissioned dispute game. 
function test_createGame_proposer_succeeds() public { uint256 bondAmount = disputeGameFactory.initBonds(GAME_TYPE); diff --git a/packages/contracts-bedrock/test/dispute/SuperFaultDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/SuperFaultDisputeGame.t.sol index 8574646a9fdf3..48e839adb9b4b 100644 --- a/packages/contracts-bedrock/test/dispute/SuperFaultDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/SuperFaultDisputeGame.t.sol @@ -2325,10 +2325,10 @@ contract SuperFaultDisputeGame_GetChallengerDuration_Test is SuperFaultDisputeGa } } -/// @title SuperFaultDisputeGame_Unclassified_Test +/// @title SuperFaultDisputeGame_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `SuperFaultDisputeGame` /// contract or are testing multiple functions at once. -contract SuperFaultDisputeGame_Unclassified_Test is SuperFaultDisputeGame_TestInit { +contract SuperFaultDisputeGame_Uncategorized_Test is SuperFaultDisputeGame_TestInit { /// @notice Tests that the game's starting timestamp is set correctly. function test_createdAt_succeeds() public view { assertEq(gameProxy.createdAt().raw(), block.timestamp); diff --git a/packages/contracts-bedrock/test/dispute/SuperPermissionedDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/SuperPermissionedDisputeGame.t.sol index 3fd1102dff3d1..69369c18e47f2 100644 --- a/packages/contracts-bedrock/test/dispute/SuperPermissionedDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/SuperPermissionedDisputeGame.t.sol @@ -187,10 +187,10 @@ contract SuperPermissionedDisputeGame_Step_Test is SuperPermissionedDisputeGame_ } } -/// @title SuperPermissionedDisputeGame_Unclassified_Test +/// @title SuperPermissionedDisputeGame_Uncategorized_Test /// @notice General tests that are not testing any function directly of the /// `SuperPermissionedDisputeGame` contract or are testing multiple functions at once. 
-contract SuperPermissionedDisputeGame_Unclassified_Test is SuperPermissionedDisputeGame_TestInit { +contract SuperPermissionedDisputeGame_Uncategorized_Test is SuperPermissionedDisputeGame_TestInit { /// @notice Tests that the proposer can create a permissioned dispute game. function test_createGame_proposer_succeeds() public { uint256 bondAmount = disputeGameFactory.initBonds(GAME_TYPE); diff --git a/packages/contracts-bedrock/test/dispute/lib/LibPosition.t.sol b/packages/contracts-bedrock/test/dispute/lib/LibPosition.t.sol index b14228af0d6b0..b14975378a51f 100644 --- a/packages/contracts-bedrock/test/dispute/lib/LibPosition.t.sol +++ b/packages/contracts-bedrock/test/dispute/lib/LibPosition.t.sol @@ -213,10 +213,10 @@ contract LibPosition_Move_Test is LibPosition_TestInit { } } -/// @title LibPosition_Unclassified_Test +/// @title LibPosition_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `LibPosition` contract /// or are testing multiple functions at once. -contract LibPosition_Unclassified_Test is LibPosition_TestInit { +contract LibPosition_Uncategorized_Test is LibPosition_TestInit { /// @notice A static unit test for the correctness of all gindicies, (depth, index) combos, and /// the trace index in a tree of max depth = 4. 
function test_pos_correctness_succeeds() public pure { diff --git a/packages/contracts-bedrock/test/dispute/v2/FaultDisputeGameV2.t.sol b/packages/contracts-bedrock/test/dispute/v2/FaultDisputeGameV2.t.sol new file mode 100644 index 0000000000000..feab844563187 --- /dev/null +++ b/packages/contracts-bedrock/test/dispute/v2/FaultDisputeGameV2.t.sol @@ -0,0 +1,3207 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.15; + +// Testing +import { Vm } from "forge-std/Vm.sol"; +import { DisputeGameFactory_TestInit } from "test/dispute/DisputeGameFactory.t.sol"; +import { AlphabetVM } from "test/mocks/AlphabetVM.sol"; +import { stdError } from "forge-std/StdError.sol"; + +// Scripts +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; + +// Contracts +import { DisputeActor, HonestDisputeActor } from "test/actors/FaultDisputeActors.sol"; + +// Libraries +import { Types } from "src/libraries/Types.sol"; +import { Hashing } from "src/libraries/Hashing.sol"; +import { RLPWriter } from "src/libraries/rlp/RLPWriter.sol"; +import { LibClock } from "src/dispute/lib/LibUDT.sol"; +import { LibPosition } from "src/dispute/lib/LibPosition.sol"; +import "src/dispute/lib/Types.sol"; +import "src/dispute/lib/Errors.sol"; + +// Interfaces +import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; +import { IPreimageOracle } from "interfaces/dispute/IBigStepper.sol"; +import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; +import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; +import { IFaultDisputeGameV2 } from "interfaces/dispute/v2/IFaultDisputeGameV2.sol"; + +contract ClaimCreditReenter { + Vm internal immutable vm; + IFaultDisputeGameV2 internal immutable GAME; + uint256 public numCalls; + + constructor(IFaultDisputeGameV2 _gameProxy, Vm _vm) { + GAME = _gameProxy; + vm = _vm; + } + + function claimCredit(address _recipient) public { + numCalls += 1; + if (numCalls > 1) { + 
vm.expectRevert(NoCreditToClaim.selector); + } + GAME.claimCredit(_recipient); + } + + receive() external payable { + if (numCalls == 5) { + return; + } + claimCredit(address(this)); + } +} + +/// @notice Helper to change the VM status byte of a claim. +function _changeClaimStatus(Claim _claim, VMStatus _status) pure returns (Claim out_) { + assembly { + out_ := or(and(not(shl(248, 0xFF)), _claim), shl(248, _status)) + } +} + +/// @title BaseFaultDisputeGameV2_TestInit +/// @notice Base test initializer that can be used by other contracts outside of this test suite. +contract BaseFaultDisputeGameV2_TestInit is DisputeGameFactory_TestInit { + /// @dev The type of the game being tested. + GameType internal immutable GAME_TYPE = GameTypes.CANNON; + + /// @dev The initial bond for the game. + uint256 internal initBond; + + /// @dev The implementation of the game. + IFaultDisputeGameV2 internal gameImpl; + /// @dev The `Clone` proxy of the game. + IFaultDisputeGameV2 internal gameProxy; + + /// @dev The extra data passed to the game for initialization. + bytes internal extraData; + + event Move(uint256 indexed parentIndex, Claim indexed pivot, address indexed claimant); + event GameClosed(BondDistributionMode bondDistributionMode); + + event ReceiveETH(uint256 amount); + + function init(Claim rootClaim, Claim absolutePrestate, uint256 l2BlockNumber) public { + // Set the time to a realistic date. + if (!isForkTest()) { + vm.warp(1690906994); + } + + // Set the extra data for the game creation + extraData = abi.encode(l2BlockNumber); + + (address _impl, AlphabetVM _vm,) = setupFaultDisputeGameV2(absolutePrestate); + gameImpl = IFaultDisputeGameV2(_impl); + + // Set the init bond for the given game type. + initBond = disputeGameFactory.initBonds(GAME_TYPE); + + // Warp ahead of the game retirement timestamp if needed. 
+ if (block.timestamp <= anchorStateRegistry.retirementTimestamp()) { + vm.warp(anchorStateRegistry.retirementTimestamp() + 1); + } + + // Create a new game. + gameProxy = IFaultDisputeGameV2( + payable(address(disputeGameFactory.create{ value: initBond }(GAME_TYPE, rootClaim, extraData))) + ); + + // Check immutables + assertEq(gameProxy.gameType().raw(), GAME_TYPE.raw()); + assertEq(gameProxy.absolutePrestate().raw(), absolutePrestate.raw()); + assertEq(gameProxy.maxGameDepth(), 2 ** 3); + assertEq(gameProxy.splitDepth(), 2 ** 2); + assertEq(gameProxy.clockExtension().raw(), 3 hours); + assertEq(gameProxy.maxClockDuration().raw(), 3.5 days); + assertEq(address(gameProxy.weth()), address(delayedWeth)); + assertEq(address(gameProxy.anchorStateRegistry()), address(anchorStateRegistry)); + assertEq(address(gameProxy.vm()), address(_vm)); + assertEq(address(gameProxy.gameCreator()), address(this)); + assertEq(gameProxy.l2ChainId(), l2ChainId); + + // Label the proxy + vm.label(address(gameProxy), "FaultDisputeGame_Clone"); + } + + fallback() external payable { } + + receive() external payable { } + + function copyBytes(bytes memory src, bytes memory dest) internal pure returns (bytes memory) { + uint256 byteCount = src.length < dest.length ? src.length : dest.length; + for (uint256 i = 0; i < byteCount; i++) { + dest[i] = src[i]; + } + return dest; + } +} + +/// @title FaultDisputeGameV2_TestInit +/// @notice Reusable test initialization for `FaultDisputeGame` tests. +contract FaultDisputeGameV2_TestInit is BaseFaultDisputeGameV2_TestInit { + /// @dev The root claim of the game. + Claim internal ROOT_CLAIM; + /// @dev An arbitrary root claim for testing. + Claim internal arbitaryRootClaim = Claim.wrap(bytes32(uint256(123))); + + /// @dev The preimage of the absolute prestate claim + bytes internal absolutePrestateData; + /// @dev The absolute prestate of the trace. 
+ Claim internal absolutePrestate; + /// @dev A valid l2BlockNumber that comes after the current anchor root block. + uint256 internal validL2BlockNumber; + + function setUp() public virtual override { + absolutePrestateData = abi.encode(0); + absolutePrestate = _changeClaimStatus(Claim.wrap(keccak256(absolutePrestateData)), VMStatuses.UNFINISHED); + + super.setUp(); + + // Get the actual anchor roots + (Hash root, uint256 l2Bn) = anchorStateRegistry.getAnchorRoot(); + validL2BlockNumber = l2Bn + 1; + + ROOT_CLAIM = Claim.wrap(Hash.unwrap(root)); + + super.init({ rootClaim: ROOT_CLAIM, absolutePrestate: absolutePrestate, l2BlockNumber: validL2BlockNumber }); + } + + /// @notice Helper to generate a mock RLP encoded header (with only a real block number) & an + /// output root proof. + function _generateOutputRootProof( + bytes32 _storageRoot, + bytes32 _withdrawalRoot, + bytes memory _l2BlockNumber + ) + internal + pure + returns (Types.OutputRootProof memory proof_, bytes32 root_, bytes memory rlp_) + { + // L2 Block header + bytes[] memory rawHeaderRLP = new bytes[](9); + rawHeaderRLP[0] = hex"83FACADE"; + rawHeaderRLP[1] = hex"83FACADE"; + rawHeaderRLP[2] = hex"83FACADE"; + rawHeaderRLP[3] = hex"83FACADE"; + rawHeaderRLP[4] = hex"83FACADE"; + rawHeaderRLP[5] = hex"83FACADE"; + rawHeaderRLP[6] = hex"83FACADE"; + rawHeaderRLP[7] = hex"83FACADE"; + rawHeaderRLP[8] = RLPWriter.writeBytes(_l2BlockNumber); + rlp_ = RLPWriter.writeList(rawHeaderRLP); + + // Output root + proof_ = Types.OutputRootProof({ + version: 0, + stateRoot: _storageRoot, + messagePasserStorageRoot: _withdrawalRoot, + latestBlockhash: keccak256(rlp_) + }); + root_ = Hashing.hashOutputRootProof(proof_); + } + + /// @notice Helper to get the required bond for the given claim index. 
+ function _getRequiredBond(uint256 _claimIndex) internal view returns (uint256 bond_) { + (,,,,, Position parent,) = gameProxy.claimData(_claimIndex); + Position pos = parent.move(true); + bond_ = gameProxy.getRequiredBond(pos); + } + + /// @notice Helper to return a pseudo-random claim + function _dummyClaim() internal view returns (Claim) { + return Claim.wrap(keccak256(abi.encode(gasleft()))); + } + + /// @notice Helper to get the localized key for an identifier in the context of the game proxy. + function _getKey(uint256 _ident, bytes32 _localContext) internal view returns (bytes32) { + bytes32 h = keccak256(abi.encode(_ident | (1 << 248), address(gameProxy), _localContext)); + return bytes32((uint256(h) & ~uint256(0xFF << 248)) | (1 << 248)); + } +} + +/// @title FaultDisputeGame_Version_Test +/// @notice Tests the `version` function of the `FaultDisputeGame` contract. +contract FaultDisputeGameV2_Version_Test is FaultDisputeGameV2_TestInit { + /// @notice Tests that the game's version function returns a string. + function test_version_works() public view { + assertTrue(bytes(gameProxy.version()).length > 0); + } +} + +/// @title FaultDisputeGame_Constructor_Test +/// @notice Tests the constructor of the `FaultDisputeGame` contract. +contract FaultDisputeGameV2_Constructor_Test is FaultDisputeGameV2_TestInit { + /// @notice Tests that the constructor of the `FaultDisputeGame` reverts when the + /// `MAX_GAME_DEPTH` parameter is greater than `LibPosition.MAX_POSITION_BITLEN - 1`. 
    function testFuzz_constructor_maxDepthTooLarge_reverts(uint256 _maxGameDepth) public {
        // Upper bound is `max - 1` so that `_maxGameDepth + 1` below cannot overflow.
        _maxGameDepth = bound(_maxGameDepth, LibPosition.MAX_POSITION_BITLEN, type(uint256).max - 1);
        vm.expectRevert(MaxDepthTooLarge.selector);
        DeployUtils.create1({
            _name: "FaultDisputeGameV2",
            _args: DeployUtils.encodeConstructor(
                abi.encodeCall(
                    IFaultDisputeGameV2.__constructor__,
                    (
                        IFaultDisputeGameV2.GameConstructorParams({
                            gameType: GAME_TYPE,
                            maxGameDepth: _maxGameDepth,
                            splitDepth: _maxGameDepth + 1,
                            clockExtension: Duration.wrap(3 hours),
                            maxClockDuration: Duration.wrap(3.5 days)
                        })
                    )
                )
            )
        });
    }

    /// @notice Tests that the constructor of the `FaultDisputeGameV2` reverts when the
    ///         `_splitDepth` parameter is greater than or equal to the `MAX_GAME_DEPTH`
    function testFuzz_constructor_invalidSplitDepth_reverts(uint256 _splitDepth) public {
        uint256 maxGameDepth = 2 ** 3;
        // NOTE(review): lower bound is `maxGameDepth - 1`, so the revert condition is presumably
        // `splitDepth >= maxGameDepth - 1` rather than `>= maxGameDepth` — confirm against the
        // contract's constructor check.
        _splitDepth = bound(_splitDepth, maxGameDepth - 1, type(uint256).max);
        vm.expectRevert(InvalidSplitDepth.selector);
        DeployUtils.create1({
            _name: "FaultDisputeGameV2",
            _args: DeployUtils.encodeConstructor(
                abi.encodeCall(
                    IFaultDisputeGameV2.__constructor__,
                    (
                        IFaultDisputeGameV2.GameConstructorParams({
                            gameType: GAME_TYPE,
                            maxGameDepth: maxGameDepth,
                            splitDepth: _splitDepth,
                            clockExtension: Duration.wrap(3 hours),
                            maxClockDuration: Duration.wrap(3.5 days)
                        })
                    )
                )
            )
        });
    }

    /// @notice Tests that the constructor of the `FaultDisputeGameV2` reverts when the
    ///         `_splitDepth` parameter is less than the minimum split depth (currently 2).
    function testFuzz_constructor_lowSplitDepth_reverts(uint256 _splitDepth) public {
        uint256 minSplitDepth = 2;
        _splitDepth = bound(_splitDepth, 0, minSplitDepth - 1);
        vm.expectRevert(InvalidSplitDepth.selector);
        DeployUtils.create1({
            _name: "FaultDisputeGameV2",
            _args: DeployUtils.encodeConstructor(
                abi.encodeCall(
                    IFaultDisputeGameV2.__constructor__,
                    (
                        IFaultDisputeGameV2.GameConstructorParams({
                            gameType: GAME_TYPE,
                            maxGameDepth: 2 ** 3,
                            splitDepth: _splitDepth,
                            clockExtension: Duration.wrap(3 hours),
                            maxClockDuration: Duration.wrap(3.5 days)
                        })
                    )
                )
            )
        });
    }

    /// @notice Tests that the constructor of the `FaultDisputeGameV2` reverts when clock
    ///         extension * 2 is greater than the max clock duration.
    function testFuzz_constructor_clockExtensionTooLong_reverts(
        uint64 _maxClockDuration,
        uint64 _clockExtension
    )
        public
    {
        // Force the clock extension * 2 to be greater than the max clock duration, but keep things
        // within bounds of the uint64 type.
        _maxClockDuration = uint64(bound(_maxClockDuration, 0, type(uint64).max / 2 - 1));
        _clockExtension = uint64(bound(_clockExtension, _maxClockDuration / 2 + 1, type(uint64).max / 2));

        vm.expectRevert(InvalidClockExtension.selector);
        DeployUtils.create1({
            _name: "FaultDisputeGameV2",
            _args: DeployUtils.encodeConstructor(
                abi.encodeCall(
                    IFaultDisputeGameV2.__constructor__,
                    (
                        IFaultDisputeGameV2.GameConstructorParams({
                            gameType: GAME_TYPE,
                            maxGameDepth: 16,
                            splitDepth: 8,
                            clockExtension: Duration.wrap(_clockExtension),
                            maxClockDuration: Duration.wrap(_maxClockDuration)
                        })
                    )
                )
            )
        });
    }

    /// @notice Tests that the constructor of the `FaultDisputeGameV2` reverts when the
    ///         `_gameType` parameter is set to the reserved `type(uint32).max` game type.
    function test_constructor_reservedGameType_reverts() public {
        vm.expectRevert(ReservedGameType.selector);
        DeployUtils.create1({
            _name: "FaultDisputeGameV2",
            _args: DeployUtils.encodeConstructor(
                abi.encodeCall(
                    IFaultDisputeGameV2.__constructor__,
                    (
                        IFaultDisputeGameV2.GameConstructorParams({
                            gameType: GameType.wrap(type(uint32).max),
                            maxGameDepth: 16,
                            splitDepth: 8,
                            clockExtension: Duration.wrap(3 hours),
                            maxClockDuration: Duration.wrap(3.5 days)
                        })
                    )
                )
            )
        });
    }
}

/// @title FaultDisputeGameV2_Initialize_Test
/// @notice Tests the initialization of the `FaultDisputeGameV2` contract.
contract FaultDisputeGameV2_Initialize_Test is FaultDisputeGameV2_TestInit {
    /// @notice Tests that the game cannot be initialized with an output root that commits to <=
    ///         the configured starting block number
    function testFuzz_initialize_cannotProposeGenesis_reverts(uint256 _blockNumber) public {
        (, uint256 startingL2Block) = gameProxy.startingOutputRoot();
        _blockNumber = bound(_blockNumber, 0, startingL2Block);

        Claim claim = _dummyClaim();
        vm.expectRevert(abi.encodeWithSelector(UnexpectedRootClaim.selector, claim));
        gameProxy = IFaultDisputeGameV2(
            payable(address(disputeGameFactory.create{ value: initBond }(GAME_TYPE, claim, abi.encode(_blockNumber))))
        );
    }

    /// @notice Tests that the proxy receives ETH from the dispute game factory.
    function test_initialize_receivesETH_succeeds() public {
        uint256 _value = disputeGameFactory.initBonds(GAME_TYPE);
        vm.deal(address(this), _value);

        assertEq(address(gameProxy).balance, 0);
        gameProxy = IFaultDisputeGameV2(
            payable(
                address(
                    disputeGameFactory.create{ value: _value }(
                        GAME_TYPE, arbitaryRootClaim, abi.encode(validL2BlockNumber)
                    )
                )
            )
        );
        // The bond is forwarded to the DelayedWETH contract rather than held by the proxy.
        assertEq(address(gameProxy).balance, 0);
        assertEq(delayedWeth.balanceOf(address(gameProxy)), _value);
    }

    /// @notice Tests that the game cannot be initialized with incorrect CWIA calldata length
    ///         caused by extraData of the wrong length
    function test_initialize_wrongExtradataLength_reverts(uint256 _extraDataLen) public {
        // The `DisputeGameFactory` will pack the root claim and the extra data into a single
        // array, which is enforced to be at least 64 bytes long.
        // We bound the upper end to 23.5KB to ensure that the minimal proxy never surpasses the
        // contract size limit in this test, as CWIA proxies store the immutable args in their
        // bytecode.
        // [0 bytes, 31 bytes] u [33 bytes, 23.5 KB]
        _extraDataLen = bound(_extraDataLen, 0, 23_500);
        if (_extraDataLen == 32) {
            _extraDataLen++;
        }
        bytes memory _extraData = new bytes(_extraDataLen);

        // Assign the first 32 bytes in `extraData` to a valid L2 block number past the starting
        // block.
        (, uint256 startingL2Block) = gameProxy.startingOutputRoot();
        assembly {
            mstore(add(_extraData, 0x20), add(startingL2Block, 1))
        }

        Claim claim = _dummyClaim();
        vm.expectRevert(IFaultDisputeGameV2.BadExtraData.selector);
        gameProxy = IFaultDisputeGameV2(
            payable(address(disputeGameFactory.create{ value: initBond }(GAME_TYPE, claim, _extraData)))
        );
    }

    /// @notice Tests that the game cannot be initialized with incorrect CWIA calldata length
    ///         caused by additional immutable args data
    function test_initialize_extraImmutableArgsBytes_reverts(uint256 _extraByteCount) public {
        (bytes memory correctArgs,,) = getFaultDisputeGameV2ImmutableArgs(absolutePrestate);

        // We bound the upper end to 23.5KB to ensure that the minimal proxy never surpasses the
        // contract size limit in this test, as CWIA proxies store the immutable args in their
        // bytecode.
        _extraByteCount = bound(_extraByteCount, 1, 23_500);
        bytes memory immutableArgs = new bytes(_extraByteCount + correctArgs.length);
        // Copy correct args into immutable args
        copyBytes(correctArgs, immutableArgs);

        // Set up dispute game implementation with target immutableArgs
        setupFaultDisputeGameV2(immutableArgs);

        Claim claim = _dummyClaim();
        vm.expectRevert(IFaultDisputeGameV2.BadExtraData.selector);
        gameProxy = IFaultDisputeGameV2(
            payable(
                address(disputeGameFactory.create{ value: initBond }(GAME_TYPE, claim, abi.encode(validL2BlockNumber)))
            )
        );
    }

    /// @notice Tests that the game cannot be initialized with incorrect CWIA calldata length
    ///         caused by missing immutable args data
    function test_initialize_missingImmutableArgsBytes_reverts(uint256 _truncatedByteCount) public {
        (bytes memory correctArgs,,) = getFaultDisputeGameV2ImmutableArgs(absolutePrestate);

        // Truncate between 1 and `correctArgs.length` bytes off the end of the args.
        _truncatedByteCount = (_truncatedByteCount % correctArgs.length) + 1;
        bytes memory immutableArgs = new bytes(correctArgs.length - _truncatedByteCount);
        // Copy correct args into immutable args
        copyBytes(correctArgs, immutableArgs);

        // Set up dispute game implementation with target immutableArgs
        setupFaultDisputeGameV2(immutableArgs);

        Claim claim = _dummyClaim();
        vm.expectRevert(IFaultDisputeGameV2.BadExtraData.selector);
        gameProxy = IFaultDisputeGameV2(
            payable(
                address(disputeGameFactory.create{ value: initBond }(GAME_TYPE, claim, abi.encode(validL2BlockNumber)))
            )
        );
    }

    /// @notice Tests that the game is initialized with the correct data.
    function test_initialize_correctData_succeeds() public view {
        // Assert that the root claim is initialized correctly.
        (
            uint32 parentIndex,
            address counteredBy,
            address claimant,
            uint128 bond,
            Claim claim,
            Position position,
            Clock clock
        ) = gameProxy.claimData(0);
        assertEq(parentIndex, type(uint32).max);
        assertEq(counteredBy, address(0));
        assertEq(claimant, address(this));
        assertEq(bond, initBond);
        assertEq(claim.raw(), ROOT_CLAIM.raw());
        assertEq(position.raw(), 1);
        assertEq(clock.raw(), LibClock.wrap(Duration.wrap(0), Timestamp.wrap(uint64(block.timestamp))).raw());

        // Assert that the `createdAt` timestamp is correct.
        assertEq(gameProxy.createdAt().raw(), block.timestamp);

        // Assert that the blockhash provided is correct.
        assertEq(gameProxy.l1Head().raw(), blockhash(block.number - 1));
    }

    /// @notice Tests that the game cannot be initialized when the anchor root is not found.
    function test_initialize_anchorRootNotFound_reverts() public {
        // Mock the AnchorStateRegistry to return a zero root.
        vm.mockCall(
            address(anchorStateRegistry),
            abi.encodeCall(IAnchorStateRegistry.getAnchorRoot, ()),
            abi.encode(Hash.wrap(bytes32(0)), 0)
        );

        // Creation should fail.
        vm.expectRevert(AnchorRootNotFound.selector);
        gameProxy = IFaultDisputeGameV2(
            payable(
                address(disputeGameFactory.create{ value: initBond }(GAME_TYPE, _dummyClaim(), new bytes(uint256(32))))
            )
        );
    }

    /// @notice Tests that the game cannot be initialized twice.
    function test_initialize_onlyOnce_succeeds() public {
        vm.expectRevert(AlreadyInitialized.selector);
        gameProxy.initialize();
    }

    /// @notice Tests that initialization reverts when oracle challenge period is too large.
    /// @dev V2 validates oracle challenge period during initialize(), not constructor
    function testFuzz_initialize_oracleChallengePeriodTooLarge_reverts(uint256 _challengePeriod) public {
        // Bound to values larger than uint64.max
        _challengePeriod = bound(_challengePeriod, uint256(type(uint64).max) + 1, type(uint256).max);

        // Get the current AlphabetVM from the setup
        (, AlphabetVM vm_,) = setupFaultDisputeGameV2(absolutePrestate);

        // Mock the VM's oracle to return invalid challenge period
        vm.mockCall(
            address(vm_.oracle()), abi.encodeCall(IPreimageOracle.challengePeriod, ()), abi.encode(_challengePeriod)
        );

        // Expect the initialize call to revert with InvalidChallengePeriod
        vm.expectRevert(InvalidChallengePeriod.selector);

        // Create game via factory - initialize() is called automatically and should revert
        gameProxy = IFaultDisputeGameV2(
            payable(
                address(
                    disputeGameFactory.create{ value: initBond }(
                        GAME_TYPE, _dummyClaim(), abi.encode(validL2BlockNumber)
                    )
                )
            )
        );
    }
}

/// @title FaultDisputeGameV2_Step_Test
/// @notice Tests the step functionality of the `FaultDisputeGameV2` contract.
contract FaultDisputeGameV2_Step_Test is FaultDisputeGameV2_TestInit {
    /// @notice Tests that a claim cannot be stepped against twice.
    function test_step_duplicateStep_reverts() public {
        // Give the test contract some ether
        vm.deal(address(this), 1000 ether);

        // Make claims all the way down the tree.
        (,,,, Claim disputed,,) = gameProxy.claimData(0);
        gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(1);
        gameProxy.attack{ value: _getRequiredBond(1) }(disputed, 1, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(2);
        gameProxy.attack{ value: _getRequiredBond(2) }(disputed, 2, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(3);
        gameProxy.attack{ value: _getRequiredBond(3) }(disputed, 3, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(4);
        gameProxy.attack{ value: _getRequiredBond(4) }(disputed, 4, _changeClaimStatus(_dummyClaim(), VMStatuses.PANIC));
        (,,,, disputed,,) = gameProxy.claimData(5);
        gameProxy.attack{ value: _getRequiredBond(5) }(disputed, 5, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(6);
        gameProxy.attack{ value: _getRequiredBond(6) }(disputed, 6, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(7);
        gameProxy.attack{ value: _getRequiredBond(7) }(disputed, 7, _dummyClaim());
        gameProxy.addLocalData(LocalPreimageKey.DISPUTED_L2_BLOCK_NUMBER, 8, 0);
        gameProxy.step(8, true, absolutePrestateData, hex"");

        vm.expectRevert(DuplicateStep.selector);
        gameProxy.step(8, true, absolutePrestateData, hex"");
    }

    /// @notice Tests that successfully step with true attacking claim when there is a true defend
    ///         claim(claim5) in the middle of the dispute game.
    function test_stepAttackDummyClaim_defendTrueClaimInTheMiddle_succeeds() public {
        // Give the test contract some ether
        vm.deal(address(this), 1000 ether);

        // Make claims all the way down the tree.
        (,,,, Claim disputed,,) = gameProxy.claimData(0);
        gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(1);
        gameProxy.attack{ value: _getRequiredBond(1) }(disputed, 1, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(2);
        gameProxy.attack{ value: _getRequiredBond(2) }(disputed, 2, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(3);
        gameProxy.attack{ value: _getRequiredBond(3) }(disputed, 3, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(4);
        gameProxy.attack{ value: _getRequiredBond(4) }(disputed, 4, _changeClaimStatus(_dummyClaim(), VMStatuses.PANIC));
        bytes memory claimData5 = abi.encode(5, 5);
        Claim claim5 = Claim.wrap(keccak256(claimData5));
        (,,,, disputed,,) = gameProxy.claimData(5);
        gameProxy.attack{ value: _getRequiredBond(5) }(disputed, 5, claim5);
        (,,,, disputed,,) = gameProxy.claimData(6);
        gameProxy.defend{ value: _getRequiredBond(6) }(disputed, 6, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(7);
        gameProxy.attack{ value: _getRequiredBond(7) }(disputed, 7, _dummyClaim());
        gameProxy.addLocalData(LocalPreimageKey.DISPUTED_L2_BLOCK_NUMBER, 8, 0);
        gameProxy.step(8, true, claimData5, hex"");
    }

    /// @notice Tests that successfully step with true defend claim when there is a true defend
    ///         claim(claim7) in the middle of the dispute game.
    function test_stepDefendDummyClaim_defendTrueClaimInTheMiddle_succeeds() public {
        // Give the test contract some ether
        vm.deal(address(this), 1000 ether);

        // Make claims all the way down the tree.
        (,,,, Claim disputed,,) = gameProxy.claimData(0);
        gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(1);
        gameProxy.attack{ value: _getRequiredBond(1) }(disputed, 1, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(2);
        gameProxy.attack{ value: _getRequiredBond(2) }(disputed, 2, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(3);
        gameProxy.attack{ value: _getRequiredBond(3) }(disputed, 3, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(4);
        gameProxy.attack{ value: _getRequiredBond(4) }(disputed, 4, _changeClaimStatus(_dummyClaim(), VMStatuses.PANIC));

        bytes memory claimData7 = abi.encode(7, 7);
        Claim postState_ = Claim.wrap(gameProxy.vm().step(claimData7, hex"", bytes32(0)));

        (,,,, disputed,,) = gameProxy.claimData(5);
        gameProxy.attack{ value: _getRequiredBond(5) }(disputed, 5, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(6);
        gameProxy.defend{ value: _getRequiredBond(6) }(disputed, 6, postState_);
        (,,,, disputed,,) = gameProxy.claimData(7);

        gameProxy.attack{ value: _getRequiredBond(7) }(disputed, 7, Claim.wrap(keccak256(claimData7)));
        gameProxy.addLocalData(LocalPreimageKey.DISPUTED_L2_BLOCK_NUMBER, 8, 0);
        gameProxy.step(8, false, claimData7, hex"");
    }

    /// @notice Tests that step reverts with false attacking claim when there is a true defend
    ///         claim(claim5) in the middle of the dispute game.
    function test_stepAttackTrueClaim_defendTrueClaimInTheMiddle_reverts() public {
        // Give the test contract some ether
        vm.deal(address(this), 1000 ether);

        // Make claims all the way down the tree.
        (,,,, Claim disputed,,) = gameProxy.claimData(0);
        gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(1);
        gameProxy.attack{ value: _getRequiredBond(1) }(disputed, 1, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(2);
        gameProxy.attack{ value: _getRequiredBond(2) }(disputed, 2, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(3);
        gameProxy.attack{ value: _getRequiredBond(3) }(disputed, 3, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(4);
        gameProxy.attack{ value: _getRequiredBond(4) }(disputed, 4, _changeClaimStatus(_dummyClaim(), VMStatuses.PANIC));
        bytes memory claimData5 = abi.encode(5, 5);
        Claim claim5 = Claim.wrap(keccak256(claimData5));
        (,,,, disputed,,) = gameProxy.claimData(5);
        gameProxy.attack{ value: _getRequiredBond(5) }(disputed, 5, claim5);
        (,,,, disputed,,) = gameProxy.claimData(6);
        gameProxy.defend{ value: _getRequiredBond(6) }(disputed, 6, _dummyClaim());
        Claim postState_ = Claim.wrap(gameProxy.vm().step(claimData5, hex"", bytes32(0)));
        (,,,, disputed,,) = gameProxy.claimData(7);
        gameProxy.attack{ value: _getRequiredBond(7) }(disputed, 7, postState_);
        gameProxy.addLocalData(LocalPreimageKey.DISPUTED_L2_BLOCK_NUMBER, 8, 0);

        vm.expectRevert(ValidStep.selector);
        gameProxy.step(8, true, claimData5, hex"");
    }

    /// @notice Tests that step reverts with false defending claim when there is a true defend
    ///         claim(postState_) in the middle of the dispute game.
    function test_stepDefendDummyClaim_defendTrueClaimInTheMiddle_reverts() public {
        // Give the test contract some ether
        vm.deal(address(this), 1000 ether);

        // Make claims all the way down the tree.
        (,,,, Claim disputed,,) = gameProxy.claimData(0);
        gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(1);
        gameProxy.attack{ value: _getRequiredBond(1) }(disputed, 1, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(2);
        gameProxy.attack{ value: _getRequiredBond(2) }(disputed, 2, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(3);
        gameProxy.attack{ value: _getRequiredBond(3) }(disputed, 3, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(4);
        gameProxy.attack{ value: _getRequiredBond(4) }(disputed, 4, _changeClaimStatus(_dummyClaim(), VMStatuses.PANIC));

        // NOTE(review): encodes (5, 5) despite the `claimData7` name — confirm this is intended.
        bytes memory claimData7 = abi.encode(5, 5);
        Claim postState_ = Claim.wrap(gameProxy.vm().step(claimData7, hex"", bytes32(0)));

        (,,,, disputed,,) = gameProxy.claimData(5);
        gameProxy.attack{ value: _getRequiredBond(5) }(disputed, 5, postState_);
        (,,,, disputed,,) = gameProxy.claimData(6);
        gameProxy.defend{ value: _getRequiredBond(6) }(disputed, 6, _dummyClaim());

        bytes memory _dummyClaimData = abi.encode(gasleft(), gasleft());
        Claim dummyClaim7 = Claim.wrap(keccak256(_dummyClaimData));
        (,,,, disputed,,) = gameProxy.claimData(7);
        gameProxy.attack{ value: _getRequiredBond(7) }(disputed, 7, dummyClaim7);
        gameProxy.addLocalData(LocalPreimageKey.DISPUTED_L2_BLOCK_NUMBER, 8, 0);
        vm.expectRevert(ValidStep.selector);
        gameProxy.step(8, false, _dummyClaimData, hex"");
    }

    /// @notice Tests that step reverts with true defending claim when there is a true defend
    ///         claim(postState_) in the middle of the dispute game.
    function test_stepDefendTrueClaim_defendTrueClaimInTheMiddle_reverts() public {
        // Give the test contract some ether
        vm.deal(address(this), 1000 ether);

        // Make claims all the way down the tree.
        (,,,, Claim disputed,,) = gameProxy.claimData(0);
        gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(1);
        gameProxy.attack{ value: _getRequiredBond(1) }(disputed, 1, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(2);
        gameProxy.attack{ value: _getRequiredBond(2) }(disputed, 2, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(3);
        gameProxy.attack{ value: _getRequiredBond(3) }(disputed, 3, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(4);
        gameProxy.attack{ value: _getRequiredBond(4) }(disputed, 4, _changeClaimStatus(_dummyClaim(), VMStatuses.PANIC));

        // NOTE(review): encodes (5, 5) despite the `claimData7` name — confirm this is intended.
        bytes memory claimData7 = abi.encode(5, 5);
        Claim claim7 = Claim.wrap(keccak256(claimData7));
        Claim postState_ = Claim.wrap(gameProxy.vm().step(claimData7, hex"", bytes32(0)));

        (,,,, disputed,,) = gameProxy.claimData(5);
        gameProxy.attack{ value: _getRequiredBond(5) }(disputed, 5, postState_);
        (,,,, disputed,,) = gameProxy.claimData(6);
        gameProxy.defend{ value: _getRequiredBond(6) }(disputed, 6, _dummyClaim());
        (,,,, disputed,,) = gameProxy.claimData(7);
        gameProxy.attack{ value: _getRequiredBond(7) }(disputed, 7, claim7);
        gameProxy.addLocalData(LocalPreimageKey.DISPUTED_L2_BLOCK_NUMBER, 8, 0);

        vm.expectRevert(ValidStep.selector);
        gameProxy.step(8, false, claimData7, hex"");
    }
}

/// @title FaultDisputeGameV2_Move_Test
/// @notice Tests the move functionality of the `FaultDisputeGameV2` contract.
contract FaultDisputeGameV2_Move_Test is FaultDisputeGameV2_TestInit {
    /// @notice Tests that a move while the game status is not `IN_PROGRESS` causes the call to
    ///         revert with the `GameNotInProgress` error
    function test_move_gameNotInProgress_reverts() public {
        uint256 chalWins = uint256(GameStatus.CHALLENGER_WINS);

        // Replace the game status in storage. It exists in slot 0 at offset 16.
        uint256 slot = uint256(vm.load(address(gameProxy), bytes32(0)));
        uint256 offset = 16 << 3;
        uint256 mask = 0xFF << offset;
        // Replace the byte in the slot value with the challenger wins status.
        slot = (slot & ~mask) | (chalWins << offset);
        vm.store(address(gameProxy), bytes32(0), bytes32(slot));

        // Ensure that the game status was properly updated.
        GameStatus status = gameProxy.status();
        assertEq(uint256(status), chalWins);

        (,,,, Claim root,,) = gameProxy.claimData(0);
        // Attempt to make a move. Should revert.
        vm.expectRevert(GameNotInProgress.selector);
        gameProxy.attack(root, 0, Claim.wrap(0));
    }

    /// @notice Tests that an attempt to defend the root claim reverts with the
    ///         `CannotDefendRootClaim` error.
    function test_move_defendRoot_reverts() public {
        (,,,, Claim root,,) = gameProxy.claimData(0);
        vm.expectRevert(CannotDefendRootClaim.selector);
        gameProxy.defend(root, 0, _dummyClaim());
    }

    /// @notice Tests that an attempt to move against a claim that does not exist reverts with the
    ///         `ParentDoesNotExist` error.
    function test_move_nonExistentParent_reverts() public {
        Claim claim = _dummyClaim();

        // Expect an out of bounds revert for an attack
        vm.expectRevert(stdError.indexOOBError);
        gameProxy.attack(_dummyClaim(), 1, claim);

        // Expect an out of bounds revert for a defense
        vm.expectRevert(stdError.indexOOBError);
        gameProxy.defend(_dummyClaim(), 1, claim);
    }

    /// @notice Tests that an attempt to move at the maximum game depth reverts with the
    ///         `GameDepthExceeded` error.
    function test_move_gameDepthExceeded_reverts() public {
        Claim claim = _changeClaimStatus(_dummyClaim(), VMStatuses.PANIC);

        uint256 maxDepth = gameProxy.maxGameDepth();

        for (uint256 i = 0; i <= maxDepth; i++) {
            (,,,, Claim disputed,,) = gameProxy.claimData(i);
            // At the max game depth, the `_move` function should revert with
            // the `GameDepthExceeded` error.
            if (i == maxDepth) {
                vm.expectRevert(GameDepthExceeded.selector);
                gameProxy.attack{ value: 100 ether }(disputed, i, claim);
            } else {
                gameProxy.attack{ value: _getRequiredBond(i) }(disputed, i, claim);
            }
        }
    }

    /// @notice Tests that a move made after the clock time has exceeded reverts with the
    ///         `ClockTimeExceeded` error.
    function test_move_clockTimeExceeded_reverts() public {
        // Warp ahead past the clock time for the first move (3 1/2 days)
        vm.warp(block.timestamp + 3 days + 12 hours + 1);
        uint256 bond = _getRequiredBond(0);
        (,,,, Claim disputed,,) = gameProxy.claimData(0);
        vm.expectRevert(ClockTimeExceeded.selector);
        gameProxy.attack{ value: bond }(disputed, 0, _dummyClaim());
    }

    /// @notice Static unit test for the correctness of the chess clock incrementation.
    function test_move_clockCorrectness_succeeds() public {
        (,,,,,, Clock clock) = gameProxy.claimData(0);
        assertEq(clock.raw(), LibClock.wrap(Duration.wrap(0), Timestamp.wrap(uint64(block.timestamp))).raw());

        Claim claim = _dummyClaim();

        vm.warp(block.timestamp + 15);
        uint256 bond = _getRequiredBond(0);
        (,,,, Claim disputed,,) = gameProxy.claimData(0);
        gameProxy.attack{ value: bond }(disputed, 0, claim);
        (,,,,,, clock) = gameProxy.claimData(1);
        assertEq(clock.raw(), LibClock.wrap(Duration.wrap(15), Timestamp.wrap(uint64(block.timestamp))).raw());

        vm.warp(block.timestamp + 10);
        bond = _getRequiredBond(1);
        (,,,, disputed,,) = gameProxy.claimData(1);
        gameProxy.attack{ value: bond }(disputed, 1, claim);
        (,,,,,, clock) = gameProxy.claimData(2);
        assertEq(clock.raw(), LibClock.wrap(Duration.wrap(10), Timestamp.wrap(uint64(block.timestamp))).raw());

        // We are at the split depth, so we need to set the status byte of the claim for the next
        // move.
        claim = _changeClaimStatus(claim, VMStatuses.PANIC);

        vm.warp(block.timestamp + 10);
        bond = _getRequiredBond(2);
        (,,,, disputed,,) = gameProxy.claimData(2);
        gameProxy.attack{ value: bond }(disputed, 2, claim);
        (,,,,,, clock) = gameProxy.claimData(3);
        assertEq(clock.raw(), LibClock.wrap(Duration.wrap(25), Timestamp.wrap(uint64(block.timestamp))).raw());

        vm.warp(block.timestamp + 10);
        bond = _getRequiredBond(3);
        (,,,, disputed,,) = gameProxy.claimData(3);
        gameProxy.attack{ value: bond }(disputed, 3, claim);
        (,,,,,, clock) = gameProxy.claimData(4);
        assertEq(clock.raw(), LibClock.wrap(Duration.wrap(20), Timestamp.wrap(uint64(block.timestamp))).raw());
    }

    /// @notice Tests that the standard clock extension is triggered for a move that is not the
    ///         split depth or the max game depth.
    function test_move_standardClockExtension_succeeds() public {
        (,,,,,, Clock clock) = gameProxy.claimData(0);
        assertEq(clock.raw(), LibClock.wrap(Duration.wrap(0), Timestamp.wrap(uint64(block.timestamp))).raw());

        uint256 bond;
        Claim disputed;
        Claim claim = _dummyClaim();
        uint256 splitDepth = gameProxy.splitDepth();
        uint64 halfGameDuration = gameProxy.maxClockDuration().raw();
        uint64 clockExtension = gameProxy.clockExtension().raw();

        // Warp ahead so that the next move will trigger a clock extension. We warp to the very
        // first timestamp where a clock extension should be triggered.
        vm.warp(block.timestamp + halfGameDuration - clockExtension + 1 seconds);

        // Execute a move that should cause a clock extension.
        bond = _getRequiredBond(0);
        (,,,, disputed,,) = gameProxy.claimData(0);
        gameProxy.attack{ value: bond }(disputed, 0, claim);
        (,,,,,, clock) = gameProxy.claimData(1);

        // The clock should have been pushed back to the clock extension time.
        assertEq(clock.duration().raw(), halfGameDuration - clockExtension);

        // Warp ahead again so that clock extensions will also trigger for the other team. Here we
        // only warp to the clockExtension time because we'll be warping ahead by one second during
        // each additional move.
        vm.warp(block.timestamp + halfGameDuration - clockExtension);

        // Work our way down to the split depth.
        for (uint256 i = 1; i < splitDepth - 2; i++) {
            // Warp ahead by one second so that the next move will trigger a clock extension.
            vm.warp(block.timestamp + 1 seconds);

            // Execute a move that should cause a clock extension.
            bond = _getRequiredBond(i);
            (,,,, disputed,,) = gameProxy.claimData(i);
            gameProxy.attack{ value: bond }(disputed, i, claim);
            (,,,,,, clock) = gameProxy.claimData(i + 1);

            // The clock should have been pushed back to the clock extension time.
            assertEq(clock.duration().raw(), halfGameDuration - clockExtension);
        }
    }

    /// @notice Tests that the doubled clock extension is applied for the move that crosses the
    ///         split depth.
    function test_move_splitDepthClockExtension_succeeds() public {
        (,,,,,, Clock clock) = gameProxy.claimData(0);
        assertEq(clock.raw(), LibClock.wrap(Duration.wrap(0), Timestamp.wrap(uint64(block.timestamp))).raw());

        uint256 bond;
        Claim disputed;
        Claim claim = _dummyClaim();
        uint256 splitDepth = gameProxy.splitDepth();
        uint64 halfGameDuration = gameProxy.maxClockDuration().raw();
        uint64 clockExtension = gameProxy.clockExtension().raw();

        // Work our way down to the split depth without moving ahead in time, we don't care about
        // the exact clock here, just don't want to take the clock below the clock extension time
        // that we're trying to test here.
        for (uint256 i = 0; i < splitDepth - 2; i++) {
            bond = _getRequiredBond(i);
            (,,,, disputed,,) = gameProxy.claimData(i);
            gameProxy.attack{ value: bond }(disputed, i, claim);
        }

        // Warp ahead to the very first timestamp where a clock extension should be triggered.
        vm.warp(block.timestamp + halfGameDuration - clockExtension * 2 + 1 seconds);

        // Execute a move that should cause a clock extension.
        bond = _getRequiredBond(splitDepth - 2);
        (,,,, disputed,,) = gameProxy.claimData(splitDepth - 2);
        gameProxy.attack{ value: bond }(disputed, splitDepth - 2, claim);
        (,,,,,, clock) = gameProxy.claimData(splitDepth - 1);

        // The clock should have been pushed back to the clock extension time.
        assertEq(clock.duration().raw(), halfGameDuration - clockExtension * 2);
    }

    /// @notice Tests that the clock extension at the max game depth also accounts for the
    ///         oracle's challenge period.
    function test_move_maxGameDepthClockExtension_succeeds() public {
        (,,,,,, Clock clock) = gameProxy.claimData(0);
        assertEq(clock.raw(), LibClock.wrap(Duration.wrap(0), Timestamp.wrap(uint64(block.timestamp))).raw());

        uint256 bond;
        Claim disputed;
        Claim claim = _dummyClaim();
        uint256 splitDepth = gameProxy.splitDepth();
        uint64 halfGameDuration = gameProxy.maxClockDuration().raw();
        uint64 clockExtension = gameProxy.clockExtension().raw();

        // Work our way down to the split depth without moving ahead in time, we don't care about
        // the exact clock here, just don't want to take the clock below the clock extension time
        // that we're trying to test here.
        for (uint256 i = 0; i < gameProxy.maxGameDepth() - 2; i++) {
            bond = _getRequiredBond(i);
            (,,,, disputed,,) = gameProxy.claimData(i);
            gameProxy.attack{ value: bond }(disputed, i, claim);

            // Change the claim status when we're crossing the split depth.
            if (i == splitDepth - 2) {
                claim = _changeClaimStatus(claim, VMStatuses.PANIC);
            }
        }

        // Warp ahead to the very first timestamp where a clock extension should be triggered.
        vm.warp(block.timestamp + halfGameDuration - (clockExtension + gameProxy.vm().oracle().challengePeriod()) + 1);

        // Execute a move that should cause a clock extension.
        bond = _getRequiredBond(gameProxy.maxGameDepth() - 2);
        (,,,, disputed,,) = gameProxy.claimData(gameProxy.maxGameDepth() - 2);
        gameProxy.attack{ value: bond }(disputed, gameProxy.maxGameDepth() - 2, claim);
        (,,,,,, clock) = gameProxy.claimData(gameProxy.maxGameDepth() - 1);

        // The clock should have been pushed back to the clock extension time.
        assertEq(
            clock.duration().raw(), halfGameDuration - (clockExtension + gameProxy.vm().oracle().challengePeriod())
        );
    }

    /// @notice Tests that an identical claim cannot be made twice. The duplicate claim attempt
    ///         should revert with the `ClaimAlreadyExists` error.
    function test_move_duplicateClaim_reverts() public {
        Claim claim = _dummyClaim();

        // Make the first move. This should succeed.
        uint256 bond = _getRequiredBond(0);
        (,,,, Claim disputed,,) = gameProxy.claimData(0);
        gameProxy.attack{ value: bond }(disputed, 0, claim);

        // Attempt to make the same move again.
        vm.expectRevert(ClaimAlreadyExists.selector);
        gameProxy.attack{ value: bond }(disputed, 0, claim);
    }

    /// @notice Static unit test asserting that identical claims at the same position can be made
    ///         in different subgames.
    function test_move_duplicateClaimsDifferentSubgames_succeeds() public {
        Claim claimA = _dummyClaim();
        Claim claimB = _dummyClaim();

        // Make the first moves. This should succeed.
        uint256 bond = _getRequiredBond(0);
        (,,,, Claim disputed,,) = gameProxy.claimData(0);
        gameProxy.attack{ value: bond }(disputed, 0, claimA);
        gameProxy.attack{ value: bond }(disputed, 0, claimB);

        // Perform an attack at the same position with the same claim value in both subgames.
        // These both should succeed.
        bond = _getRequiredBond(1);
        (,,,, disputed,,) = gameProxy.claimData(1);
        gameProxy.attack{ value: bond }(disputed, 1, claimA);
        bond = _getRequiredBond(2);
        (,,,, disputed,,) = gameProxy.claimData(2);
        gameProxy.attack{ value: bond }(disputed, 2, claimA);
    }

    /// @notice Static unit test for the correctness of an opening attack.
    function test_move_simpleAttack_succeeds() public {
        // Warp ahead 5 seconds.
        vm.warp(block.timestamp + 5);

        Claim counter = _dummyClaim();

        // Perform the attack.
        uint256 reqBond = _getRequiredBond(0);
        vm.expectEmit(true, true, true, false);
        emit Move(0, counter, address(this));
        (,,,, Claim disputed,,) = gameProxy.claimData(0);
        gameProxy.attack{ value: reqBond }(disputed, 0, counter);

        // Grab the claim data of the attack.
        (
            uint32 parentIndex,
            address counteredBy,
            address claimant,
            uint128 bond,
            Claim claim,
            Position position,
            Clock clock
        ) = gameProxy.claimData(1);

        // Assert correctness of the attack claim's data.
        assertEq(parentIndex, 0);
        assertEq(counteredBy, address(0));
        assertEq(claimant, address(this));
        assertEq(bond, reqBond);
        assertEq(claim.raw(), counter.raw());
        assertEq(position.raw(), Position.wrap(1).move(true).raw());
        assertEq(clock.raw(), LibClock.wrap(Duration.wrap(5), Timestamp.wrap(uint64(block.timestamp))).raw());

        // Grab the claim data of the parent.
        (parentIndex, counteredBy, claimant, bond, claim, position, clock) = gameProxy.claimData(0);

        // Assert correctness of the parent claim's data.
        assertEq(parentIndex, type(uint32).max);
        assertEq(counteredBy, address(0));
        assertEq(claimant, address(this));
        assertEq(bond, initBond);
        assertEq(claim.raw(), ROOT_CLAIM.raw());
        assertEq(position.raw(), 1);
        assertEq(clock.raw(), LibClock.wrap(Duration.wrap(0), Timestamp.wrap(uint64(block.timestamp - 5))).raw());
    }

    /// @notice Tests that making a claim at the execution trace bisection root level with an
    ///         invalid status byte reverts with the `UnexpectedRootClaim` error.
    function test_move_incorrectStatusExecRoot_reverts() public {
        Claim disputed;
        for (uint256 i; i < 4; i++) {
            (,,,, disputed,,) = gameProxy.claimData(i);
            gameProxy.attack{ value: _getRequiredBond(i) }(disputed, i, _dummyClaim());
        }

        uint256 bond = _getRequiredBond(4);
        (,,,, disputed,,) = gameProxy.claimData(4);
        vm.expectRevert(abi.encodeWithSelector(UnexpectedRootClaim.selector, bytes32(0)));
        gameProxy.attack{ value: bond }(disputed, 4, Claim.wrap(bytes32(0)));
    }

    /// @notice Tests that making a claim at the execution trace bisection root level with a valid
    ///         status byte succeeds.
    function test_move_correctStatusExecRoot_succeeds() public {
        Claim disputed;
        for (uint256 i; i < 4; i++) {
            uint256 bond = _getRequiredBond(i);
            (,,,, disputed,,) = gameProxy.claimData(i);
            gameProxy.attack{ value: bond }(disputed, i, _dummyClaim());
        }
        uint256 lastBond = _getRequiredBond(4);
        (,,,, disputed,,) = gameProxy.claimData(4);
        gameProxy.attack{ value: lastBond }(disputed, 4, _changeClaimStatus(_dummyClaim(), VMStatuses.PANIC));
    }

    /// @notice Static unit test asserting that a move reverts when the bonded amount is incorrect.
+ function test_move_incorrectBondAmount_reverts() public { + (,,,, Claim disputed,,) = gameProxy.claimData(0); + vm.expectRevert(IncorrectBondAmount.selector); + gameProxy.attack{ value: 0 }(disputed, 0, _dummyClaim()); + } + + /// @notice Static unit test asserting that a move reverts when the disputed claim does not + /// match its index. + function test_move_incorrectDisputedIndex_reverts() public { + (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim()); + uint256 bond = _getRequiredBond(1); + vm.expectRevert(InvalidDisputedClaimIndex.selector); + gameProxy.attack{ value: bond }(disputed, 1, _dummyClaim()); + } +} + +/// @title FaultDisputeGame_AddLocalData_Test +/// @notice Tests the addLocalData functionality of the `FaultDisputeGame` contract. +contract FaultDisputeGameV2_AddLocalData_Test is FaultDisputeGameV2_TestInit { + /// @notice Tests that adding local data with an out of bounds identifier reverts. + function testFuzz_addLocalData_oob_reverts(uint256 _ident) public { + Claim disputed; + // Get a claim below the split depth so that we can add local data for an execution trace + // subgame. + for (uint256 i; i < 4; i++) { + uint256 bond = _getRequiredBond(i); + (,,,, disputed,,) = gameProxy.claimData(i); + gameProxy.attack{ value: bond }(disputed, i, _dummyClaim()); + } + uint256 lastBond = _getRequiredBond(4); + (,,,, disputed,,) = gameProxy.claimData(4); + gameProxy.attack{ value: lastBond }(disputed, 4, _changeClaimStatus(_dummyClaim(), VMStatuses.PANIC)); + + // [1, 5] are valid local data identifiers. 
+ if (_ident <= 5) _ident = 0; + + vm.expectRevert(InvalidLocalIdent.selector); + gameProxy.addLocalData(_ident, 5, 0); + } + + /// @notice Tests that local data is loaded into the preimage oracle correctly in the subgame + /// that is disputing the transition from `GENESIS -> GENESIS + 1` + function test_addLocalDataGenesisTransition_static_succeeds() public { + IPreimageOracle oracle = IPreimageOracle(address(gameProxy.vm().oracle())); + Claim disputed; + + // Get a claim below the split depth so that we can add local data for an execution trace + // subgame. + for (uint256 i; i < 4; i++) { + uint256 bond = _getRequiredBond(i); + (,,,, disputed,,) = gameProxy.claimData(i); + gameProxy.attack{ value: bond }(disputed, i, Claim.wrap(bytes32(i))); + } + uint256 lastBond = _getRequiredBond(4); + (,,,, disputed,,) = gameProxy.claimData(4); + gameProxy.attack{ value: lastBond }(disputed, 4, _changeClaimStatus(_dummyClaim(), VMStatuses.PANIC)); + + // Expected start/disputed claims + (Hash root,) = gameProxy.startingOutputRoot(); + bytes32 startingClaim = root.raw(); + bytes32 disputedClaim = bytes32(uint256(3)); + Position disputedPos = LibPosition.wrap(4, 0); + + // Expected local data + bytes32[5] memory data = [ + gameProxy.l1Head().raw(), + startingClaim, + disputedClaim, + bytes32(validL2BlockNumber << 0xC0), + bytes32(gameProxy.l2ChainId() << 0xC0) + ]; + + for (uint256 i = 1; i <= 5; i++) { + uint256 expectedLen = i > 3 ? 8 : 32; + bytes32 key = _getKey(i, keccak256(abi.encode(disputedClaim, disputedPos))); + + gameProxy.addLocalData(i, 5, 0); + (bytes32 dat, uint256 datLen) = oracle.readPreimage(key, 0); + assertEq(dat >> 0xC0, bytes32(expectedLen)); + // Account for the length prefix if i > 3 (the data stored at identifiers i <= 3 are + // 32 bytes long, so the expected length is already correct. If i > 3, the data is only + // 8 bytes long, so the length prefix + the data is 16 bytes total.) + assertEq(datLen, expectedLen + (i > 3 ? 
8 : 0)); + + gameProxy.addLocalData(i, 5, 8); + (dat, datLen) = oracle.readPreimage(key, 8); + assertEq(dat, data[i - 1]); + assertEq(datLen, expectedLen); + } + } + + /// @notice Tests that local data is loaded into the preimage oracle correctly. + function test_addLocalDataMiddle_static_succeeds() public { + IPreimageOracle oracle = IPreimageOracle(address(gameProxy.vm().oracle())); + Claim disputed; + + // Get a claim below the split depth so that we can add local data for an execution trace + // subgame. + for (uint256 i; i < 4; i++) { + uint256 bond = _getRequiredBond(i); + (,,,, disputed,,) = gameProxy.claimData(i); + gameProxy.attack{ value: bond }(disputed, i, Claim.wrap(bytes32(i))); + } + uint256 lastBond = _getRequiredBond(4); + (,,,, disputed,,) = gameProxy.claimData(4); + gameProxy.defend{ value: lastBond }(disputed, 4, _changeClaimStatus(ROOT_CLAIM, VMStatuses.VALID)); + + // Expected start/disputed claims + bytes32 startingClaim = bytes32(uint256(3)); + Position startingPos = LibPosition.wrap(4, 0); + bytes32 disputedClaim = bytes32(uint256(2)); + Position disputedPos = LibPosition.wrap(3, 0); + + // Expected local data + bytes32[5] memory data = [ + gameProxy.l1Head().raw(), + startingClaim, + disputedClaim, + bytes32(validL2BlockNumber << 0xC0), + bytes32(gameProxy.l2ChainId() << 0xC0) + ]; + + for (uint256 i = 1; i <= 5; i++) { + uint256 expectedLen = i > 3 ? 8 : 32; + bytes32 key = _getKey(i, keccak256(abi.encode(startingClaim, startingPos, disputedClaim, disputedPos))); + + gameProxy.addLocalData(i, 5, 0); + (bytes32 dat, uint256 datLen) = oracle.readPreimage(key, 0); + assertEq(dat >> 0xC0, bytes32(expectedLen)); + // Account for the length prefix if i > 3 (the data stored at identifiers i <= 3 are + // 32 bytes long, so the expected length is already correct. If i > 3, the data is only + // 8 bytes long, so the length prefix + the data is 16 bytes total.) + assertEq(datLen, expectedLen + (i > 3 ? 
8 : 0)); + + gameProxy.addLocalData(i, 5, 8); + (dat, datLen) = oracle.readPreimage(key, 8); + assertEq(dat, data[i - 1]); + assertEq(datLen, expectedLen); + } + } + + /// @notice Tests that the L2 block number claim is favored over the bisected-to block when + /// adding data. + function test_addLocalData_l2BlockNumberExtension_succeeds() public { + // Deploy a new dispute game with a L2 block number claim of 8. This is directly in the + // middle of the leaves in our output bisection test tree, at SPLIT_DEPTH = 2 ** 2 + IFaultDisputeGameV2 game = IFaultDisputeGameV2( + address( + disputeGameFactory.create{ value: initBond }( + GAME_TYPE, Claim.wrap(bytes32(uint256(0xFF))), abi.encode(validL2BlockNumber) + ) + ) + ); + + // Get a claim below the split depth so that we can add local data for an execution trace + // subgame. + { + Claim disputed; + Position parent; + Position pos; + + for (uint256 i; i < 4; i++) { + (,,,,, parent,) = game.claimData(i); + pos = parent.move(true); + uint256 bond = game.getRequiredBond(pos); + + (,,,, disputed,,) = game.claimData(i); + if (i == 0) { + game.attack{ value: bond }(disputed, i, Claim.wrap(bytes32(i))); + } else { + game.defend{ value: bond }(disputed, i, Claim.wrap(bytes32(i))); + } + } + (,,,,, parent,) = game.claimData(4); + pos = parent.move(true); + uint256 lastBond = game.getRequiredBond(pos); + (,,,, disputed,,) = game.claimData(4); + game.defend{ value: lastBond }(disputed, 4, _changeClaimStatus(ROOT_CLAIM, VMStatuses.INVALID)); + } + + // Expected start/disputed claims + bytes32 startingClaim = bytes32(uint256(3)); + Position startingPos = LibPosition.wrap(4, 14); + bytes32 disputedClaim = bytes32(uint256(0xFF)); + Position disputedPos = LibPosition.wrap(0, 0); + + // Expected local data. This should be `l2BlockNumber`, and not the actual bisected-to + // block, as we choose the minimum between the two. 
+ bytes32 expectedNumber = bytes32(validL2BlockNumber << 0xC0); + uint256 expectedLen = 8; + uint256 l2NumberIdent = LocalPreimageKey.DISPUTED_L2_BLOCK_NUMBER; + + // Compute the preimage key for the local data + bytes32 localContext = keccak256(abi.encode(startingClaim, startingPos, disputedClaim, disputedPos)); + bytes32 rawKey = keccak256(abi.encode(l2NumberIdent | (1 << 248), address(game), localContext)); + bytes32 key = bytes32((uint256(rawKey) & ~uint256(0xFF << 248)) | (1 << 248)); + + IPreimageOracle oracle = IPreimageOracle(address(game.vm().oracle())); + game.addLocalData(l2NumberIdent, 5, 0); + + (bytes32 dat, uint256 datLen) = oracle.readPreimage(key, 0); + assertEq(dat >> 0xC0, bytes32(expectedLen)); + assertEq(datLen, expectedLen + 8); + + game.addLocalData(l2NumberIdent, 5, 8); + (dat, datLen) = oracle.readPreimage(key, 8); + assertEq(dat, expectedNumber); + assertEq(datLen, expectedLen); + } +} + +/// @title FaultDisputeGame_ChallengeRootL2Block_Test +/// @notice Tests the challengeRootL2Block functionality of the `FaultDisputeGame` contract. +contract FaultDisputeGameV2_ChallengeRootL2Block_Test is FaultDisputeGameV2_TestInit { + /// @notice Tests that challenging the root claim's L2 block number by providing the real + /// preimage of the output root succeeds. + function testFuzz_challengeRootL2Block_succeeds( + bytes32 _storageRoot, + bytes32 _withdrawalRoot, + uint256 _l2BlockNumber + ) + public + { + _l2BlockNumber = bound(_l2BlockNumber, validL2BlockNumber, type(uint256).max - 1); + + (Types.OutputRootProof memory outputRootProof, bytes32 outputRoot, bytes memory headerRLP) = + _generateOutputRootProof(_storageRoot, _withdrawalRoot, abi.encodePacked(_l2BlockNumber)); + + // Create the dispute game with the output root at the wrong L2 block number. 
+ uint256 wrongL2BlockNumber = bound(vm.randomUint(), _l2BlockNumber + 1, type(uint256).max); + IDisputeGame game = disputeGameFactory.create{ value: initBond }( + GAME_TYPE, Claim.wrap(outputRoot), abi.encode(wrongL2BlockNumber) + ); + + // Challenge the L2 block number. + IFaultDisputeGameV2 fdg = IFaultDisputeGameV2(address(game)); + fdg.challengeRootL2Block(outputRootProof, headerRLP); + + // Ensure that a duplicate challenge reverts. + vm.expectRevert(L2BlockNumberChallenged.selector); + fdg.challengeRootL2Block(outputRootProof, headerRLP); + + // Warp past the clocks, resolve the game. + vm.warp(block.timestamp + 3 days + 12 hours + 1); + fdg.resolveClaim(0, 0); + fdg.resolve(); + + // Ensure the challenge was successful. + assertEq(uint8(fdg.status()), uint8(GameStatus.CHALLENGER_WINS)); + assertTrue(fdg.l2BlockNumberChallenged()); + } + + /// @notice Tests that challenging the root claim's L2 block number by providing the real + /// preimage of the output root succeeds. Also, this claim should always receive the + /// bond when there is another counter that is as far left as possible. + function testFuzz_challengeRootL2Block_receivesBond_succeeds( + bytes32 _storageRoot, + bytes32 _withdrawalRoot, + uint256 _l2BlockNumber + ) + public + { + vm.deal(address(0xb0b), 1 ether); + _l2BlockNumber = bound(_l2BlockNumber, validL2BlockNumber, type(uint256).max - 1); + + (Types.OutputRootProof memory outputRootProof, bytes32 outputRoot, bytes memory headerRLP) = + _generateOutputRootProof(_storageRoot, _withdrawalRoot, abi.encodePacked(_l2BlockNumber)); + + // Create the dispute game with the output root at the wrong L2 block number. 
+ disputeGameFactory.setInitBond(GAME_TYPE, 0.1 ether); + uint256 balanceBefore = address(this).balance; + _l2BlockNumber = bound(vm.randomUint(), _l2BlockNumber + 1, type(uint256).max); + IDisputeGame game = + disputeGameFactory.create{ value: 0.1 ether }(GAME_TYPE, Claim.wrap(outputRoot), abi.encode(_l2BlockNumber)); + IFaultDisputeGameV2 fdg = IFaultDisputeGameV2(address(game)); + + // Attack the root as 0xb0b + uint256 bond = _getRequiredBond(0); + (,,,, Claim disputed,,) = fdg.claimData(0); + vm.prank(address(0xb0b)); + fdg.attack{ value: bond }(disputed, 0, Claim.wrap(0)); + + // Challenge the L2 block number as 0xace. This claim should receive the root claim's bond. + vm.prank(address(0xace)); + fdg.challengeRootL2Block(outputRootProof, headerRLP); + + // Warp past the clocks, resolve the game. + vm.warp(block.timestamp + 3 days + 12 hours + 1); + fdg.resolveClaim(1, 0); + fdg.resolveClaim(0, 0); + fdg.resolve(); + + // Ensure the challenge was successful. + assertEq(uint8(fdg.status()), uint8(GameStatus.CHALLENGER_WINS)); + + // Wait for finalization delay. + vm.warp(block.timestamp + 3.5 days + 1 seconds); + + // Close the game. + fdg.closeGame(); + + // Claim credit once to trigger unlock period. + fdg.claimCredit(address(this)); + fdg.claimCredit(address(0xb0b)); + fdg.claimCredit(address(0xace)); + + // Wait for the withdrawal delay. + vm.warp(block.timestamp + delayedWeth.delay() + 1 seconds); + + // Claim credit + vm.expectRevert(NoCreditToClaim.selector); + fdg.claimCredit(address(this)); + fdg.claimCredit(address(0xb0b)); + fdg.claimCredit(address(0xace)); + + // Ensure that the party who challenged the L2 block number with the special move received + // the bond. 
+ // - Root claim loses their bond + // - 0xace receives the root claim's bond + // - 0xb0b receives their bond back + assertEq(address(this).balance, balanceBefore - 0.1 ether); + assertEq(address(0xb0b).balance, 1 ether); + assertEq(address(0xace).balance, 0.1 ether); + } + + /// @notice Tests that challenging the root claim's L2 block number by providing the real + /// preimage of the output root never succeeds. + function testFuzz_challengeRootL2Block_rightBlockNumber_reverts( + bytes32 _storageRoot, + bytes32 _withdrawalRoot, + uint256 _l2BlockNumber + ) + public + { + _l2BlockNumber = bound(_l2BlockNumber, validL2BlockNumber, type(uint256).max); + + (Types.OutputRootProof memory outputRootProof, bytes32 outputRoot, bytes memory headerRLP) = + _generateOutputRootProof(_storageRoot, _withdrawalRoot, abi.encodePacked(_l2BlockNumber)); + + // Create the dispute game with the output root at the wrong L2 block number. + IDisputeGame game = + disputeGameFactory.create{ value: initBond }(GAME_TYPE, Claim.wrap(outputRoot), abi.encode(_l2BlockNumber)); + + // Challenge the L2 block number. + IFaultDisputeGameV2 fdg = IFaultDisputeGameV2(address(game)); + vm.expectRevert(BlockNumberMatches.selector); + fdg.challengeRootL2Block(outputRootProof, headerRLP); + + // Warp past the clocks, resolve the game. + vm.warp(block.timestamp + 3 days + 12 hours + 1); + fdg.resolveClaim(0, 0); + fdg.resolve(); + + // Ensure the challenge was successful. + assertEq(uint8(fdg.status()), uint8(GameStatus.DEFENDER_WINS)); + } + + /// @notice Tests that challenging the root claim's L2 block number with a bad output root + /// proof reverts. 
+ function test_challengeRootL2Block_badProof_reverts() public { + Types.OutputRootProof memory outputRootProof = + Types.OutputRootProof({ version: 0, stateRoot: 0, messagePasserStorageRoot: 0, latestBlockhash: 0 }); + + vm.expectRevert(InvalidOutputRootProof.selector); + gameProxy.challengeRootL2Block(outputRootProof, hex""); + } + + /// @notice Tests that challenging the root claim's L2 block number with a bad output root + /// proof reverts. + function test_challengeRootL2Block_badHeaderRLP_reverts() public { + Types.OutputRootProof memory outputRootProof = + Types.OutputRootProof({ version: 0, stateRoot: 0, messagePasserStorageRoot: 0, latestBlockhash: 0 }); + bytes32 outputRoot = Hashing.hashOutputRootProof(outputRootProof); + + // Create the dispute game with the output root at the wrong L2 block number. + IDisputeGame game = disputeGameFactory.create{ value: initBond }( + GAME_TYPE, Claim.wrap(outputRoot), abi.encode(validL2BlockNumber) + ); + IFaultDisputeGameV2 fdg = IFaultDisputeGameV2(address(game)); + + vm.expectRevert(InvalidHeaderRLP.selector); + fdg.challengeRootL2Block(outputRootProof, hex""); + } + + /// @notice Tests that challenging the root claim's L2 block number with a bad output root + /// proof reverts. + function test_challengeRootL2Block_badHeaderRLPBlockNumberLength_reverts() public { + (Types.OutputRootProof memory outputRootProof, bytes32 outputRoot,) = + _generateOutputRootProof(0, 0, new bytes(64)); + + // Create the dispute game with the output root at the wrong L2 block number. + IDisputeGame game = disputeGameFactory.create{ value: initBond }( + GAME_TYPE, Claim.wrap(outputRoot), abi.encode(validL2BlockNumber) + ); + IFaultDisputeGameV2 fdg = IFaultDisputeGameV2(address(game)); + + vm.expectRevert(InvalidHeaderRLP.selector); + fdg.challengeRootL2Block(outputRootProof, hex""); + } +} + +/// @title FaultDisputeGame_Resolve_Test +/// @notice Tests the resolve functionality of the `FaultDisputeGame` contract. 
+contract FaultDisputeGameV2_Resolve_Test is FaultDisputeGameV2_TestInit { + /// @notice Static unit test for the correctness an uncontested root resolution. + function test_resolve_rootUncontested_succeeds() public { + vm.warp(block.timestamp + 3 days + 12 hours); + gameProxy.resolveClaim(0, 0); + assertEq(uint8(gameProxy.resolve()), uint8(GameStatus.DEFENDER_WINS)); + } + + /// @notice Static unit test for the correctness an uncontested root resolution. + function test_resolve_rootUncontestedClockNotExpired_succeeds() public { + vm.warp(block.timestamp + 3 days + 12 hours - 1 seconds); + vm.expectRevert(ClockNotExpired.selector); + gameProxy.resolveClaim(0, 0); + } + + /// @notice Static unit test for the correctness of a multi-part resolution of a single claim. + function test_resolve_multiPart_succeeds() public { + vm.deal(address(this), 10_000 ether); + + uint256 bond = _getRequiredBond(0); + for (uint256 i = 0; i < 2048; i++) { + (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: bond }(disputed, 0, Claim.wrap(bytes32(i))); + } + + // Warp past the clock period. + vm.warp(block.timestamp + 3 days + 12 hours + 1 seconds); + + // Resolve all children of the root subgame. Every single one of these will be uncontested. + for (uint256 i = 1; i <= 2048; i++) { + gameProxy.resolveClaim(i, 0); + } + + // Resolve the first half of the root claim subgame. + gameProxy.resolveClaim(0, 1024); + + // Fetch the resolution checkpoint for the root subgame and assert correctness. + (bool initCheckpoint, uint32 subgameIndex, Position leftmostPosition, address counteredBy) = + gameProxy.resolutionCheckpoints(0); + assertTrue(initCheckpoint); + assertEq(subgameIndex, 1024); + assertEq(leftmostPosition.raw(), Position.wrap(1).move(true).raw()); + assertEq(counteredBy, address(this)); + + // The root subgame should not be resolved. 
+ assertFalse(gameProxy.resolvedSubgames(0)); + vm.expectRevert(OutOfOrderResolution.selector); + gameProxy.resolve(); + + // Resolve the second half of the root claim subgame. + uint256 numToResolve = gameProxy.getNumToResolve(0); + assertEq(numToResolve, 1024); + gameProxy.resolveClaim(0, numToResolve); + + // Fetch the resolution checkpoint for the root subgame and assert correctness. + (initCheckpoint, subgameIndex, leftmostPosition, counteredBy) = gameProxy.resolutionCheckpoints(0); + assertTrue(initCheckpoint); + assertEq(subgameIndex, 2048); + assertEq(leftmostPosition.raw(), Position.wrap(1).move(true).raw()); + assertEq(counteredBy, address(this)); + + // The root subgame should now be resolved + assertTrue(gameProxy.resolvedSubgames(0)); + assertEq(uint8(gameProxy.resolve()), uint8(GameStatus.CHALLENGER_WINS)); + } + + /// @notice Static unit test asserting that resolve reverts when the absolute root + /// subgame has not been resolved. + function test_resolve_rootUncontestedButUnresolved_reverts() public { + vm.warp(block.timestamp + 3 days + 12 hours); + vm.expectRevert(OutOfOrderResolution.selector); + gameProxy.resolve(); + } + + /// @notice Static unit test asserting that resolve reverts when the game state is + /// not in progress. + function test_resolve_notInProgress_reverts() public { + uint256 chalWins = uint256(GameStatus.CHALLENGER_WINS); + + // Replace the game status in storage. It exists in slot 0 at offset 16. + uint256 slot = uint256(vm.load(address(gameProxy), bytes32(0))); + uint256 offset = 16 << 3; + uint256 mask = 0xFF << offset; + // Replace the byte in the slot value with the challenger wins status. + slot = (slot & ~mask) | (chalWins << offset); + + vm.store(address(gameProxy), bytes32(uint256(0)), bytes32(slot)); + vm.expectRevert(GameNotInProgress.selector); + gameProxy.resolveClaim(0, 0); + } + + /// @notice Static unit test for the correctness of resolving a single attack game state. 
+ function test_resolve_rootContested_succeeds() public { + (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim()); + + vm.warp(block.timestamp + 3 days + 12 hours); + + gameProxy.resolveClaim(1, 0); + gameProxy.resolveClaim(0, 0); + assertEq(uint8(gameProxy.resolve()), uint8(GameStatus.CHALLENGER_WINS)); + } + + /// @notice Static unit test for the correctness of resolving a game with a contested challenge + /// claim. + function test_resolve_challengeContested_succeeds() public { + (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(1); + gameProxy.defend{ value: _getRequiredBond(1) }(disputed, 1, _dummyClaim()); + + vm.warp(block.timestamp + 3 days + 12 hours); + + gameProxy.resolveClaim(2, 0); + gameProxy.resolveClaim(1, 0); + gameProxy.resolveClaim(0, 0); + assertEq(uint8(gameProxy.resolve()), uint8(GameStatus.DEFENDER_WINS)); + } + + /// @notice Static unit test for the correctness of resolving a game with multiplayer moves. 
+ function test_resolve_teamDeathmatch_succeeds() public { + (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim()); + gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(1); + gameProxy.defend{ value: _getRequiredBond(1) }(disputed, 1, _dummyClaim()); + gameProxy.defend{ value: _getRequiredBond(1) }(disputed, 1, _dummyClaim()); + + vm.warp(block.timestamp + 3 days + 12 hours); + + gameProxy.resolveClaim(4, 0); + gameProxy.resolveClaim(3, 0); + gameProxy.resolveClaim(2, 0); + gameProxy.resolveClaim(1, 0); + gameProxy.resolveClaim(0, 0); + assertEq(uint8(gameProxy.resolve()), uint8(GameStatus.CHALLENGER_WINS)); + } + + /// @notice Static unit test for the correctness of resolving a game that reaches max game + /// depth. + function test_resolve_stepReached_succeeds() public { + Claim claim = _dummyClaim(); + for (uint256 i; i < gameProxy.splitDepth(); i++) { + (,,,, Claim disputed,,) = gameProxy.claimData(i); + gameProxy.attack{ value: _getRequiredBond(i) }(disputed, i, claim); + } + + claim = _changeClaimStatus(claim, VMStatuses.PANIC); + for (uint256 i = gameProxy.claimDataLen() - 1; i < gameProxy.maxGameDepth(); i++) { + (,,,, Claim disputed,,) = gameProxy.claimData(i); + gameProxy.attack{ value: _getRequiredBond(i) }(disputed, i, claim); + } + + vm.warp(block.timestamp + 3 days + 12 hours); + + for (uint256 i = 9; i > 0; i--) { + gameProxy.resolveClaim(i - 1, 0); + } + assertEq(uint8(gameProxy.resolve()), uint8(GameStatus.DEFENDER_WINS)); + } + + /// @notice Static unit test asserting that resolve reverts when attempting to resolve a + /// subgame multiple times + function test_resolve_claimAlreadyResolved_reverts() public { + Claim claim = _dummyClaim(); + uint256 firstBond = _getRequiredBond(0); + vm.deal(address(this), firstBond); + (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: firstBond 
}(disputed, 0, claim); + uint256 secondBond = _getRequiredBond(1); + vm.deal(address(this), secondBond); + (,,,, disputed,,) = gameProxy.claimData(1); + gameProxy.attack{ value: secondBond }(disputed, 1, claim); + + vm.warp(block.timestamp + 3 days + 12 hours); + + gameProxy.resolveClaim(2, 0); + gameProxy.resolveClaim(1, 0); + + vm.expectRevert(ClaimAlreadyResolved.selector); + gameProxy.resolveClaim(1, 0); + } + + /// @notice Static unit test asserting that resolve reverts when attempting to resolve a + /// subgame at max depth + function test_resolve_claimAtMaxDepthAlreadyResolved_reverts() public { + Claim claim = _dummyClaim(); + for (uint256 i; i < gameProxy.splitDepth(); i++) { + (,,,, Claim disputed,,) = gameProxy.claimData(i); + gameProxy.attack{ value: _getRequiredBond(i) }(disputed, i, claim); + } + + vm.deal(address(this), 10000 ether); + claim = _changeClaimStatus(claim, VMStatuses.PANIC); + for (uint256 i = gameProxy.claimDataLen() - 1; i < gameProxy.maxGameDepth(); i++) { + (,,,, Claim disputed,,) = gameProxy.claimData(i); + gameProxy.attack{ value: _getRequiredBond(i) }(disputed, i, claim); + } + + vm.warp(block.timestamp + 3 days + 12 hours); + + gameProxy.resolveClaim(8, 0); + + vm.expectRevert(ClaimAlreadyResolved.selector); + gameProxy.resolveClaim(8, 0); + } + + /// @notice Static unit test asserting that resolve reverts when attempting to resolve + /// subgames out of order + function test_resolve_outOfOrderResolution_reverts() public { + (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(1); + gameProxy.attack{ value: _getRequiredBond(1) }(disputed, 1, _dummyClaim()); + + vm.warp(block.timestamp + 3 days + 12 hours); + + vm.expectRevert(OutOfOrderResolution.selector); + gameProxy.resolveClaim(0, 0); + } + + /// @notice Static unit test asserting that resolve pays out bonds on step, output bisection, + /// and execution trace 
moves. + function test_resolve_bondPayouts_succeeds() public { + // Give the test contract some ether + uint256 bal = 1000 ether; + vm.deal(address(this), bal); + + // Make claims all the way down the tree. + uint256 bond = _getRequiredBond(0); + uint256 totalBonded = bond; + (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: bond }(disputed, 0, _dummyClaim()); + bond = _getRequiredBond(1); + totalBonded += bond; + (,,,, disputed,,) = gameProxy.claimData(1); + gameProxy.attack{ value: bond }(disputed, 1, _dummyClaim()); + bond = _getRequiredBond(2); + totalBonded += bond; + (,,,, disputed,,) = gameProxy.claimData(2); + gameProxy.attack{ value: bond }(disputed, 2, _dummyClaim()); + bond = _getRequiredBond(3); + totalBonded += bond; + (,,,, disputed,,) = gameProxy.claimData(3); + gameProxy.attack{ value: bond }(disputed, 3, _dummyClaim()); + bond = _getRequiredBond(4); + totalBonded += bond; + (,,,, disputed,,) = gameProxy.claimData(4); + gameProxy.attack{ value: bond }(disputed, 4, _changeClaimStatus(_dummyClaim(), VMStatuses.PANIC)); + bond = _getRequiredBond(5); + totalBonded += bond; + (,,,, disputed,,) = gameProxy.claimData(5); + gameProxy.attack{ value: bond }(disputed, 5, _dummyClaim()); + bond = _getRequiredBond(6); + totalBonded += bond; + (,,,, disputed,,) = gameProxy.claimData(6); + gameProxy.attack{ value: bond }(disputed, 6, _dummyClaim()); + bond = _getRequiredBond(7); + totalBonded += bond; + (,,,, disputed,,) = gameProxy.claimData(7); + gameProxy.attack{ value: bond }(disputed, 7, _dummyClaim()); + gameProxy.addLocalData(LocalPreimageKey.DISPUTED_L2_BLOCK_NUMBER, 8, 0); + gameProxy.step(8, true, absolutePrestateData, hex""); + + // Ensure that the step successfully countered the leaf claim. 
+ (, address counteredBy,,,,,) = gameProxy.claimData(8); + assertEq(counteredBy, address(this)); + + // Ensure we bonded the correct amounts + assertEq(address(this).balance, bal - totalBonded); + assertEq(address(gameProxy).balance, 0); + assertEq(delayedWeth.balanceOf(address(gameProxy)), initBond + totalBonded); + + // Resolve all claims + vm.warp(block.timestamp + 3 days + 12 hours); + for (uint256 i = gameProxy.claimDataLen(); i > 0; i--) { + (bool success,) = address(gameProxy).call(abi.encodeCall(gameProxy.resolveClaim, (i - 1, 0))); + assertTrue(success); + } + gameProxy.resolve(); + + // Wait for finalization delay + vm.warp(block.timestamp + 3.5 days + 1 seconds); + + // Close the game. + gameProxy.closeGame(); + + // Claim credit once to trigger unlock period. + gameProxy.claimCredit(address(this)); + + // Wait for the withdrawal delay. + vm.warp(block.timestamp + delayedWeth.delay() + 1 seconds); + + // Claim credit again to get the bond back. + gameProxy.claimCredit(address(this)); + + // Ensure that bonds were paid out correctly. + assertEq(address(this).balance, bal + initBond); + assertEq(address(gameProxy).balance, 0); + assertEq(delayedWeth.balanceOf(address(gameProxy)), 0); + + // Ensure that the init bond for the game is 0, in case we change it in the test suite in + // the future. + assertEq(disputeGameFactory.initBonds(GAME_TYPE), initBond); + } + + /// @notice Static unit test asserting that resolve pays out bonds on step, output bisection, + /// and execution trace moves with 2 actors and a dishonest root claim. + function test_resolve_bondPayoutsSeveralActors_succeeds() public { + // Give the test contract and bob some ether + // We use the "1000 ether" literal for `bal`, the initial balance, to avoid stack too deep + //uint256 bal = 1000 ether; + address bob = address(0xb0b); + vm.deal(address(this), 1000 ether); + vm.deal(bob, 1000 ether); + + // Make claims all the way down the tree, trading off between bob and the test contract. 
+ uint256 firstBond = _getRequiredBond(0); + uint256 thisBonded = firstBond; + (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: firstBond }(disputed, 0, _dummyClaim()); + + uint256 secondBond = _getRequiredBond(1); + uint256 bobBonded = secondBond; + (,,,, disputed,,) = gameProxy.claimData(1); + vm.prank(bob); + gameProxy.attack{ value: secondBond }(disputed, 1, _dummyClaim()); + + uint256 thirdBond = _getRequiredBond(2); + thisBonded += thirdBond; + (,,,, disputed,,) = gameProxy.claimData(2); + gameProxy.attack{ value: thirdBond }(disputed, 2, _dummyClaim()); + + uint256 fourthBond = _getRequiredBond(3); + bobBonded += fourthBond; + (,,,, disputed,,) = gameProxy.claimData(3); + vm.prank(bob); + gameProxy.attack{ value: fourthBond }(disputed, 3, _dummyClaim()); + + uint256 fifthBond = _getRequiredBond(4); + thisBonded += fifthBond; + (,,,, disputed,,) = gameProxy.claimData(4); + gameProxy.attack{ value: fifthBond }(disputed, 4, _changeClaimStatus(_dummyClaim(), VMStatuses.PANIC)); + + uint256 sixthBond = _getRequiredBond(5); + bobBonded += sixthBond; + (,,,, disputed,,) = gameProxy.claimData(5); + vm.prank(bob); + gameProxy.attack{ value: sixthBond }(disputed, 5, _dummyClaim()); + + uint256 seventhBond = _getRequiredBond(6); + thisBonded += seventhBond; + (,,,, disputed,,) = gameProxy.claimData(6); + gameProxy.attack{ value: seventhBond }(disputed, 6, _dummyClaim()); + + uint256 eighthBond = _getRequiredBond(7); + bobBonded += eighthBond; + (,,,, disputed,,) = gameProxy.claimData(7); + vm.prank(bob); + gameProxy.attack{ value: eighthBond }(disputed, 7, _dummyClaim()); + + gameProxy.addLocalData(LocalPreimageKey.DISPUTED_L2_BLOCK_NUMBER, 8, 0); + gameProxy.step(8, true, absolutePrestateData, hex""); + + // Ensure that the step successfully countered the leaf claim. 
+ (, address counteredBy,,,,,) = gameProxy.claimData(8); + assertEq(counteredBy, address(this)); + + // Ensure we bonded the correct amounts + assertEq(address(this).balance, 1000 ether - thisBonded); + assertEq(bob.balance, 1000 ether - bobBonded); + assertEq(address(gameProxy).balance, 0); + assertEq(delayedWeth.balanceOf(address(gameProxy)), initBond + thisBonded + bobBonded); + + // Resolve all claims + vm.warp(block.timestamp + 3 days + 12 hours); + for (uint256 i = gameProxy.claimDataLen(); i > 0; i--) { + (bool success,) = address(gameProxy).call(abi.encodeCall(gameProxy.resolveClaim, (i - 1, 0))); + assertTrue(success); + } + + // Resolve the game. + gameProxy.resolve(); + + // Wait for finalization delay + vm.warp(block.timestamp + 3.5 days + 1 seconds); + + // Close the game. + gameProxy.closeGame(); + + // Claim credit once to trigger unlock period. + gameProxy.claimCredit(address(this)); + gameProxy.claimCredit(bob); + + // Wait for the withdrawal delay. + vm.warp(block.timestamp + delayedWeth.delay() + 1 seconds); + + // Claim credit again to get the bond back. + gameProxy.claimCredit(address(this)); + + // Bob's claim should revert since it's value is 0 + vm.expectRevert(NoCreditToClaim.selector); + gameProxy.claimCredit(bob); + + // Ensure that bonds were paid out correctly. + assertEq(address(this).balance, 1000 ether + initBond + bobBonded); + assertEq(bob.balance, 1000 ether - bobBonded); + assertEq(address(gameProxy).balance, 0); + assertEq(delayedWeth.balanceOf(address(gameProxy)), 0); + + // Ensure that the init bond for the game is 0, in case we change it in the test suite in + // the future. + assertEq(disputeGameFactory.initBonds(GAME_TYPE), initBond); + } + + /// @notice Static unit test asserting that resolve pays out bonds on moves to the leftmost + /// actor in subgames containing successful counters. 
+ function test_resolve_leftmostBondPayout_succeeds() public { + uint256 bal = 1000 ether; + address alice = address(0xa11ce); + address bob = address(0xb0b); + address charlie = address(0xc0c); + vm.deal(address(this), bal); + vm.deal(alice, bal); + vm.deal(bob, bal); + vm.deal(charlie, bal); + + // Make claims with bob, charlie and the test contract on defense, and alice as the + // challenger charlie is successfully countered by alice alice is successfully countered by + // both bob and the test contract + uint256 firstBond = _getRequiredBond(0); + (,,,, Claim disputed,,) = gameProxy.claimData(0); + vm.prank(alice); + gameProxy.attack{ value: firstBond }(disputed, 0, _dummyClaim()); + + uint256 secondBond = _getRequiredBond(1); + (,,,, disputed,,) = gameProxy.claimData(1); + vm.prank(bob); + gameProxy.defend{ value: secondBond }(disputed, 1, _dummyClaim()); + vm.prank(charlie); + gameProxy.attack{ value: secondBond }(disputed, 1, _dummyClaim()); + gameProxy.attack{ value: secondBond }(disputed, 1, _dummyClaim()); + + uint256 thirdBond = _getRequiredBond(3); + (,,,, disputed,,) = gameProxy.claimData(3); + vm.prank(alice); + gameProxy.attack{ value: thirdBond }(disputed, 3, _dummyClaim()); + + // Resolve all claims + vm.warp(block.timestamp + 3 days + 12 hours); + for (uint256 i = gameProxy.claimDataLen(); i > 0; i--) { + (bool success,) = address(gameProxy).call(abi.encodeCall(gameProxy.resolveClaim, (i - 1, 0))); + assertTrue(success); + } + gameProxy.resolve(); + + // Wait for finalization delay + vm.warp(block.timestamp + 3.5 days + 1 seconds); + + // Close the game. + gameProxy.closeGame(); + + // Claim credit once to trigger unlock period. + gameProxy.claimCredit(address(this)); + gameProxy.claimCredit(alice); + gameProxy.claimCredit(bob); + gameProxy.claimCredit(charlie); + + // Wait for the withdrawal delay. + vm.warp(block.timestamp + delayedWeth.delay() + 1 seconds); + + // All of these claims should work. 
+ gameProxy.claimCredit(address(this)); + gameProxy.claimCredit(alice); + gameProxy.claimCredit(bob); + + // Charlie's claim should revert since it's value is 0 + vm.expectRevert(NoCreditToClaim.selector); + gameProxy.claimCredit(charlie); + + // Ensure that bonds were paid out correctly. + uint256 aliceLosses = firstBond; + uint256 charlieLosses = secondBond; + assertEq(address(this).balance, bal + aliceLosses + initBond, "incorrect this balance"); + assertEq(alice.balance, bal - aliceLosses + charlieLosses, "incorrect alice balance"); + assertEq(bob.balance, bal, "incorrect bob balance"); + assertEq(charlie.balance, bal - charlieLosses, "incorrect charlie balance"); + assertEq(address(gameProxy).balance, 0); + + // Ensure that the init bond for the game is 0, in case we change it in the test suite in + // the future. + assertEq(disputeGameFactory.initBonds(GAME_TYPE), initBond); + } + + /// @notice Static unit test asserting that the anchor state updates when the game resolves in + /// favor of the defender and the anchor state is older than the game state. + function test_resolve_validNewerStateUpdatesAnchor_succeeds() public { + // Confirm that the anchor state is older than the game state. + (Hash root, uint256 l2BlockNumber) = anchorStateRegistry.anchors(gameProxy.gameType()); + assert(l2BlockNumber < gameProxy.l2BlockNumber()); + + // Resolve the game. + vm.warp(block.timestamp + 3 days + 12 hours); + gameProxy.resolveClaim(0, 0); + assertEq(uint8(gameProxy.resolve()), uint8(GameStatus.DEFENDER_WINS)); + + // Wait for finalization delay. + vm.warp(block.timestamp + 3.5 days + 1 seconds); + + // Close the game. + gameProxy.closeGame(); + + // Confirm that the anchor state is now the same as the game state. 
+ (root, l2BlockNumber) = anchorStateRegistry.anchors(gameProxy.gameType()); + assertEq(l2BlockNumber, gameProxy.l2BlockNumber()); + assertEq(root.raw(), gameProxy.rootClaim().raw()); + } + + /// @notice Static unit test asserting that the anchor state does not change when the game + /// resolves in favor of the defender but the game state is not newer than the anchor + /// state. + function test_resolve_validOlderStateSameAnchor_succeeds() public { + // Mock the game block to be older than the game state. + vm.mockCall(address(gameProxy), abi.encodeCall(gameProxy.l2SequenceNumber, ()), abi.encode(0)); + + // Confirm that the anchor state is newer than the game state. + (Hash root, uint256 l2BlockNumber) = anchorStateRegistry.anchors(gameProxy.gameType()); + assert(l2BlockNumber >= gameProxy.l2SequenceNumber()); + + // Resolve the game. + vm.mockCall(address(gameProxy), abi.encodeCall(gameProxy.l2SequenceNumber, ()), abi.encode(0)); + vm.warp(block.timestamp + 3 days + 12 hours); + gameProxy.resolveClaim(0, 0); + assertEq(uint8(gameProxy.resolve()), uint8(GameStatus.DEFENDER_WINS)); + + // Wait for finalization delay. + vm.warp(block.timestamp + 3.5 days + 1 seconds); + + // Close the game. + gameProxy.closeGame(); + + // Confirm that the anchor state is the same as the initial anchor state. + (Hash updatedRoot, uint256 updatedL2BlockNumber) = anchorStateRegistry.anchors(gameProxy.gameType()); + assertEq(updatedL2BlockNumber, l2BlockNumber); + assertEq(updatedRoot.raw(), root.raw()); + } + + /// @notice Static unit test asserting that the anchor state does not change when the game + /// resolves in favor of the challenger, even if the game state is newer than the + /// anchor state. + function test_resolve_invalidStateSameAnchor_succeeds() public { + // Confirm that the anchor state is older than the game state. 
+ (Hash root, uint256 l2BlockNumber) = anchorStateRegistry.anchors(gameProxy.gameType()); + assert(l2BlockNumber < gameProxy.l2BlockNumber()); + + // Challenge the claim and resolve it. + (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim()); + vm.warp(block.timestamp + 3 days + 12 hours); + gameProxy.resolveClaim(1, 0); + gameProxy.resolveClaim(0, 0); + assertEq(uint8(gameProxy.resolve()), uint8(GameStatus.CHALLENGER_WINS)); + + // Wait for finalization delay. + vm.warp(block.timestamp + 3.5 days + 1 seconds); + + // Close the game. + gameProxy.closeGame(); + + // Confirm that the anchor state is the same as the initial anchor state. + (Hash updatedRoot, uint256 updatedL2BlockNumber) = anchorStateRegistry.anchors(gameProxy.gameType()); + assertEq(updatedL2BlockNumber, l2BlockNumber); + assertEq(updatedRoot.raw(), root.raw()); + } +} + +/// @title FaultDisputeGame_GameType_Test +/// @notice Tests the `gameType` function of the `FaultDisputeGame` contract. +contract FaultDisputeGameV2_GameType_Test is FaultDisputeGameV2_TestInit { + /// @notice Tests that the game's type is set correctly. + function test_gameType_succeeds() public view { + assertEq(gameProxy.gameType().raw(), GAME_TYPE.raw()); + } +} + +/// @title FaultDisputeGame_RootClaim_Test +/// @notice Tests the `rootClaim` function of the `FaultDisputeGame` contract. +contract FaultDisputeGameV2_RootClaim_Test is FaultDisputeGameV2_TestInit { + /// @notice Tests that the game's root claim is set correctly. + function test_rootClaim_succeeds() public view { + assertEq(gameProxy.rootClaim().raw(), ROOT_CLAIM.raw()); + } +} + +/// @title FaultDisputeGame_ExtraData_Test +/// @notice Tests the `extraData` function of the `FaultDisputeGame` contract. +contract FaultDisputeGameV2_ExtraData_Test is FaultDisputeGameV2_TestInit { + /// @notice Tests that the game's extra data is set correctly. 
+ function test_extraData_succeeds() public view { + assertEq(gameProxy.extraData(), extraData); + } +} + +/// @title FaultDisputeGame_GameData_Test +/// @notice Tests the `gameData` function of the `FaultDisputeGame` contract. +contract FaultDisputeGameV2_GameData_Test is FaultDisputeGameV2_TestInit { + /// @notice Tests that the game's data is set correctly. + function test_gameData_succeeds() public view { + (GameType gameType, Claim rootClaim, bytes memory _extraData) = gameProxy.gameData(); + + assertEq(gameType.raw(), GAME_TYPE.raw()); + assertEq(rootClaim.raw(), ROOT_CLAIM.raw()); + assertEq(_extraData, extraData); + } +} + +/// @title FaultDisputeGame_GetRequiredBond_Test +/// @notice Tests the `getRequiredBond` function of the `FaultDisputeGame` contract. +contract FaultDisputeGameV2_GetRequiredBond_Test is FaultDisputeGameV2_TestInit { + /// @notice Tests that the bond during the bisection game depths is correct. + function test_getRequiredBond_succeeds() public view { + for (uint8 i = 0; i < uint8(gameProxy.splitDepth()); i++) { + Position pos = LibPosition.wrap(i, 0); + uint256 bond = gameProxy.getRequiredBond(pos); + + // Reasonable approximation for a max depth of 8. + uint256 expected = 0.08 ether; + for (uint64 j = 0; j < i; j++) { + expected = expected * 22876; + expected = expected / 10000; + } + + assertApproxEqAbs(bond, expected, 0.01 ether); + } + } + + /// @notice Tests that the bond at a depth greater than the maximum game depth reverts. + function test_getRequiredBond_outOfBounds_reverts() public { + Position pos = LibPosition.wrap(uint8(gameProxy.maxGameDepth() + 1), 0); + vm.expectRevert(GameDepthExceeded.selector); + gameProxy.getRequiredBond(pos); + } +} + +/// @title FaultDisputeGame_ClaimCredit_Test +/// @notice Tests the claimCredit functionality of the `FaultDisputeGame` contract. 
+contract FaultDisputeGameV2_ClaimCredit_Test is FaultDisputeGameV2_TestInit { + function test_claimCredit_refundMode_succeeds() public { + // Set up actors. + address alice = address(0xa11ce); + address bob = address(0xb0b); + + // Give the game proxy 1 extra ether, unregistered. + vm.deal(address(gameProxy), 1 ether); + + // Perform a bonded move. + Claim claim = _dummyClaim(); + + // Bond the first claim. + uint256 firstBond = _getRequiredBond(0); + vm.deal(alice, firstBond); + (,,,, Claim disputed,,) = gameProxy.claimData(0); + vm.prank(alice); + gameProxy.attack{ value: firstBond }(disputed, 0, claim); + + // Bond the second claim. + uint256 secondBond = _getRequiredBond(1); + vm.deal(bob, secondBond); + (,,,, disputed,,) = gameProxy.claimData(1); + vm.prank(bob); + gameProxy.attack{ value: secondBond }(disputed, 1, claim); + + // Warp past the finalization period + vm.warp(block.timestamp + 3 days + 12 hours); + + // Resolve the game. + // Second claim wins, so bob should get alice's credit. + gameProxy.resolveClaim(2, 0); + gameProxy.resolveClaim(1, 0); + gameProxy.resolveClaim(0, 0); + gameProxy.resolve(); + + // Wait for finalization delay. + vm.warp(block.timestamp + 3.5 days + 1 seconds); + + // Mock that the game proxy is not proper, trigger refund mode. + vm.mockCall( + address(anchorStateRegistry), + abi.encodeCall(anchorStateRegistry.isGameProper, (gameProxy)), + abi.encode(false) + ); + + // Close the game. + gameProxy.closeGame(); + + // Assert bond distribution mode is refund mode. + assertTrue(gameProxy.bondDistributionMode() == BondDistributionMode.REFUND); + + // Claim credit once to trigger unlock period. + gameProxy.claimCredit(alice); + gameProxy.claimCredit(bob); + + // Wait for the withdrawal delay. + vm.warp(block.timestamp + delayedWeth.delay() + 1 seconds); + + // Grab balances before claim. + uint256 aliceBalanceBefore = alice.balance; + uint256 bobBalanceBefore = bob.balance; + + // Claim credit again to get the bond back. 
+ gameProxy.claimCredit(alice); + gameProxy.claimCredit(bob); + + // Should have original balance again. + assertEq(alice.balance, aliceBalanceBefore + firstBond); + assertEq(bob.balance, bobBalanceBefore + secondBond); + } + + /// @notice Tests that claimCredit reverts if the game is paused. + function test_claimCredit_gamePaused_reverts() public { + // Pause the system with the Superchain-wide identifier (address(0)). + vm.prank(superchainConfig.guardian()); + superchainConfig.pause(address(0)); + + // Attempting to claim credit should now revert. + vm.expectRevert(GamePaused.selector); + gameProxy.claimCredit(address(0)); + } + + /// @notice Static unit test asserting that credit may not be drained past allowance through + /// reentrancy. + function test_claimCredit_claimAlreadyResolved_reverts() public { + ClaimCreditReenter reenter = new ClaimCreditReenter(gameProxy, vm); + vm.startPrank(address(reenter)); + + // Give the game proxy 1 extra ether, unregistered. + vm.deal(address(gameProxy), 1 ether); + + // Perform a bonded move. + Claim claim = _dummyClaim(); + uint256 firstBond = _getRequiredBond(0); + vm.deal(address(reenter), firstBond); + (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: firstBond }(disputed, 0, claim); + uint256 secondBond = _getRequiredBond(1); + vm.deal(address(reenter), secondBond); + (,,,, disputed,,) = gameProxy.claimData(1); + gameProxy.attack{ value: secondBond }(disputed, 1, claim); + uint256 reenterBond = firstBond + secondBond; + + // Warp past the finalization period + vm.warp(block.timestamp + 3 days + 12 hours); + + // Ensure that we bonded all the test contract's ETH + assertEq(address(reenter).balance, 0); + // Ensure the game proxy has 1 ether in it. + assertEq(address(gameProxy).balance, 1 ether); + // Ensure the game has a balance of reenterBond in the delayedWeth contract. 
+ assertEq(delayedWeth.balanceOf(address(gameProxy)), initBond + reenterBond); + + // Resolve the claim at index 2 first so that index 1 can be resolved. + gameProxy.resolveClaim(2, 0); + + // Resolve the claim at index 1 and claim the reenter contract's credit. + gameProxy.resolveClaim(1, 0); + + // Ensure that the game registered the `reenter` contract's credit. + assertEq(gameProxy.credit(address(reenter)), reenterBond); + + // Resolve the root claim. + gameProxy.resolveClaim(0, 0); + + // Resolve the game. + gameProxy.resolve(); + + // Wait for finalization delay. + vm.warp(block.timestamp + 3.5 days + 1 seconds); + + // Close the game. + gameProxy.closeGame(); + + // Claim credit once to trigger unlock period. + gameProxy.claimCredit(address(reenter)); + + // Wait for the withdrawal delay. + vm.warp(block.timestamp + delayedWeth.delay() + 1 seconds); + + // Initiate the reentrant credit claim. + reenter.claimCredit(address(reenter)); + + // The reenter contract should have performed 2 calls to `claimCredit`. + // Once all the credit is claimed, all subsequent calls will revert since there is 0 credit + // left to claim. + // The claimant must only have received the amount bonded for the gindex 1 subgame. + // The root claim bond and the unregistered ETH should still exist in the game proxy. + assertEq(reenter.numCalls(), 2); + assertEq(address(reenter).balance, reenterBond); + assertEq(address(gameProxy).balance, 1 ether); + assertEq(delayedWeth.balanceOf(address(gameProxy)), initBond); + + vm.stopPrank(); + } + + /// @notice Tests that claimCredit reverts when recipient can't receive value. + function test_claimCredit_recipientCantReceiveValue_reverts() public { + // Set up actors. + address alice = address(0xa11ce); + address bob = address(0xb0b); + + // Give the game proxy 1 extra ether, unregistered. + vm.deal(address(gameProxy), 1 ether); + + // Perform a bonded move. + Claim claim = _dummyClaim(); + + // Bond the first claim. 
+ uint256 firstBond = _getRequiredBond(0); + vm.deal(alice, firstBond); + (,,,, Claim disputed,,) = gameProxy.claimData(0); + vm.prank(alice); + gameProxy.attack{ value: firstBond }(disputed, 0, claim); + + // Bond the second claim. + uint256 secondBond = _getRequiredBond(1); + vm.deal(bob, secondBond); + (,,,, disputed,,) = gameProxy.claimData(1); + vm.prank(bob); + gameProxy.attack{ value: secondBond }(disputed, 1, claim); + + // Warp past the finalization period + vm.warp(block.timestamp + 3 days + 12 hours); + + // Resolve the game. + // Second claim wins, so bob should get alice's credit. + gameProxy.resolveClaim(2, 0); + gameProxy.resolveClaim(1, 0); + gameProxy.resolveClaim(0, 0); + gameProxy.resolve(); + + // Wait for finalization delay. + vm.warp(block.timestamp + 3.5 days + 1 seconds); + + // Close the game. + gameProxy.closeGame(); + + // Claim credit once to trigger unlock period. + gameProxy.claimCredit(alice); + gameProxy.claimCredit(bob); + + // Wait for the withdrawal delay. + vm.warp(block.timestamp + delayedWeth.delay() + 1 seconds); + + // Make bob not be able to receive value by setting his contract code to something without + // `receive` + vm.etch(address(bob), address(L1Token).code); + + vm.expectRevert(BondTransferFailed.selector); + gameProxy.claimCredit(address(bob)); + } +} + +/// @title FaultDisputeGame_CloseGame_Test +/// @notice Tests the closeGame functionality of the `FaultDisputeGame` contract. +contract FaultDisputeGameV2_CloseGame_Test is FaultDisputeGameV2_TestInit { + /// @notice Tests that closeGame reverts if the game is not resolved + function test_closeGame_gameNotResolved_reverts() public { + vm.expectRevert(GameNotResolved.selector); + gameProxy.closeGame(); + } + + /// @notice Tests that closeGame reverts if the game is paused + function test_closeGame_gamePaused_reverts() public { + // Pause the system with the Superchain-wide identifier (address(0)). 
+ vm.prank(superchainConfig.guardian()); + superchainConfig.pause(address(0)); + + // Attempting to close the game should now revert. + vm.expectRevert(GamePaused.selector); + gameProxy.closeGame(); + } + + /// @notice Tests that closeGame reverts if the game is not finalized + function test_closeGame_gameNotFinalized_reverts() public { + // Resolve the game + vm.warp(block.timestamp + 3 days + 12 hours); + gameProxy.resolveClaim(0, 0); + gameProxy.resolve(); + + // Don't wait the finalization delay + vm.expectRevert(GameNotFinalized.selector); + gameProxy.closeGame(); + } + + /// @notice Tests that closeGame succeeds for a proper game (normal distribution) + function test_closeGame_properGame_succeeds() public { + // Resolve the game + vm.warp(block.timestamp + 3 days + 12 hours); + gameProxy.resolveClaim(0, 0); + gameProxy.resolve(); + + // Wait for finalization delay + vm.warp(block.timestamp + 3.5 days + 1 seconds); + + // Close the game and verify normal distribution mode + vm.expectEmit(true, true, true, true); + emit GameClosed(BondDistributionMode.NORMAL); + gameProxy.closeGame(); + assertEq(uint8(gameProxy.bondDistributionMode()), uint8(BondDistributionMode.NORMAL)); + + // Check that the anchor state was set correctly. 
+ assertEq(address(gameProxy.anchorStateRegistry().anchorGame()), address(gameProxy)); + } + + /// @notice Tests that closeGame succeeds for an improper game (refund mode) + function test_closeGame_improperGame_succeeds() public { + // Resolve the game + vm.warp(block.timestamp + 3 days + 12 hours); + gameProxy.resolveClaim(0, 0); + gameProxy.resolve(); + + // Wait for finalization delay + vm.warp(block.timestamp + 3.5 days + 1 seconds); + + // Mock the anchor registry to return improper game + vm.mockCall( + address(anchorStateRegistry), + abi.encodeCall(anchorStateRegistry.isGameProper, (IDisputeGame(address(gameProxy)))), + abi.encode(false, "") + ); + + // Close the game and verify refund mode + vm.expectEmit(true, true, true, true); + emit GameClosed(BondDistributionMode.REFUND); + gameProxy.closeGame(); + assertEq(uint8(gameProxy.bondDistributionMode()), uint8(BondDistributionMode.REFUND)); + } + + /// @notice Tests that multiple calls to closeGame succeed after initial distribution mode is + /// set + function test_closeGame_multipleCallsAfterSet_succeeds() public { + // Resolve and close the game first + vm.warp(block.timestamp + 3 days + 12 hours); + gameProxy.resolveClaim(0, 0); + gameProxy.resolve(); + + // Wait for finalization delay + vm.warp(block.timestamp + 3.5 days + 1 seconds); + + // First close sets the mode + gameProxy.closeGame(); + assertEq(uint8(gameProxy.bondDistributionMode()), uint8(BondDistributionMode.NORMAL)); + + // Subsequent closes should succeed without changing the mode + gameProxy.closeGame(); + assertEq(uint8(gameProxy.bondDistributionMode()), uint8(BondDistributionMode.NORMAL)); + + gameProxy.closeGame(); + assertEq(uint8(gameProxy.bondDistributionMode()), uint8(BondDistributionMode.NORMAL)); + } + + /// @notice Tests that closeGame called with any amount of gas either reverts (with OOG) or + /// updates the anchor state. 
This is specifically to verify that the try/catch inside + /// closeGame can't be called with just enough gas to OOG when calling the + /// AnchorStateRegistry but successfully execute the remainder of the function. + /// @param _gas Amount of gas to provide to closeGame. + function testFuzz_closeGame_canUpdateAnchorStateAndDoes_succeeds(uint256 _gas) public { + // Resolve and close the game first + vm.warp(block.timestamp + 3 days + 12 hours); + gameProxy.resolveClaim(0, 0); + gameProxy.resolve(); + + // Wait for finalization delay + vm.warp(block.timestamp + 3.5 days + 1 seconds); + + // Since providing *too* much gas isn't the issue here, bounding it to half the block gas + // limit is sufficient. We want to know that either (1) the function reverts or (2) the + // anchor state gets updated. If the function doesn't revert and the anchor state isn't + // updated then we have a problem. + _gas = bound(_gas, 0, block.gaslimit / 2); + + // The anchor state should not be the game proxy. + assert(address(gameProxy.anchorStateRegistry().anchorGame()) != address(gameProxy)); + + // Try closing the game. + try gameProxy.closeGame{ gas: _gas }() { + // If we got here, the function didn't revert, so the anchor state should have updated. + assert(address(gameProxy.anchorStateRegistry().anchorGame()) == address(gameProxy)); + } catch { + // Ok, function reverted. + } + } +} + +/// @title FaultDisputeGame_GetChallengerDuration_Test +/// @notice Tests the getChallengerDuration functionality and related resolution tests. 
+contract FaultDisputeGameV2_GetChallengerDuration_Test is FaultDisputeGameV2_TestInit { + /// @notice Tests that if the game is not in progress, querying of `getChallengerDuration` + /// reverts + function test_getChallengerDuration_gameNotInProgress_reverts() public { + // resolve the game + vm.warp(block.timestamp + gameProxy.maxClockDuration().raw()); + + gameProxy.resolveClaim(0, 0); + gameProxy.resolve(); + + vm.expectRevert(GameNotInProgress.selector); + gameProxy.getChallengerDuration(1); + } + + /// @notice Static unit test asserting that resolveClaim isn't possible if there's time left + /// for a counter. + function test_resolution_lastSecondDisputes_succeeds() public { + // The honest proposer created an honest root claim during setup - node 0 + + // Defender's turn + vm.warp(block.timestamp + 3.5 days - 1 seconds); + (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim()); + // Chess clock time accumulated: + assertEq(gameProxy.getChallengerDuration(0).raw(), 3.5 days - 1 seconds); + assertEq(gameProxy.getChallengerDuration(1).raw(), 0); + + // Advance time by 1 second, so that the root claim challenger clock is expired. + vm.warp(block.timestamp + 1 seconds); + // Attempt a second attack against the root claim. This should revert since the challenger + // clock is expired. + uint256 expectedBond = _getRequiredBond(0); + vm.expectRevert(ClockTimeExceeded.selector); + gameProxy.attack{ value: expectedBond }(disputed, 0, _dummyClaim()); + // Chess clock time accumulated: + assertEq(gameProxy.getChallengerDuration(0).raw(), 3.5 days); + assertEq(gameProxy.getChallengerDuration(1).raw(), 1 seconds); + + // Should not be able to resolve the root claim or second counter yet. 
+ vm.expectRevert(ClockNotExpired.selector); + gameProxy.resolveClaim(1, 0); + vm.expectRevert(OutOfOrderResolution.selector); + gameProxy.resolveClaim(0, 0); + + // Warp to the last second of the root claim defender clock. + vm.warp(block.timestamp + 3.5 days - 2 seconds); + // Attack the challenge to the root claim. This should succeed, since the defender clock is + // not expired. + (,,,, disputed,,) = gameProxy.claimData(1); + gameProxy.attack{ value: _getRequiredBond(1) }(disputed, 1, _dummyClaim()); + // Chess clock time accumulated: + assertEq(gameProxy.getChallengerDuration(0).raw(), 3.5 days); + assertEq(gameProxy.getChallengerDuration(1).raw(), 3.5 days - 1 seconds); + assertEq(gameProxy.getChallengerDuration(2).raw(), 3.5 days - gameProxy.clockExtension().raw()); + + // Should not be able to resolve any claims yet. + vm.expectRevert(ClockNotExpired.selector); + gameProxy.resolveClaim(2, 0); + vm.expectRevert(ClockNotExpired.selector); + gameProxy.resolveClaim(1, 0); + vm.expectRevert(OutOfOrderResolution.selector); + gameProxy.resolveClaim(0, 0); + + vm.warp(block.timestamp + gameProxy.clockExtension().raw() - 1 seconds); + + // Should not be able to resolve any claims yet. + vm.expectRevert(ClockNotExpired.selector); + gameProxy.resolveClaim(2, 0); + vm.expectRevert(OutOfOrderResolution.selector); + gameProxy.resolveClaim(1, 0); + vm.expectRevert(OutOfOrderResolution.selector); + gameProxy.resolveClaim(0, 0); + + // Chess clock time accumulated: + assertEq(gameProxy.getChallengerDuration(0).raw(), 3.5 days); + assertEq(gameProxy.getChallengerDuration(1).raw(), 3.5 days); + assertEq(gameProxy.getChallengerDuration(2).raw(), 3.5 days - 1 seconds); + + // Warp past the challenge period for the root claim defender. Defending the root claim + // should now revert. 
+ vm.warp(block.timestamp + 1 seconds); + expectedBond = _getRequiredBond(1); + vm.expectRevert(ClockTimeExceeded.selector); // no further move can be made + gameProxy.attack{ value: expectedBond }(disputed, 1, _dummyClaim()); + expectedBond = _getRequiredBond(2); + (,,,, disputed,,) = gameProxy.claimData(2); + vm.expectRevert(ClockTimeExceeded.selector); // no further move can be made + gameProxy.attack{ value: expectedBond }(disputed, 2, _dummyClaim()); + // Chess clock time accumulated: + assertEq(gameProxy.getChallengerDuration(0).raw(), 3.5 days); + assertEq(gameProxy.getChallengerDuration(1).raw(), 3.5 days); + assertEq(gameProxy.getChallengerDuration(2).raw(), 3.5 days); + + vm.expectRevert(OutOfOrderResolution.selector); + gameProxy.resolveClaim(1, 0); + vm.expectRevert(OutOfOrderResolution.selector); + gameProxy.resolveClaim(0, 0); + + // All clocks are expired. Resolve the game. + gameProxy.resolveClaim(2, 0); // Node 2 is resolved as UNCOUNTERED by default since it has no children + gameProxy.resolveClaim(1, 0); // Node 1 is resolved as COUNTERED since it has an UNCOUNTERED child + gameProxy.resolveClaim(0, 0); // Node 0 is resolved as UNCOUNTERED since it has no UNCOUNTERED children + + // Defender wins game since the root claim is uncountered + assertEq(uint8(gameProxy.resolve()), uint8(GameStatus.DEFENDER_WINS)); + } +} + +/// @title FaultDisputeGameV2_Uncategorized_Test +/// @notice General tests that are not testing any function directly of the `FaultDisputeGame` +/// contract or are testing multiple functions at once. +contract FaultDisputeGameV2_Uncategorized_Test is FaultDisputeGameV2_TestInit { + /// @notice Tests that the game's starting timestamp is set correctly. + function test_createdAt_succeeds() public view { + assertEq(gameProxy.createdAt().raw(), block.timestamp); + } + + /// @notice Tests that startingOutputRoot and it's getters are set correctly. 
+ function test_startingOutputRootGetters_succeeds() public view { + (Hash root, uint256 l2BlockNumber) = gameProxy.startingOutputRoot(); + (Hash anchorRoot, uint256 anchorRootBlockNumber) = anchorStateRegistry.anchors(GAME_TYPE); + + assertEq(gameProxy.startingBlockNumber(), l2BlockNumber); + assertEq(gameProxy.startingBlockNumber(), anchorRootBlockNumber); + assertEq(Hash.unwrap(gameProxy.startingRootHash()), Hash.unwrap(root)); + assertEq(Hash.unwrap(gameProxy.startingRootHash()), Hash.unwrap(anchorRoot)); + } + + /// @notice Tests that the user cannot control the first 4 bytes of the CWIA data, disallowing + /// them to control the entrypoint when no calldata is provided to a call. + function test_cwiaCalldata_userCannotControlSelector_succeeds() public { + // Construct the expected CWIA data that the proxy will pass to the implementation, + // alongside any extra calldata passed by the user. + Hash l1Head = gameProxy.l1Head(); + bytes memory cwiaData = abi.encodePacked(address(this), gameProxy.rootClaim(), l1Head, gameProxy.extraData()); + + // We expect a `ReceiveETH` event to be emitted when 0 bytes of calldata are sent; The + // fallback is always reached *within the minimal proxy* in `LibClone`'s version of + // `clones-with-immutable-args` + vm.expectEmit(false, false, false, true); + emit ReceiveETH(0); + // We expect no delegatecall to the implementation contract if 0 bytes are sent. Assert + // that this happens 0 times. + vm.expectCall(address(gameImpl), cwiaData, 0); + (bool successA,) = address(gameProxy).call(hex""); + assertTrue(successA); + + // When calldata is forwarded, we do expect a delegatecall to the implementation. 
+ bytes memory data = abi.encodePacked(gameProxy.l1Head.selector); + vm.expectCall(address(gameImpl), abi.encodePacked(data, cwiaData), 1); + (bool successB, bytes memory returnData) = address(gameProxy).call(data); + assertTrue(successB); + assertEq(returnData, abi.encode(l1Head)); + } +} + +contract FaultDispute_1v1_Actors_Test is FaultDisputeGameV2_TestInit { + /// @notice The honest actor + DisputeActor internal honest; + /// @notice The dishonest actor + DisputeActor internal dishonest; + + function setUp() public override { + // Setup the `FaultDisputeGame` + super.setUp(); + } + + /// @notice Fuzz test for a 1v1 output bisection dispute. + /// @notice The alphabet game has a constant status byte, and is not safe from someone being + /// dishonest in output bisection and then posting a correct execution trace bisection + /// root claim. This test does not cover this case (i.e. root claim of output bisection + /// is dishonest, root claim of execution trace bisection is made by the dishonest + /// actor but is honest, honest actor cannot attack it without risk of losing). + function testFuzz_outputBisection1v1honestRoot_succeeds(uint8 _divergeOutput, uint8 _divergeStep) public { + uint256[] memory honestL2Outputs = new uint256[](16); + for (uint256 i; i < honestL2Outputs.length; i++) { + honestL2Outputs[i] = i + 1; + } + bytes memory honestTrace = new bytes(256); + for (uint256 i; i < honestTrace.length; i++) { + honestTrace[i] = bytes1(uint8(i)); + } + + uint256 divergeAtOutput = bound(_divergeOutput, 0, 15); + uint256 divergeAtStep = bound(_divergeStep, 0, 7); + uint256 divergeStepOffset = (divergeAtOutput << 4) + divergeAtStep; + + uint256[] memory dishonestL2Outputs = new uint256[](16); + for (uint256 i; i < dishonestL2Outputs.length; i++) { + dishonestL2Outputs[i] = i >= divergeAtOutput ? 0xFF : i + 1; + } + bytes memory dishonestTrace = new bytes(256); + for (uint256 i; i < dishonestTrace.length; i++) { + dishonestTrace[i] = i >= divergeStepOffset ? 
bytes1(uint8(0xFF)) : bytes1(uint8(i)); + } + + // Run the actor test + _actorTest({ + _rootClaim: 16, + _absolutePrestateData: 0, + _honestTrace: honestTrace, + _honestL2Outputs: honestL2Outputs, + _dishonestTrace: dishonestTrace, + _dishonestL2Outputs: dishonestL2Outputs, + _expectedStatus: GameStatus.DEFENDER_WINS + }); + } + + /// @notice Static unit test for a 1v1 output bisection dispute. + function test_static_1v1honestRootGenesisAbsolutePrestate_succeeds() public { + // The honest l2 outputs are from [1, 16] in this game. + uint256[] memory honestL2Outputs = new uint256[](16); + for (uint256 i; i < honestL2Outputs.length; i++) { + honestL2Outputs[i] = i + 1; + } + // The honest trace covers all block -> block + 1 transitions, and is 256 bytes long, + // consisting of bytes [0, 255]. + bytes memory honestTrace = new bytes(256); + for (uint256 i; i < honestTrace.length; i++) { + honestTrace[i] = bytes1(uint8(i)); + } + + // The dishonest l2 outputs are from [2, 17] in this game. + uint256[] memory dishonestL2Outputs = new uint256[](16); + for (uint256 i; i < dishonestL2Outputs.length; i++) { + dishonestL2Outputs[i] = i + 2; + } + // The dishonest trace covers all block -> block + 1 transitions, and is 256 bytes long, + // consisting of all set bits. + bytes memory dishonestTrace = new bytes(256); + for (uint256 i; i < dishonestTrace.length; i++) { + dishonestTrace[i] = bytes1(0xFF); + } + + // Run the actor test + _actorTest({ + _rootClaim: 16, + _absolutePrestateData: 0, + _honestTrace: honestTrace, + _honestL2Outputs: honestL2Outputs, + _dishonestTrace: dishonestTrace, + _dishonestL2Outputs: dishonestL2Outputs, + _expectedStatus: GameStatus.DEFENDER_WINS + }); + } + + /// @notice Static unit test for a 1v1 output bisection dispute. + function test_static_1v1dishonestRootGenesisAbsolutePrestate_succeeds() public { + // The honest l2 outputs are from [1, 16] in this game. 
+ uint256[] memory honestL2Outputs = new uint256[](16); + for (uint256 i; i < honestL2Outputs.length; i++) { + honestL2Outputs[i] = i + 1; + } + // The honest trace covers all block -> block + 1 transitions, and is 256 bytes long, + // consisting of bytes [0, 255]. + bytes memory honestTrace = new bytes(256); + for (uint256 i; i < honestTrace.length; i++) { + honestTrace[i] = bytes1(uint8(i)); + } + + // The dishonest l2 outputs are from [2, 17] in this game. + uint256[] memory dishonestL2Outputs = new uint256[](16); + for (uint256 i; i < dishonestL2Outputs.length; i++) { + dishonestL2Outputs[i] = i + 2; + } + // The dishonest trace covers all block -> block + 1 transitions, and is 256 bytes long, consisting + // of all set bits. + bytes memory dishonestTrace = new bytes(256); + for (uint256 i; i < dishonestTrace.length; i++) { + dishonestTrace[i] = bytes1(0xFF); + } + + // Run the actor test + _actorTest({ + _rootClaim: 17, + _absolutePrestateData: 0, + _honestTrace: honestTrace, + _honestL2Outputs: honestL2Outputs, + _dishonestTrace: dishonestTrace, + _dishonestL2Outputs: dishonestL2Outputs, + _expectedStatus: GameStatus.CHALLENGER_WINS + }); + } + + /// @notice Static unit test for a 1v1 output bisection dispute. + function test_static_1v1honestRoot_succeeds() public { + // The honest l2 outputs are from [1, 16] in this game. + uint256[] memory honestL2Outputs = new uint256[](16); + for (uint256 i; i < honestL2Outputs.length; i++) { + honestL2Outputs[i] = i + 1; + } + // The honest trace covers all block -> block + 1 transitions, and is 256 bytes long, consisting + // of bytes [0, 255]. + bytes memory honestTrace = new bytes(256); + for (uint256 i; i < honestTrace.length; i++) { + honestTrace[i] = bytes1(uint8(i)); + } + + // The dishonest l2 outputs are from [2, 17] in this game. 
+ uint256[] memory dishonestL2Outputs = new uint256[](16); + for (uint256 i; i < dishonestL2Outputs.length; i++) { + dishonestL2Outputs[i] = i + 2; + } + // The dishonest trace covers all block -> block + 1 transitions, and is 256 bytes long, + // consisting of all zeros. + bytes memory dishonestTrace = new bytes(256); + + // Run the actor test + _actorTest({ + _rootClaim: 16, + _absolutePrestateData: 0, + _honestTrace: honestTrace, + _honestL2Outputs: honestL2Outputs, + _dishonestTrace: dishonestTrace, + _dishonestL2Outputs: dishonestL2Outputs, + _expectedStatus: GameStatus.DEFENDER_WINS + }); + } + + /// @notice Static unit test for a 1v1 output bisection dispute. + function test_static_1v1dishonestRoot_succeeds() public { + // The honest l2 outputs are from [1, 16] in this game. + uint256[] memory honestL2Outputs = new uint256[](16); + for (uint256 i; i < honestL2Outputs.length; i++) { + honestL2Outputs[i] = i + 1; + } + // The honest trace covers all block -> block + 1 transitions, and is 256 bytes long, + // consisting of bytes [0, 255]. + bytes memory honestTrace = new bytes(256); + for (uint256 i; i < honestTrace.length; i++) { + honestTrace[i] = bytes1(uint8(i)); + } + + // The dishonest l2 outputs are from [2, 17] in this game. + uint256[] memory dishonestL2Outputs = new uint256[](16); + for (uint256 i; i < dishonestL2Outputs.length; i++) { + dishonestL2Outputs[i] = i + 2; + } + // The dishonest trace covers all block -> block + 1 transitions, and is 256 bytes long, + // consisting of all zeros. + bytes memory dishonestTrace = new bytes(256); + + // Run the actor test + _actorTest({ + _rootClaim: 17, + _absolutePrestateData: 0, + _honestTrace: honestTrace, + _honestL2Outputs: honestL2Outputs, + _dishonestTrace: dishonestTrace, + _dishonestL2Outputs: dishonestL2Outputs, + _expectedStatus: GameStatus.CHALLENGER_WINS + }); + } + + /// @notice Static unit test for a 1v1 output bisection dispute. 
+ function test_static_1v1correctRootHalfWay_succeeds() public { + // The honest l2 outputs are from [1, 16] in this game. + uint256[] memory honestL2Outputs = new uint256[](16); + for (uint256 i; i < honestL2Outputs.length; i++) { + honestL2Outputs[i] = i + 1; + } + // The honest trace covers all block -> block + 1 transitions, and is 256 bytes long, + // consisting of bytes [0, 255]. + bytes memory honestTrace = new bytes(256); + for (uint256 i; i < honestTrace.length; i++) { + honestTrace[i] = bytes1(uint8(i)); + } + + // The dishonest l2 outputs are half correct, half incorrect. + uint256[] memory dishonestL2Outputs = new uint256[](16); + for (uint256 i; i < dishonestL2Outputs.length; i++) { + dishonestL2Outputs[i] = i > 7 ? 0xFF : i + 1; + } + // The dishonest trace is half correct, half incorrect. + bytes memory dishonestTrace = new bytes(256); + for (uint256 i; i < dishonestTrace.length; i++) { + dishonestTrace[i] = i > (127 + 4) ? bytes1(0xFF) : bytes1(uint8(i)); + } + + // Run the actor test + _actorTest({ + _rootClaim: 16, + _absolutePrestateData: 0, + _honestTrace: honestTrace, + _honestL2Outputs: honestL2Outputs, + _dishonestTrace: dishonestTrace, + _dishonestL2Outputs: dishonestL2Outputs, + _expectedStatus: GameStatus.DEFENDER_WINS + }); + } + + /// @notice Static unit test for a 1v1 output bisection dispute. + function test_static_1v1dishonestRootHalfWay_succeeds() public { + // The honest l2 outputs are from [1, 16] in this game. + uint256[] memory honestL2Outputs = new uint256[](16); + for (uint256 i; i < honestL2Outputs.length; i++) { + honestL2Outputs[i] = i + 1; + } + // The honest trace covers all block -> block + 1 transitions, and is 256 bytes long, + // consisting of bytes [0, 255]. + bytes memory honestTrace = new bytes(256); + for (uint256 i; i < honestTrace.length; i++) { + honestTrace[i] = bytes1(uint8(i)); + } + + // The dishonest l2 outputs are half correct, half incorrect. 
+ uint256[] memory dishonestL2Outputs = new uint256[](16); + for (uint256 i; i < dishonestL2Outputs.length; i++) { + dishonestL2Outputs[i] = i > 7 ? 0xFF : i + 1; + } + // The dishonest trace is half correct, half incorrect. + bytes memory dishonestTrace = new bytes(256); + for (uint256 i; i < dishonestTrace.length; i++) { + dishonestTrace[i] = i > (127 + 4) ? bytes1(0xFF) : bytes1(uint8(i)); + } + + // Run the actor test + _actorTest({ + _rootClaim: 0xFF, + _absolutePrestateData: 0, + _honestTrace: honestTrace, + _honestL2Outputs: honestL2Outputs, + _dishonestTrace: dishonestTrace, + _dishonestL2Outputs: dishonestL2Outputs, + _expectedStatus: GameStatus.CHALLENGER_WINS + }); + } + + /// @notice Static unit test for a 1v1 output bisection dispute. + function test_static_1v1correctAbsolutePrestate_succeeds() public { + // The honest l2 outputs are from [1, 16] in this game. + uint256[] memory honestL2Outputs = new uint256[](16); + for (uint256 i; i < honestL2Outputs.length; i++) { + honestL2Outputs[i] = i + 1; + } + // The honest trace covers all block -> block + 1 transitions, and is 256 bytes long, + // consisting of bytes [0, 255]. + bytes memory honestTrace = new bytes(256); + for (uint256 i; i < honestTrace.length; i++) { + honestTrace[i] = bytes1(uint8(i)); + } + + // The dishonest l2 outputs are half correct, half incorrect. + uint256[] memory dishonestL2Outputs = new uint256[](16); + for (uint256 i; i < dishonestL2Outputs.length; i++) { + dishonestL2Outputs[i] = i > 7 ? 0xFF : i + 1; + } + // The dishonest trace correct is half correct, half incorrect. + bytes memory dishonestTrace = new bytes(256); + for (uint256 i; i < dishonestTrace.length; i++) { + dishonestTrace[i] = i > 127 ? 
bytes1(0xFF) : bytes1(uint8(i));
+        }
+
+        // Run the actor test
+        _actorTest({
+            _rootClaim: 16,
+            _absolutePrestateData: 0,
+            _honestTrace: honestTrace,
+            _honestL2Outputs: honestL2Outputs,
+            _dishonestTrace: dishonestTrace,
+            _dishonestL2Outputs: dishonestL2Outputs,
+            _expectedStatus: GameStatus.DEFENDER_WINS
+        });
+    }
+
+    /// @notice Static unit test for a 1v1 output bisection dispute.
+    function test_static_1v1dishonestAbsolutePrestate_succeeds() public {
+        // The honest l2 outputs are from [1, 16] in this game.
+        uint256[] memory honestL2Outputs = new uint256[](16);
+        for (uint256 i; i < honestL2Outputs.length; i++) {
+            honestL2Outputs[i] = i + 1;
+        }
+        // The honest trace covers all block -> block + 1 transitions, and is 256 bytes long,
+        // consisting of bytes [0, 255].
+        bytes memory honestTrace = new bytes(256);
+        for (uint256 i; i < honestTrace.length; i++) {
+            honestTrace[i] = bytes1(uint8(i));
+        }
+
+        // The dishonest l2 outputs are half correct, half incorrect.
+        uint256[] memory dishonestL2Outputs = new uint256[](16);
+        for (uint256 i; i < dishonestL2Outputs.length; i++) {
+            dishonestL2Outputs[i] = i > 7 ? 0xFF : i + 1;
+        }
+        // The dishonest trace is half correct, half incorrect.
+        bytes memory dishonestTrace = new bytes(256);
+        for (uint256 i; i < dishonestTrace.length; i++) {
+            dishonestTrace[i] = i > 127 ? bytes1(0xFF) : bytes1(uint8(i));
+        }
+
+        // Run the actor test
+        _actorTest({
+            _rootClaim: 0xFF,
+            _absolutePrestateData: 0,
+            _honestTrace: honestTrace,
+            _honestL2Outputs: honestL2Outputs,
+            _dishonestTrace: dishonestTrace,
+            _dishonestL2Outputs: dishonestL2Outputs,
+            _expectedStatus: GameStatus.CHALLENGER_WINS
+        });
+    }
+
+    /// @notice Static unit test for a 1v1 output bisection dispute.
+    function test_static_1v1honestRootFinalInstruction_succeeds() public {
+        // The honest l2 outputs are from [1, 16] in this game.
+ uint256[] memory honestL2Outputs = new uint256[](16); + for (uint256 i; i < honestL2Outputs.length; i++) { + honestL2Outputs[i] = i + 1; + } + // The honest trace covers all block -> block + 1 transitions, and is 256 bytes long, + // consisting of bytes [0, 255]. + bytes memory honestTrace = new bytes(256); + for (uint256 i; i < honestTrace.length; i++) { + honestTrace[i] = bytes1(uint8(i)); + } + + // The dishonest l2 outputs are half correct, half incorrect. + uint256[] memory dishonestL2Outputs = new uint256[](16); + for (uint256 i; i < dishonestL2Outputs.length; i++) { + dishonestL2Outputs[i] = i > 7 ? 0xFF : i + 1; + } + // The dishonest trace is half correct, and correct all the way up to the final instruction + // of the exec subgame. + bytes memory dishonestTrace = new bytes(256); + for (uint256 i; i < dishonestTrace.length; i++) { + dishonestTrace[i] = i > (127 + 7) ? bytes1(0xFF) : bytes1(uint8(i)); + } + + // Run the actor test + _actorTest({ + _rootClaim: 16, + _absolutePrestateData: 0, + _honestTrace: honestTrace, + _honestL2Outputs: honestL2Outputs, + _dishonestTrace: dishonestTrace, + _dishonestL2Outputs: dishonestL2Outputs, + _expectedStatus: GameStatus.DEFENDER_WINS + }); + } + + /// @notice Static unit test for a 1v1 output bisection dispute. + function test_static_1v1dishonestRootFinalInstruction_succeeds() public { + // The honest l2 outputs are from [1, 16] in this game. + uint256[] memory honestL2Outputs = new uint256[](16); + for (uint256 i; i < honestL2Outputs.length; i++) { + honestL2Outputs[i] = i + 1; + } + // The honest trace covers all block -> block + 1 transitions, and is 256 bytes long, + // consisting of bytes [0, 255]. + bytes memory honestTrace = new bytes(256); + for (uint256 i; i < honestTrace.length; i++) { + honestTrace[i] = bytes1(uint8(i)); + } + + // The dishonest l2 outputs are half correct, half incorrect. 
+ uint256[] memory dishonestL2Outputs = new uint256[](16); + for (uint256 i; i < dishonestL2Outputs.length; i++) { + dishonestL2Outputs[i] = i > 7 ? 0xFF : i + 1; + } + // The dishonest trace is half correct, and correct all the way up to the final instruction + // of the exec subgame. + bytes memory dishonestTrace = new bytes(256); + for (uint256 i; i < dishonestTrace.length; i++) { + dishonestTrace[i] = i > (127 + 7) ? bytes1(0xFF) : bytes1(uint8(i)); + } + + // Run the actor test + _actorTest({ + _rootClaim: 0xFF, + _absolutePrestateData: 0, + _honestTrace: honestTrace, + _honestL2Outputs: honestL2Outputs, + _dishonestTrace: dishonestTrace, + _dishonestL2Outputs: dishonestL2Outputs, + _expectedStatus: GameStatus.CHALLENGER_WINS + }); + } + + //////////////////////////////////////////////////////////////// + // HELPERS // + //////////////////////////////////////////////////////////////// + + /// @notice Helper to run a 1v1 actor test + function _actorTest( + uint256 _rootClaim, + uint256 _absolutePrestateData, + bytes memory _honestTrace, + uint256[] memory _honestL2Outputs, + bytes memory _dishonestTrace, + uint256[] memory _dishonestL2Outputs, + GameStatus _expectedStatus + ) + internal + { + if (isForkTest()) { + // Mock the call anchorStateRegistry.getAnchorRoot() to return 0 as the block number + (Hash root,) = anchorStateRegistry.getAnchorRoot(); + vm.mockCall( + address(anchorStateRegistry), + abi.encodeCall(IAnchorStateRegistry.getAnchorRoot, ()), + abi.encode(root, 0) + ); + } + + // Setup the environment + bytes memory absolutePrestateData = + _setup({ _absolutePrestateData: _absolutePrestateData, _rootClaim: _rootClaim }); + + // Create actors + _createActors({ + _honestTrace: _honestTrace, + _honestPreStateData: absolutePrestateData, + _honestL2Outputs: _honestL2Outputs, + _dishonestTrace: _dishonestTrace, + _dishonestPreStateData: absolutePrestateData, + _dishonestL2Outputs: _dishonestL2Outputs + }); + + // Exhaust all moves from both actors + 
_exhaustMoves(); + + // Resolve the game and assert that the defender won + _warpAndResolve(); + assertEq(uint8(gameProxy.status()), uint8(_expectedStatus)); + } + + /// @notice Helper to setup the 1v1 test + function _setup( + uint256 _absolutePrestateData, + uint256 _rootClaim + ) + internal + returns (bytes memory absolutePrestateData_) + { + absolutePrestateData_ = abi.encode(_absolutePrestateData); + Claim absolutePrestateExec = + _changeClaimStatus(Claim.wrap(keccak256(absolutePrestateData_)), VMStatuses.UNFINISHED); + Claim rootClaim = Claim.wrap(bytes32(uint256(_rootClaim))); + super.init({ rootClaim: rootClaim, absolutePrestate: absolutePrestateExec, l2BlockNumber: _rootClaim }); + } + + /// @notice Helper to create actors for the 1v1 dispute. + function _createActors( + bytes memory _honestTrace, + bytes memory _honestPreStateData, + uint256[] memory _honestL2Outputs, + bytes memory _dishonestTrace, + bytes memory _dishonestPreStateData, + uint256[] memory _dishonestL2Outputs + ) + internal + { + honest = new HonestDisputeActor({ + _gameProxy: IFaultDisputeGame(address(gameProxy)), + _l2Outputs: _honestL2Outputs, + _trace: _honestTrace, + _preStateData: _honestPreStateData + }); + dishonest = new HonestDisputeActor({ + _gameProxy: IFaultDisputeGame(address(gameProxy)), + _l2Outputs: _dishonestL2Outputs, + _trace: _dishonestTrace, + _preStateData: _dishonestPreStateData + }); + + vm.deal(address(honest), 100 ether); + vm.deal(address(dishonest), 100 ether); + vm.label(address(honest), "HonestActor"); + vm.label(address(dishonest), "DishonestActor"); + } + + /// @notice Helper to exhaust all moves from both actors. + function _exhaustMoves() internal { + while (true) { + // Allow the dishonest actor to make their moves, and then the honest actor. 
+ (uint256 numMovesA,) = dishonest.move(); + (uint256 numMovesB, bool success) = honest.move(); + + require(success, "FaultDispute_1v1_Actors_Test: Honest actor's moves should always be successful"); + + // If both actors have run out of moves, we're done. + if (numMovesA == 0 && numMovesB == 0) break; + } + } + + /// @notice Helper to warp past the chess clock and resolve all claims within the dispute game. + function _warpAndResolve() internal { + // Warp past the chess clock + vm.warp(block.timestamp + 3 days + 12 hours); + + // Resolve all claims in reverse order. We allow `resolveClaim` calls to fail due to the + // check that prevents claims with no subgames attached from being passed to + // `resolveClaim`. There's also a check in `resolve` to ensure all children have been + // resolved before global resolution, which catches any unresolved subgames here. + for (uint256 i = gameProxy.claimDataLen(); i > 0; i--) { + (bool success,) = address(gameProxy).call(abi.encodeCall(gameProxy.resolveClaim, (i - 1, 0))); + assertTrue(success); + } + gameProxy.resolve(); + } +} diff --git a/packages/contracts-bedrock/test/dispute/v2/PermissionedDisputeGameV2.t.sol b/packages/contracts-bedrock/test/dispute/v2/PermissionedDisputeGameV2.t.sol new file mode 100644 index 0000000000000..1e621ac80389e --- /dev/null +++ b/packages/contracts-bedrock/test/dispute/v2/PermissionedDisputeGameV2.t.sol @@ -0,0 +1,417 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.15; + +// Testing +import { DisputeGameFactory_TestInit } from "test/dispute/DisputeGameFactory.t.sol"; +import { AlphabetVM } from "test/mocks/AlphabetVM.sol"; + +// Libraries +import "src/dispute/lib/Types.sol"; +import "src/dispute/lib/Errors.sol"; + +// Interfaces +import { IPermissionedDisputeGameV2 } from "interfaces/dispute/v2/IPermissionedDisputeGameV2.sol"; +import { IFaultDisputeGameV2 } from "interfaces/dispute/v2/IFaultDisputeGameV2.sol"; + +/// @title PermissionedDisputeGameV2_TestInit +/// @notice 
Reusable test initialization for `PermissionedDisputeGame` tests. +contract PermissionedDisputeGameV2_TestInit is DisputeGameFactory_TestInit { + /// @notice The type of the game being tested. + GameType internal immutable GAME_TYPE = GameTypes.PERMISSIONED_CANNON; + /// @notice Mock proposer key + address internal constant PROPOSER = address(0xfacade9); + /// @notice Mock challenger key + address internal constant CHALLENGER = address(0xfacadec); + + /// @dev The initial bond for the game. + uint256 internal initBond; + + /// @notice The implementation of the game. + IPermissionedDisputeGameV2 internal gameImpl; + /// @notice The `Clone` proxy of the game. + IPermissionedDisputeGameV2 internal gameProxy; + + /// @notice The extra data passed to the game for initialization. + bytes internal extraData; + + /// @notice The root claim of the game. + Claim internal rootClaim; + /// @notice An arbitrary root claim for testing. + Claim internal arbitaryRootClaim = Claim.wrap(bytes32(uint256(123))); + /// @notice Minimum bond value that covers all possible moves. + uint256 internal constant MIN_BOND = 50 ether; + + /// @notice The preimage of the absolute prestate claim + bytes internal absolutePrestateData; + /// @notice The absolute prestate of the trace. + Claim internal absolutePrestate; + /// @notice A valid l2BlockNumber that comes after the current anchor root block. + uint256 validL2BlockNumber; + + event Move(uint256 indexed parentIndex, Claim indexed pivot, address indexed claimant); + + function init(Claim _rootClaim, Claim _absolutePrestate, uint256 _l2BlockNumber) public { + // Set the time to a realistic date. + if (!isForkTest()) { + vm.warp(1690906994); + } + + // Fund the proposer on this fork. 
+ vm.deal(PROPOSER, 100 ether); + + // Set the extra data for the game creation + extraData = abi.encode(_l2BlockNumber); + + (address _impl, AlphabetVM _vm,) = setupPermissionedDisputeGameV2(_absolutePrestate, PROPOSER, CHALLENGER); + gameImpl = IPermissionedDisputeGameV2(_impl); + + // Create a new game. + initBond = disputeGameFactory.initBonds(GAME_TYPE); + vm.mockCall( + address(anchorStateRegistry), + abi.encodeCall(anchorStateRegistry.anchors, (GAME_TYPE)), + abi.encode(_rootClaim, 0) + ); + vm.prank(PROPOSER, PROPOSER); + gameProxy = IPermissionedDisputeGameV2( + payable(address(disputeGameFactory.create{ value: initBond }(GAME_TYPE, _rootClaim, extraData))) + ); + + // Check immutables + assertEq(gameProxy.proposer(), PROPOSER); + assertEq(gameProxy.challenger(), CHALLENGER); + assertEq(gameProxy.gameType().raw(), GAME_TYPE.raw()); + assertEq(gameProxy.absolutePrestate().raw(), _absolutePrestate.raw()); + assertEq(gameProxy.maxGameDepth(), 2 ** 3); + assertEq(gameProxy.splitDepth(), 2 ** 2); + assertEq(gameProxy.clockExtension().raw(), 3 hours); + assertEq(gameProxy.maxClockDuration().raw(), 3.5 days); + assertEq(address(gameProxy.weth()), address(delayedWeth)); + assertEq(address(gameProxy.anchorStateRegistry()), address(anchorStateRegistry)); + assertEq(address(gameProxy.vm()), address(_vm)); + assertEq(address(gameProxy.gameCreator()), PROPOSER); + assertEq(gameProxy.l2ChainId(), l2ChainId); + + // Label the proxy + vm.label(address(gameProxy), "PermissionedDisputeGame_Clone"); + } + + function setUp() public override { + absolutePrestateData = abi.encode(0); + absolutePrestate = _changeClaimStatus(Claim.wrap(keccak256(absolutePrestateData)), VMStatuses.UNFINISHED); + + super.setUp(); + + // Get the actual anchor roots + (Hash root, uint256 l2BlockNumber) = anchorStateRegistry.getAnchorRoot(); + validL2BlockNumber = l2BlockNumber + 1; + rootClaim = Claim.wrap(Hash.unwrap(root)); + init({ _rootClaim: rootClaim, _absolutePrestate: absolutePrestate, 
_l2BlockNumber: validL2BlockNumber }); + } + + /// @dev Helper to return a pseudo-random claim + function _dummyClaim() internal view returns (Claim) { + return Claim.wrap(keccak256(abi.encode(gasleft()))); + } + + /// @dev Helper to get the required bond for the given claim index. + function _getRequiredBond(uint256 _claimIndex) internal view returns (uint256 bond_) { + (,,,,, Position parent,) = gameProxy.claimData(_claimIndex); + Position pos = parent.move(true); + bond_ = gameProxy.getRequiredBond(pos); + } + + /// @dev Helper to change the VM status byte of a claim. + function _changeClaimStatus(Claim _claim, VMStatus _status) internal pure returns (Claim out_) { + assembly { + out_ := or(and(not(shl(248, 0xFF)), _claim), shl(248, _status)) + } + } + + fallback() external payable { } + + receive() external payable { } + + function copyBytes(bytes memory src, bytes memory dest) internal pure returns (bytes memory) { + uint256 byteCount = src.length < dest.length ? src.length : dest.length; + for (uint256 i = 0; i < byteCount; i++) { + dest[i] = src[i]; + } + return dest; + } +} + +/// @title PermissionedDisputeGameV2_Version_Test +/// @notice Tests the `version` function of the `PermissionedDisputeGame` contract. +contract PermissionedDisputeGameV2_Version_Test is PermissionedDisputeGameV2_TestInit { + /// @notice Tests that the game's version function returns a string. + function test_version_works() public view { + assertTrue(bytes(gameProxy.version()).length > 0); + } +} + +/// @title PermissionedDisputeGameV2_Step_Test +/// @notice Tests the `step` function of the `PermissionedDisputeGame` contract. +contract PermissionedDisputeGameV2_Step_Test is PermissionedDisputeGameV2_TestInit { + /// @notice Tests that step works properly for the challenger. + function test_step_fromChallenger_succeeds() public { + validateStepForActor(CHALLENGER); + } + + /// @notice Tests that step works properly for the proposer. 
+    function test_step_fromProposer_succeeds() public {
+        validateStepForActor(PROPOSER);
+    }
+
+    /// @notice Shared driver: funds and pranks `actor`, builds claims down to max depth,
+    ///         performs the step, then resolves and asserts `actor` countered the root claim.
+    function validateStepForActor(address actor) internal {
+        vm.deal(actor, 1_000 ether);
+        vm.startPrank(actor, actor);
+
+        // Set up and perform the step
+        setupGameForStep();
+        performStep();
+        assertEq(gameProxy.claimDataLen(), 9);
+
+        // Resolve the game and check that the expected actor countered the root claim
+        resolveGame();
+        assertEq(uint256(gameProxy.status()), uint256(GameStatus.CHALLENGER_WINS));
+        assertEq(gameProxy.resolvedAt().raw(), block.timestamp);
+        (, address counteredBy,,,,,) = gameProxy.claimData(0);
+        assertEq(counteredBy, actor);
+
+        vm.stopPrank();
+    }
+
+    /// @notice Tests that step reverts for unauthorized addresses.
+    // NOTE(review): this function is declared `internal`, so Foundry will never pick it up as
+    // a test — confirm whether that is an intentional disable. If it is meant to run, the
+    // assertions below look inconsistent with the comment and with setupGameForStep(): after
+    // an expectRevert'd step the game should presumably still be IN_PROGRESS (not
+    // CHALLENGER_WINS), and setupGameForStep() leaves 9 claims (`step` itself adds none), so
+    // claimDataLen() == 8 would fail. Verify the intended expectations before enabling.
+    function test_step_notAuthorized_reverts(address _unauthorized) internal {
+        vm.assume(_unauthorized != PROPOSER && _unauthorized != CHALLENGER);
+        vm.deal(_unauthorized, 1_000 ether);
+        vm.deal(CHALLENGER, 1_000 ether);
+
+        // Set up for the step using an authorized actor
+        vm.startPrank(CHALLENGER, CHALLENGER);
+        setupGameForStep();
+        vm.stopPrank();
+
+        // Perform step with the unauthorized actor
+        vm.startPrank(_unauthorized, _unauthorized);
+        vm.expectRevert(BadAuth.selector);
+        performStep();
+
+        // Game should still be in progress, leaf claim should be missing
+        assertEq(uint256(gameProxy.status()), uint256(GameStatus.CHALLENGER_WINS));
+        assertEq(gameProxy.claimDataLen(), 8);
+
+        vm.stopPrank();
+    }
+
+    function setupGameForStep() internal {
+        // Make claims all the way down the tree.
+ (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: _getRequiredBond(0) }(disputed, 0, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(1); + gameProxy.attack{ value: _getRequiredBond(1) }(disputed, 1, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(2); + gameProxy.attack{ value: _getRequiredBond(2) }(disputed, 2, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(3); + gameProxy.attack{ value: _getRequiredBond(3) }(disputed, 3, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(4); + gameProxy.attack{ value: _getRequiredBond(4) }(disputed, 4, _changeClaimStatus(_dummyClaim(), VMStatuses.PANIC)); + (,,,, disputed,,) = gameProxy.claimData(5); + gameProxy.attack{ value: _getRequiredBond(5) }(disputed, 5, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(6); + gameProxy.attack{ value: _getRequiredBond(6) }(disputed, 6, _dummyClaim()); + (,,,, disputed,,) = gameProxy.claimData(7); + gameProxy.attack{ value: _getRequiredBond(7) }(disputed, 7, _dummyClaim()); + + // Verify game state and add local data + assertEq(uint256(gameProxy.status()), uint256(GameStatus.IN_PROGRESS)); + gameProxy.addLocalData(LocalPreimageKey.DISPUTED_L2_BLOCK_NUMBER, 8, 0); + } + + function performStep() internal { + gameProxy.step(8, true, absolutePrestateData, hex""); + } + + function resolveGame() internal { + vm.warp(block.timestamp + gameProxy.maxClockDuration().raw() + 1); + gameProxy.resolveClaim(8, 0); + gameProxy.resolveClaim(7, 0); + gameProxy.resolveClaim(6, 0); + gameProxy.resolveClaim(5, 0); + gameProxy.resolveClaim(4, 0); + gameProxy.resolveClaim(3, 0); + gameProxy.resolveClaim(2, 0); + gameProxy.resolveClaim(1, 0); + + gameProxy.resolveClaim(0, 0); + gameProxy.resolve(); + } +} + +/// @title PermissionedDisputeGame_Initialize_Test +/// @notice Tests the initialization of the `PermissionedDisputeGame` contract. 
+contract PermissionedDisputeGameV2_Initialize_Test is PermissionedDisputeGameV2_TestInit { + /// @notice Tests that the game cannot be initialized with incorrect CWIA calldata length + /// caused by extraData of the wrong length + function test_initialize_wrongExtradataLength_reverts(uint256 _extraDataLen) public { + // The `DisputeGameFactory` will pack the root claim and the extra data into a single + // array, which is enforced to be at least 64 bytes long. + // We bound the upper end to 23.5KB to ensure that the minimal proxy never surpasses the + // contract size limit in this test, as CWIA proxies store the immutable args in their + // bytecode. + // [0 bytes, 31 bytes] u [33 bytes, 23.5 KB] + _extraDataLen = bound(_extraDataLen, 0, 23_500); + if (_extraDataLen == 32) { + _extraDataLen++; + } + bytes memory _extraData = new bytes(_extraDataLen); + + // Assign the first 32 bytes in `extraData` to a valid L2 block number passed the starting + // block. + (, uint256 startingL2Block) = gameProxy.startingOutputRoot(); + assembly { + mstore(add(_extraData, 0x20), add(startingL2Block, 1)) + } + + Claim claim = _dummyClaim(); + vm.prank(PROPOSER, PROPOSER); + vm.expectRevert(IFaultDisputeGameV2.BadExtraData.selector); + gameProxy = IPermissionedDisputeGameV2( + payable(address(disputeGameFactory.create{ value: initBond }(GAME_TYPE, claim, _extraData))) + ); + } + + /// @notice Tests that the game cannot be initialized with incorrect CWIA calldata length + /// caused by additional immutable args data + function test_initialize_extraImmutableArgsBytes_reverts(uint256 _extraByteCount) public { + (bytes memory correctArgs,,) = getPermissionedDisputeGameV2ImmutableArgs(absolutePrestate, PROPOSER, CHALLENGER); + + // We bound the upper end to 23.5KB to ensure that the minimal proxy never surpasses the + // contract size limit in this test, as CWIA proxies store the immutable args in their + // bytecode. 
+ _extraByteCount = bound(_extraByteCount, 1, 23_500); + bytes memory immutableArgs = new bytes(_extraByteCount + correctArgs.length); + // Copy correct args into immutable args + copyBytes(correctArgs, immutableArgs); + + // Set up dispute game implementation with target immutableArgs + setupPermissionedDisputeGameV2(immutableArgs); + + Claim claim = _dummyClaim(); + vm.prank(PROPOSER, PROPOSER); + vm.expectRevert(IFaultDisputeGameV2.BadExtraData.selector); + gameProxy = IPermissionedDisputeGameV2( + payable( + address(disputeGameFactory.create{ value: initBond }(GAME_TYPE, claim, abi.encode(validL2BlockNumber))) + ) + ); + } + + /// @notice Tests that the game cannot be initialized with incorrect CWIA calldata length + /// caused by missing immutable args data + function test_initialize_missingImmutableArgsBytes_reverts(uint256 _truncatedByteCount) public { + (bytes memory correctArgs,,) = getPermissionedDisputeGameV2ImmutableArgs(absolutePrestate, PROPOSER, CHALLENGER); + + _truncatedByteCount = (_truncatedByteCount % correctArgs.length) + 1; + bytes memory immutableArgs = new bytes(correctArgs.length - _truncatedByteCount); + // Copy correct args into immutable args + copyBytes(correctArgs, immutableArgs); + + // Set up dispute game implementation with target immutableArgs + setupPermissionedDisputeGameV2(immutableArgs); + + Claim claim = _dummyClaim(); + vm.prank(PROPOSER, PROPOSER); + vm.expectRevert(IFaultDisputeGameV2.BadExtraData.selector); + gameProxy = IPermissionedDisputeGameV2( + payable( + address(disputeGameFactory.create{ value: initBond }(GAME_TYPE, claim, abi.encode(validL2BlockNumber))) + ) + ); + } +} + +/// @title PermissionedDisputeGameV2_Uncategorized_Test +/// @notice General tests that are not testing any function directly of the +/// `PermissionedDisputeGame` contract or are testing multiple functions at once. 
+contract PermissionedDisputeGameV2_Uncategorized_Test is PermissionedDisputeGameV2_TestInit { + /// @notice Tests that the proposer can create a permissioned dispute game. + function test_createGame_proposer_succeeds() public { + vm.prank(PROPOSER, PROPOSER); + disputeGameFactory.create{ value: initBond }(GAME_TYPE, arbitaryRootClaim, abi.encode(validL2BlockNumber)); + } + + /// @notice Tests that the permissioned game cannot be created by the challenger. + function test_createGame_challenger_reverts() public { + vm.deal(CHALLENGER, initBond); + vm.prank(CHALLENGER, CHALLENGER); + vm.expectRevert(BadAuth.selector); + disputeGameFactory.create{ value: initBond }(GAME_TYPE, arbitaryRootClaim, abi.encode(validL2BlockNumber)); + } + + /// @notice Tests that the permissioned game cannot be created by any address other than the + /// proposer. + function testFuzz_createGame_notProposer_reverts(address _p) public { + vm.assume(_p != PROPOSER); + + vm.deal(_p, initBond); + vm.prank(_p, _p); + vm.expectRevert(BadAuth.selector); + disputeGameFactory.create{ value: initBond }(GAME_TYPE, arbitaryRootClaim, abi.encode(validL2BlockNumber)); + } + + /// @notice Tests that the challenger can participate in a permissioned dispute game. 
+ function test_participateInGame_challenger_succeeds() public { + vm.startPrank(CHALLENGER, CHALLENGER); + uint256 firstBond = _getRequiredBond(0); + vm.deal(CHALLENGER, firstBond); + (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: firstBond }(disputed, 0, Claim.wrap(0)); + uint256 secondBond = _getRequiredBond(1); + vm.deal(CHALLENGER, secondBond); + (,,,, disputed,,) = gameProxy.claimData(1); + gameProxy.defend{ value: secondBond }(disputed, 1, Claim.wrap(0)); + uint256 thirdBond = _getRequiredBond(2); + vm.deal(CHALLENGER, thirdBond); + (,,,, disputed,,) = gameProxy.claimData(2); + gameProxy.move{ value: thirdBond }(disputed, 2, Claim.wrap(0), true); + vm.stopPrank(); + } + + /// @notice Tests that the proposer can participate in a permissioned dispute game. + function test_participateInGame_proposer_succeeds() public { + vm.startPrank(PROPOSER, PROPOSER); + uint256 firstBond = _getRequiredBond(0); + vm.deal(PROPOSER, firstBond); + (,,,, Claim disputed,,) = gameProxy.claimData(0); + gameProxy.attack{ value: firstBond }(disputed, 0, Claim.wrap(0)); + uint256 secondBond = _getRequiredBond(1); + vm.deal(PROPOSER, secondBond); + (,,,, disputed,,) = gameProxy.claimData(1); + gameProxy.defend{ value: secondBond }(disputed, 1, Claim.wrap(0)); + uint256 thirdBond = _getRequiredBond(2); + vm.deal(PROPOSER, thirdBond); + (,,,, disputed,,) = gameProxy.claimData(2); + gameProxy.move{ value: thirdBond }(disputed, 2, Claim.wrap(0), true); + vm.stopPrank(); + } + + /// @notice Tests that addresses that are not the proposer or challenger cannot participate in + /// a permissioned dispute game. 
+ function test_participateInGame_notAuthorized_reverts(address _p) public { + vm.assume(_p != PROPOSER && _p != CHALLENGER); + + vm.startPrank(_p, _p); + (,,,, Claim disputed,,) = gameProxy.claimData(0); + vm.expectRevert(BadAuth.selector); + gameProxy.attack(disputed, 0, Claim.wrap(0)); + vm.expectRevert(BadAuth.selector); + gameProxy.defend(disputed, 0, Claim.wrap(0)); + vm.expectRevert(BadAuth.selector); + gameProxy.move(disputed, 0, Claim.wrap(0), true); + vm.expectRevert(BadAuth.selector); + gameProxy.step(0, true, absolutePrestateData, hex""); + vm.stopPrank(); + } +} diff --git a/packages/contracts-bedrock/test/governance/GovernanceToken.t.sol b/packages/contracts-bedrock/test/governance/GovernanceToken.t.sol index 86668018440c1..b046f30f71a0f 100644 --- a/packages/contracts-bedrock/test/governance/GovernanceToken.t.sol +++ b/packages/contracts-bedrock/test/governance/GovernanceToken.t.sol @@ -58,10 +58,10 @@ contract GovernanceToken_Mint_Test is GovernanceToken_TestInit { } } -/// @title GovernanceToken_Unclassified_Test +/// @title GovernanceToken_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `GovernanceToken` /// contract or are testing multiple functions at once. -contract GovernanceToken_Unclassified_Test is GovernanceToken_TestInit { +contract GovernanceToken_Uncategorized_Test is GovernanceToken_TestInit { /// @notice Tests that the owner can successfully call `burn`. function test_burn_succeeds() external { // Mint 100 tokens to rando. diff --git a/packages/contracts-bedrock/test/invariants/OptimismPortal2.t.sol b/packages/contracts-bedrock/test/invariants/OptimismPortal2.t.sol index a0dc3affa9de6..63d05ad7a4f94 100644 --- a/packages/contracts-bedrock/test/invariants/OptimismPortal2.t.sol +++ b/packages/contracts-bedrock/test/invariants/OptimismPortal2.t.sol @@ -143,6 +143,7 @@ contract OptimismPortal2_Invariant_Harness is DisputeGameFactory_TestInit { // Fund the portal so that we can withdraw ETH. 
vm.deal(address(ethLockbox), 0xFFFFFFFF); + vm.deal(address(optimismPortal2), 0xFFFFFFFF); } } diff --git a/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol b/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol index acb3c7e06ed4e..bf7888e850222 100644 --- a/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol +++ b/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol @@ -206,10 +206,10 @@ contract L1ChugSplashProxy_GetImplementation_Test is L1ChugSplashProxy_TestInit } } -/// @title L1ChugSplashProxy_Unclassified_Test +/// @title L1ChugSplashProxy_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `L1ChugSplashProxy` /// contract or are testing multiple functions at once. -contract L1ChugSplashProxy_Unclassified_Test is L1ChugSplashProxy_TestInit { +contract L1ChugSplashProxy_Uncategorized_Test is L1ChugSplashProxy_TestInit { /// @notice Tests that when the caller is not the owner and the implementation is not set, all /// calls reverts. function test_calls_whenNotOwnerNoImplementation_reverts() public { diff --git a/packages/contracts-bedrock/test/libraries/Blueprint.t.sol b/packages/contracts-bedrock/test/libraries/Blueprint.t.sol index 0037d4015d588..ac3ae1248ebe5 100644 --- a/packages/contracts-bedrock/test/libraries/Blueprint.t.sol +++ b/packages/contracts-bedrock/test/libraries/Blueprint.t.sol @@ -237,10 +237,10 @@ contract Blueprint_BytesToUint_Test is Blueprint_TestInit { } } -/// @title Blueprint_Unclassified_Test +/// @title Blueprint_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `Blueprint` library or /// are testing multiple functions at once. 
-contract Blueprint_Unclassified_Test is Blueprint_TestInit { +contract Blueprint_Uncategorized_Test is Blueprint_TestInit { /// @dev Tests that a roundtrip from initcode to blueprint to initcode succeeds, ensuring the /// invariant that `parseBlueprintPreamble(blueprintDeployerBytecode(x)) = x`. function testFuzz_roundtrip_succeeds(bytes memory _initcode) public { diff --git a/packages/contracts-bedrock/test/libraries/DevFeatures.t.sol b/packages/contracts-bedrock/test/libraries/DevFeatures.t.sol new file mode 100644 index 0000000000000..cd679dd250475 --- /dev/null +++ b/packages/contracts-bedrock/test/libraries/DevFeatures.t.sol @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Testing utilities +import { Test } from "forge-std/Test.sol"; + +// Target contract +import { DevFeatures } from "src/libraries/DevFeatures.sol"; + +contract DevFeatures_isDevFeatureEnabled_Test is Test { + bytes32 internal constant FEATURE_A = bytes32(0x0000000000000000000000000000000000000000000000000000000000000001); + bytes32 internal constant FEATURE_B = bytes32(0x0000000000000000000000000000000000000000000000000000000000000100); + bytes32 internal constant FEATURE_C = bytes32(0x1000000000000000000000000000000000000000000000000000000000000000); + + bytes32 internal constant FEATURES_AB = FEATURE_A | FEATURE_B; + bytes32 internal constant FEATURES_ABC = FEATURE_A | FEATURE_B | FEATURE_C; + bytes32 internal constant FEATURES_AB_INVERTED = ~FEATURES_AB; + bytes32 internal constant EMPTY_FEATURES = + bytes32(0x0000000000000000000000000000000000000000000000000000000000000000); + bytes32 internal constant ALL_FEATURES = bytes32(0x1111111111111111111111111111111111111111111111111111111111111111); + + function test_isDevFeatureEnabled_checkSingleFeatureExactMatch_works() public pure { + assertTrue(DevFeatures.isDevFeatureEnabled(FEATURE_A, FEATURE_A)); + assertTrue(DevFeatures.isDevFeatureEnabled(FEATURE_B, FEATURE_B)); + } + + function 
test_isDevFeatureEnabled_checkSingleFeatureAgainstSuperset_works() public pure { + assertTrue(DevFeatures.isDevFeatureEnabled(FEATURES_AB, FEATURE_A)); + assertTrue(DevFeatures.isDevFeatureEnabled(FEATURES_AB, FEATURE_B)); + assertTrue(DevFeatures.isDevFeatureEnabled(FEATURES_ABC, FEATURE_A)); + } + + function test_isDevFeatureEnabled_checkSingleFeatureAgainstAll_works() public pure { + assertTrue(DevFeatures.isDevFeatureEnabled(ALL_FEATURES, FEATURE_A)); + assertTrue(DevFeatures.isDevFeatureEnabled(ALL_FEATURES, FEATURE_B)); + } + + function test_isDevFeatureEnabled_checkSingleFeatureAgainstMismatchedBitmap_works() public pure { + assertFalse(DevFeatures.isDevFeatureEnabled(FEATURE_B, FEATURE_A)); + assertFalse(DevFeatures.isDevFeatureEnabled(FEATURE_A, FEATURE_B)); + assertFalse(DevFeatures.isDevFeatureEnabled(FEATURES_AB_INVERTED, FEATURE_A)); + assertFalse(DevFeatures.isDevFeatureEnabled(FEATURES_AB_INVERTED, FEATURE_B)); + } + + function test_isDevFeatureEnabled_checkSingleFeatureAgainstEmptyBitmap_works() public pure { + assertFalse(DevFeatures.isDevFeatureEnabled(EMPTY_FEATURES, FEATURE_A)); + assertFalse(DevFeatures.isDevFeatureEnabled(EMPTY_FEATURES, FEATURE_B)); + } + + function test_isDevFeatureEnabled_checkCombinedFeaturesAgainstExactMatch_works() public pure { + assertTrue(DevFeatures.isDevFeatureEnabled(FEATURES_AB, FEATURES_AB)); + } + + function test_isDevFeatureEnabled_checkCombinedFeatureAgainstSuperset_works() public pure { + assertTrue(DevFeatures.isDevFeatureEnabled(ALL_FEATURES, FEATURES_AB)); + assertTrue(DevFeatures.isDevFeatureEnabled(FEATURES_ABC, FEATURES_AB)); + } + + function test_isDevFeatureEnabled_checkCombinedFeaturesAgainstSubset_works() public pure { + assertFalse(DevFeatures.isDevFeatureEnabled(FEATURE_A, FEATURES_AB)); + assertFalse(DevFeatures.isDevFeatureEnabled(FEATURE_B, FEATURES_AB)); + } + + function test_isDevFeatureEnabled_checkCombinedFeaturesAgainstMismatchedBitmap_works() public pure { + 
assertFalse(DevFeatures.isDevFeatureEnabled(FEATURES_AB_INVERTED, FEATURES_AB)); + assertFalse(DevFeatures.isDevFeatureEnabled(EMPTY_FEATURES, FEATURES_AB)); + assertFalse(DevFeatures.isDevFeatureEnabled(FEATURE_C, FEATURES_AB)); + } + + function test_isDevFeatureEnabled_checkEmptyVsEmpty_works() public pure { + assertFalse(DevFeatures.isDevFeatureEnabled(EMPTY_FEATURES, EMPTY_FEATURES)); + } + + function test_isDevFeatureEnabled_checkAllVsAll_works() public pure { + assertTrue(DevFeatures.isDevFeatureEnabled(ALL_FEATURES, ALL_FEATURES)); + } + + function test_isDevFeatureEnabled_checkEmptyAgainstAll_works() public pure { + assertFalse(DevFeatures.isDevFeatureEnabled(ALL_FEATURES, EMPTY_FEATURES)); + } + + function test_isDevFeatureEnabled_checkAllAgainstEmpty_works() public pure { + assertFalse(DevFeatures.isDevFeatureEnabled(EMPTY_FEATURES, ALL_FEATURES)); + } +} diff --git a/packages/contracts-bedrock/test/libraries/Encoding.t.sol b/packages/contracts-bedrock/test/libraries/Encoding.t.sol index adb1528d7243f..9ae152ae9f675 100644 --- a/packages/contracts-bedrock/test/libraries/Encoding.t.sol +++ b/packages/contracts-bedrock/test/libraries/Encoding.t.sol @@ -303,10 +303,10 @@ contract Encoding_EncodeSuperRootProof_Test is Encoding_TestInit { } } -/// @title Encoding_Unclassified_Test +/// @title Encoding_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `Encoding` contract or /// are testing multiple functions at once. -contract Encoding_Unclassified_Test is Encoding_TestInit { +contract Encoding_Uncategorized_Test is Encoding_TestInit { /// @notice Tests encoding and decoding a nonce and version. 
function testFuzz_nonceVersioning_succeeds(uint240 _nonce, uint16 _version) external pure { (uint240 nonce, uint16 version) = Encoding.decodeVersionedNonce(Encoding.encodeVersionedNonce(_nonce, _version)); diff --git a/packages/contracts-bedrock/test/libraries/GasPayingToken.t.sol b/packages/contracts-bedrock/test/libraries/GasPayingToken.t.sol index d883c8e2b8051..e495a758b8f64 100644 --- a/packages/contracts-bedrock/test/libraries/GasPayingToken.t.sol +++ b/packages/contracts-bedrock/test/libraries/GasPayingToken.t.sol @@ -116,10 +116,10 @@ contract GasPayingToken_Sanitize_Test is GasPayingToken_TestInit { } } -/// @title GasPayingToken_Unclassified_Test +/// @title GasPayingToken_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `GasPayingToken` /// library or are testing multiple functions at once. -contract GasPayingToken_Unclassified_Test is GasPayingToken_TestInit { +contract GasPayingToken_Uncategorized_Test is GasPayingToken_TestInit { /// @notice Test that the gas paying token correctly sets values in storage when input name /// and symbol are strings. function testFuzz_setGetWithSanitize_succeeds( diff --git a/packages/contracts-bedrock/test/libraries/Predeploys.t.sol b/packages/contracts-bedrock/test/libraries/Predeploys.t.sol index 2daaa491cc463..ecdf2b409de73 100644 --- a/packages/contracts-bedrock/test/libraries/Predeploys.t.sol +++ b/packages/contracts-bedrock/test/libraries/Predeploys.t.sol @@ -127,10 +127,10 @@ contract Predeploys_PredeployToCodeNamespace_Test is Predeploys_TestInit { } } -/// @title Predeploys_Unclassified_Test +/// @title Predeploys_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `Predeploys` contract /// or are testing multiple functions at once. -contract Predeploys_Unclassified_Test is Predeploys_TestInit { +contract Predeploys_Uncategorized_Test is Predeploys_TestInit { /// @notice Tests that the predeploy addresses are set correctly. 
They have code /// and the proxied accounts have the correct admin. function test_predeploys_succeeds() external { @@ -138,10 +138,10 @@ contract Predeploys_Unclassified_Test is Predeploys_TestInit { } } -/// @title Predeploys_Interop_Unclassified_Test +/// @title Predeploys_UncategorizedInterop_Test /// @notice General tests that are not testing any function directly of the `Predeploys` contract /// or are testing multiple functions at once, using interop mode. -contract Predeploys_UnclassifiedInterop_Test is Predeploys_TestInit { +contract Predeploys_UncategorizedInterop_Test is Predeploys_TestInit { /// @notice Test setup. Enabling interop to get all predeploys. function setUp() public virtual override { super.enableInterop(); diff --git a/packages/contracts-bedrock/test/libraries/Preinstalls.t.sol b/packages/contracts-bedrock/test/libraries/Preinstalls.t.sol index e2fd60128e311..3941ed83bb17e 100644 --- a/packages/contracts-bedrock/test/libraries/Preinstalls.t.sol +++ b/packages/contracts-bedrock/test/libraries/Preinstalls.t.sol @@ -46,10 +46,10 @@ contract Preinstalls_GetPermit2Code_Test is Preinstalls_TestInit { } } -/// @title Preinstalls_Unclassified_Test +/// @title Preinstalls_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `Preinstalls` contract /// or are testing multiple functions at once. 
-contract Preinstalls_Unclassified_Test is Preinstalls_TestInit { +contract Preinstalls_Uncategorized_Test is Preinstalls_TestInit { /// @notice The domain separator commits to the chainid of the chain function test_preinstall_permit2DomainSeparator_works() external view { bytes32 domainSeparator = IEIP712(Preinstalls.Permit2).DOMAIN_SEPARATOR(); diff --git a/packages/contracts-bedrock/test/libraries/SemverComp.t.sol b/packages/contracts-bedrock/test/libraries/SemverComp.t.sol new file mode 100644 index 0000000000000..3bdf2b2cc9b59 --- /dev/null +++ b/packages/contracts-bedrock/test/libraries/SemverComp.t.sol @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Forge +import { Test } from "forge-std/Test.sol"; + +// Libraries +import { JSONParserLib } from "solady/src/utils/JSONParserLib.sol"; +import { SemverComp } from "src/libraries/SemverComp.sol"; + +/// @title SemverComp_Harness +/// @notice Exposes internal functions of `SemverComp` for testing. +contract SemverComp_Harness { + /// @notice Parses a semver string into a Semver struct. This is a wrapper around + /// `SemverComp.parse` that returns the major, minor, and patch components as + /// separate values. + /// @param _semver The semver string to parse. + /// @return major_ The major version. + /// @return minor_ The minor version. + /// @return patch_ The patch version. + function parse(string memory _semver) external pure returns (uint256 major_, uint256 minor_, uint256 patch_) { + SemverComp.Semver memory v = SemverComp.parse(_semver); + return (v.major, v.minor, v.patch); + } +} + +/// @title SemverComp_TestInit +/// @notice Reusable test initialization for `SemverComp` tests. +contract SemverComp_TestInit is Test { + SemverComp_Harness internal harness; + + /// @notice Sets up the test environment. + function setUp() public { + harness = new SemverComp_Harness(); + } + + /// @notice Asserts that the parsed semver components match the expected values. 
+ /// @param _semver The semver string to parse. + /// @param _major The expected major version. + /// @param _minor The expected minor version. + /// @param _patch The expected patch version. + function assertParsedEq(string memory _semver, uint256 _major, uint256 _minor, uint256 _patch) internal view { + (uint256 major, uint256 minor, uint256 patch) = harness.parse(_semver); + assertEq(major, _major, "major mismatch"); + assertEq(minor, _minor, "minor mismatch"); + assertEq(patch, _patch, "patch mismatch"); + } +} + +/// @title SemverComp_parse_Test +/// @notice Tests the `parse` function behavior. +contract SemverComp_parse_Test is SemverComp_TestInit { + /// @notice Parses the minimal version. + function test_parse_basicZero_succeeds() external view { + assertParsedEq("0.0.0", 0, 0, 0); + } + + /// @notice Parses a standard version. + function test_parse_basic123_succeeds() external view { + assertParsedEq("1.2.3", 1, 2, 3); + } + + /// @notice Ignores prerelease identifiers. + function test_parse_withPrerelease_succeeds() external view { + assertParsedEq("1.2.3-alpha", 1, 2, 3); + assertParsedEq("1.2.3-alpha.1", 1, 2, 3); + assertParsedEq("10.20.30-rc.1", 10, 20, 30); + } + + /// @notice Ignores build metadata. + function test_parse_withBuildMetadataOnly_succeeds() external view { + assertParsedEq("1.2.3+build.5", 1, 2, 3); + assertParsedEq("1.2.3+20240101", 1, 2, 3); + } + + /// @notice Ignores prerelease and build metadata together. + function test_parse_withPrereleaseAndBuild_succeeds() external view { + assertParsedEq("1.2.3-rc.1+build.5", 1, 2, 3); + assertParsedEq("2.0.0-beta+exp.sha.5114f85", 2, 0, 0); + } + + /// @notice Reverts when fewer than 3 dot-separated core parts are present. 
+ function test_parse_lessThanThreeParts_reverts() external { + vm.expectRevert(SemverComp.SemverComp_InvalidSemverParts.selector); + harness.parse("1.2"); + + vm.expectRevert(SemverComp.SemverComp_InvalidSemverParts.selector); + harness.parse("1"); + + vm.expectRevert(SemverComp.SemverComp_InvalidSemverParts.selector); + harness.parse(""); + } + + /// @notice Current behavior: extra dot-components beyond the core 3 are ignored. + function test_parse_extraDotComponents_succeeds() external view { + assertParsedEq("1.2.3.4", 1, 2, 3); + assertParsedEq("1.2.3.4.5", 1, 2, 3); + } + + /// @notice Reverts on non-numeric core parts. + function test_parse_nonNumeric_reverts() external { + vm.expectRevert(JSONParserLib.ParsingFailed.selector); + harness.parse("a.b.c"); + + vm.expectRevert(JSONParserLib.ParsingFailed.selector); + harness.parse("1.b.3"); + + vm.expectRevert(JSONParserLib.ParsingFailed.selector); + harness.parse("1.2.c"); + } + + /// @notice Reverts on certain commonly malformed inputs. + function test_parse_malformedInputs_reverts() external { + // Leading/trailing whitespace + vm.expectRevert(JSONParserLib.ParsingFailed.selector); + harness.parse(" 1.2.3"); + vm.expectRevert(JSONParserLib.ParsingFailed.selector); + harness.parse("1.2.3 "); + + // "v" prefix + vm.expectRevert(JSONParserLib.ParsingFailed.selector); + harness.parse("v1.2.3"); + } +} + +/// @title SemverComp_Eq_Test +/// @notice Tests the `eq` function behavior. +contract SemverComp_Eq_Test is SemverComp_TestInit { + function test_eq_succeeds() external pure { + assertTrue(SemverComp.eq("1.2.3", "1.2.3")); + + assertFalse(SemverComp.eq("1.2.3", "1.2.4")); + assertFalse(SemverComp.eq("1.2.3", "1.3.3")); + assertFalse(SemverComp.eq("1.2.3", "2.2.3")); + } +} + +/// @title SemverComp_Lt_Test +/// @notice Tests the `lt` function behavior. 
+contract SemverComp_Lt_Test is SemverComp_TestInit { + function test_lt_succeeds() external pure { + assertTrue(SemverComp.lt("1.2.3", "1.2.4")); + assertTrue(SemverComp.lt("1.2.3", "1.3.0")); + assertTrue(SemverComp.lt("1.2.3", "2.0.0")); + + assertFalse(SemverComp.lt("1.2.3", "1.2.3")); + assertFalse(SemverComp.lt("1.2.3", "1.2.2")); + assertFalse(SemverComp.lt("2.0.0", "1.9.9")); + } +} + +/// @title SemverComp_Lte_Test +/// @notice Tests the `lte` function behavior. +contract SemverComp_Lte_Test is SemverComp_TestInit { + function test_lte_succeeds() external pure { + assertTrue(SemverComp.lte("1.2.3", "1.2.3")); + assertTrue(SemverComp.lte("1.2.3", "1.2.4")); + assertTrue(SemverComp.lte("1.2.3", "1.3.0")); + assertTrue(SemverComp.lte("1.2.3", "2.0.0")); + + assertFalse(SemverComp.lte("1.2.3", "1.2.2")); + assertFalse(SemverComp.lte("2.0.0", "1.9.9")); + } +} + +/// @title SemverComp_Gt_Test +/// @notice Tests the `gt` function behavior. +contract SemverComp_Gt_Test is SemverComp_TestInit { + function test_gt_succeeds() external pure { + assertTrue(SemverComp.gt("1.2.4", "1.2.3")); + assertTrue(SemverComp.gt("1.3.0", "1.2.3")); + assertTrue(SemverComp.gt("2.0.0", "1.2.3")); + + assertFalse(SemverComp.gt("1.2.3", "1.2.3")); + assertFalse(SemverComp.gt("1.2.2", "1.2.3")); + assertFalse(SemverComp.gt("1.9.9", "2.0.0")); + } +} + +/// @title SemverComp_Gte_Test +/// @notice Tests the `gte` function behavior. 
+contract SemverComp_Gte_Test is SemverComp_TestInit { + function test_gte_succeeds() external pure { + assertTrue(SemverComp.gte("1.2.3", "1.2.3")); + assertTrue(SemverComp.gte("1.2.4", "1.2.3")); + assertTrue(SemverComp.gte("1.3.0", "1.2.3")); + assertTrue(SemverComp.gte("2.0.0", "1.2.3")); + + assertFalse(SemverComp.gte("1.2.2", "1.2.3")); + assertFalse(SemverComp.gte("1.9.9", "2.0.0")); + } +} diff --git a/packages/contracts-bedrock/test/libraries/Storage.t.sol b/packages/contracts-bedrock/test/libraries/Storage.t.sol index 1c46737a4084d..16c1e8727b7be 100644 --- a/packages/contracts-bedrock/test/libraries/Storage.t.sol +++ b/packages/contracts-bedrock/test/libraries/Storage.t.sol @@ -5,48 +5,86 @@ pragma solidity 0.8.15; import { StorageSetter } from "src/universal/StorageSetter.sol"; import { Test } from "forge-std/Test.sol"; -/// @title Storage_Roundtrip_Test -/// @notice Tests the storage setting and getting through the StorageSetter contract. -/// This contract simply wraps the Storage library, this is required as to -/// not poison the storage of the `Test` contract. -contract Storage_Roundtrip_Test is Test { +/// @title Storage_TestInit +/// @notice Reusable test initialization for `Storage` tests. +contract Storage_TestInit is Test { StorageSetter setter; - /// @notice A set of storage slots to pass to `setBytes32`. - StorageSetter.Slot[] slots; - /// @notice Used to deduplicate slots passed to `setBytes32`. - mapping(bytes32 => bool) keys; - - function setUp() external { + function setUp() public { setter = new StorageSetter(); } +} - function test_setGetUint_succeeds(bytes32 slot, uint256 num) external { - setter.setUint(slot, num); - assertEq(setter.getUint(slot), num); - assertEq(num, uint256(vm.load(address(setter), slot))); +/// @title Storage_GetAddress_Test +/// @notice Tests the `getAddress` function of the `Storage` library. 
+contract Storage_GetAddress_Test is Storage_TestInit { + /// @notice Test that getAddress returns the correct address value from storage. + /// @param _slot The storage slot to test with. + /// @param _addr The address value to test with. + function testFuzz_getAddress_succeeds(bytes32 _slot, address _addr) external { + setter.setAddress(_slot, _addr); + assertEq(setter.getAddress(_slot), _addr); + assertEq(_addr, address(uint160(uint256(vm.load(address(setter), _slot))))); } +} - function test_setGetAddress_succeeds(bytes32 slot, address addr) external { - setter.setAddress(slot, addr); - assertEq(setter.getAddress(slot), addr); - assertEq(addr, address(uint160(uint256(vm.load(address(setter), slot))))); +/// @title Storage_SetAddress_Test +/// @notice Tests the `setAddress` function of the `Storage` library. +contract Storage_SetAddress_Test is Storage_TestInit { + /// @notice Test that setAddress correctly stores address values in arbitrary slots. + /// @param _slot The storage slot to test with. + /// @param _addr The address value to test with. + function testFuzz_setAddress_succeeds(bytes32 _slot, address _addr) external { + setter.setAddress(_slot, _addr); + assertEq(address(uint160(uint256(vm.load(address(setter), _slot)))), _addr); } +} + +/// @title Storage_GetUint_Test +/// @notice Tests the `getUint` function of the `Storage` library. +contract Storage_GetUint_Test is Storage_TestInit { + /// @notice Test that getUint returns the correct uint256 value from storage. + /// @param _slot The storage slot to test with. + /// @param _value The uint256 value to test with. 
+ function testFuzz_getUint_succeeds(bytes32 _slot, uint256 _value) external { + setter.setUint(_slot, _value); + assertEq(setter.getUint(_slot), _value); + assertEq(_value, uint256(vm.load(address(setter), _slot))); + } +} - function test_setGetBytes32_succeeds(bytes32 slot, bytes32 hash) external { - setter.setBytes32(slot, hash); - assertEq(setter.getBytes32(slot), hash); - assertEq(hash, vm.load(address(setter), slot)); +/// @title Storage_SetUint_Test +/// @notice Tests the `setUint` function of the `Storage` library. +contract Storage_SetUint_Test is Storage_TestInit { + /// @notice Test that setUint correctly stores uint256 values in arbitrary slots. + /// @param _slot The storage slot to test with. + /// @param _value The uint256 value to test with. + function testFuzz_setUint_succeeds(bytes32 _slot, uint256 _value) external { + setter.setUint(_slot, _value); + assertEq(uint256(vm.load(address(setter), _slot)), _value); } +} + +/// @title Storage_GetBytes32_Test +/// @notice Tests the `getBytes32` function of the `Storage` library. +contract Storage_GetBytes32_Test is Storage_TestInit { + /// @notice A set of storage slots to pass to `setBytes32`. + StorageSetter.Slot[] slots; + /// @notice Used to deduplicate slots passed to `setBytes32`. + mapping(bytes32 => bool) keys; - function test_setGetBool_succeeds(bytes32 slot, bool value) external { - setter.setBool(slot, value); - assertEq(setter.getBool(slot), value); - assertEq(value, vm.load(address(setter), slot) == bytes32(uint256(1))); + /// @notice Test that getBytes32 returns the correct bytes32 value from storage. + /// @param _slot The storage slot to test with. + /// @param _value The bytes32 value to test with. 
+ function testFuzz_getBytes32_succeeds(bytes32 _slot, bytes32 _value) external { + setter.setBytes32(_slot, _value); + assertEq(setter.getBytes32(_slot), _value); + assertEq(_value, vm.load(address(setter), _slot)); } - /// @dev All keys must be unique in the input so deduplication is required. - function testFuzz_setGetBytes32Multi_succeeds(StorageSetter.Slot[] calldata _slots) external { + /// @notice Test that multiple bytes32 values can be set and retrieved correctly. + /// @param _slots Array of storage slots and values to test with. + function testFuzz_getBytes32_multiSlot_succeeds(StorageSetter.Slot[] calldata _slots) external { for (uint256 i; i < _slots.length; i++) { if (keys[_slots[i].key]) { continue; @@ -62,3 +100,40 @@ contract Storage_Roundtrip_Test is Test { } } } + +/// @title Storage_SetBytes32_Test +/// @notice Tests the `setBytes32` function of the `Storage` library. +contract Storage_SetBytes32_Test is Storage_TestInit { + /// @notice Test that setBytes32 correctly stores bytes32 values in arbitrary slots. + /// @param _slot The storage slot to test with. + /// @param _value The bytes32 value to test with. + function testFuzz_setBytes32_succeeds(bytes32 _slot, bytes32 _value) external { + setter.setBytes32(_slot, _value); + assertEq(vm.load(address(setter), _slot), _value); + } +} + +/// @title Storage_SetBool_Test +/// @notice Tests the `setBool` function of the `Storage` library. +contract Storage_SetBool_Test is Storage_TestInit { + /// @notice Test that setBool correctly stores bool values in arbitrary slots. + /// @param _slot The storage slot to test with. + /// @param _value The bool value to test with. + function testFuzz_setBool_succeeds(bytes32 _slot, bool _value) external { + setter.setBool(_slot, _value); + assertEq(vm.load(address(setter), _slot) == bytes32(uint256(1)), _value); + } +} + +/// @title Storage_GetBool_Test +/// @notice Tests the `getBool` function of the `Storage` library. 
+contract Storage_GetBool_Test is Storage_TestInit { + /// @notice Test that getBool returns the correct bool value from storage. + /// @param _slot The storage slot to test with. + /// @param _value The bool value to test with. + function testFuzz_getBool_succeeds(bytes32 _slot, bool _value) external { + setter.setBool(_slot, _value); + assertEq(setter.getBool(_slot), _value); + assertEq(_value, vm.load(address(setter), _slot) == bytes32(uint256(1))); + } +} diff --git a/packages/contracts-bedrock/test/libraries/TransientContext.t.sol b/packages/contracts-bedrock/test/libraries/TransientContext.t.sol index e21504652eb6c..0df917ef898e9 100644 --- a/packages/contracts-bedrock/test/libraries/TransientContext.t.sol +++ b/packages/contracts-bedrock/test/libraries/TransientContext.t.sol @@ -170,10 +170,10 @@ contract TransientContext_ReentrantAware_Test is TransientContext_TestInit, Tran } } -/// @title TransientContext_Unclassified_Test +/// @title TransientContext_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `TransientContext` /// contract or are testing multiple functions at once. -contract TransientContext_Unclassified_Test is TransientContext_Set_Test { +contract TransientContext_Uncategorized_Test is TransientContext_Set_Test { /// @notice Tests that `set()` and `get()` work together. /// @param _slot Slot to test. /// @param _value Value to test. 
diff --git a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol index b0b5b3d2a5673..c0d2e784864f8 100644 --- a/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployImplementations.t.sol @@ -7,8 +7,9 @@ import { Test, stdStorage, StdStorage } from "forge-std/Test.sol"; // Libraries import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { Chains } from "scripts/libraries/Chains.sol"; -import { LibString } from "@solady/utils/LibString.sol"; import { StandardConstants } from "scripts/deploy/StandardConstants.sol"; +import { DevFeatures } from "src/libraries/DevFeatures.sol"; +import { GameTypes } from "src/dispute/lib/Types.sol"; // Interfaces import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; @@ -32,7 +33,7 @@ contract DeployImplementations_Test is Test { ISuperchainConfig superchainConfigProxy = ISuperchainConfig(makeAddr("superchainConfigProxy")); IProtocolVersions protocolVersionsProxy = IProtocolVersions(makeAddr("protocolVersionsProxy")); IProxyAdmin superchainProxyAdmin = IProxyAdmin(makeAddr("superchainProxyAdmin")); - address upgradeController = makeAddr("upgradeController"); + address l1ProxyAdminOwner = makeAddr("l1ProxyAdminOwner"); address challenger = makeAddr("challenger"); function setUp() public virtual { @@ -48,6 +49,9 @@ contract DeployImplementations_Test is Test { DeployImplementations.Output memory output = deployImplementations.run(input); assertNotEq(address(output.systemConfigImpl), address(0)); + // V2 contracts should not be deployed with default flag (false) + assertEq(address(output.faultDisputeGameV2Impl), address(0)); + assertEq(address(output.permissionedDisputeGameV2Impl), address(0)); } function test_reuseImplementation_succeeds() public { @@ -71,6 +75,11 @@ contract DeployImplementations_Test is Test { assertEq(address(output1.anchorStateRegistryImpl), 
address(output2.anchorStateRegistryImpl), "1100"); assertEq(address(output1.opcm), address(output2.opcm), "1200"); assertEq(address(output1.ethLockboxImpl), address(output2.ethLockboxImpl), "1300"); + // V2 contracts should both be address(0) since default flag is false + assertEq(address(output1.faultDisputeGameV2Impl), address(output2.faultDisputeGameV2Impl), "1400"); + assertEq(address(output1.permissionedDisputeGameV2Impl), address(output2.permissionedDisputeGameV2Impl), "1500"); + assertEq(address(output1.faultDisputeGameV2Impl), address(0), "V2 contracts should be null"); + assertEq(address(output1.permissionedDisputeGameV2Impl), address(0), "V2 contracts should be null"); } function testFuzz_run_memory_succeeds( @@ -79,20 +88,25 @@ contract DeployImplementations_Test is Test { uint64 _challengePeriodSeconds, uint256 _proofMaturityDelaySeconds, uint256 _disputeGameFinalityDelaySeconds, - string memory _l1ContractsRelease, - address _superchainConfigImpl + address _superchainConfigImpl, + uint256 _faultGameV2MaxGameDepth, + uint256 _faultGameV2SplitDepth, + uint256 _faultGameV2ClockExtension, + uint256 _faultGameV2MaxClockDuration, + bytes32 _devFeatureBitmap ) public { - vm.assume(_withdrawalDelaySeconds != 0); - vm.assume(_minProposalSizeBytes != 0); - vm.assume(_challengePeriodSeconds != 0); - vm.assume(_proofMaturityDelaySeconds != 0); - vm.assume(_disputeGameFinalityDelaySeconds != 0); - vm.assume(!LibString.eq(_l1ContractsRelease, "")); - vm.assume(_superchainConfigImpl != address(0)); + _withdrawalDelaySeconds = bound(_withdrawalDelaySeconds, 1, type(uint256).max); + _minProposalSizeBytes = bound(_minProposalSizeBytes, 1, 1000000); + _challengePeriodSeconds = uint64(bound(uint256(_challengePeriodSeconds), 1, type(uint64).max)); + _proofMaturityDelaySeconds = bound(_proofMaturityDelaySeconds, 1, type(uint256).max); + _disputeGameFinalityDelaySeconds = bound(_disputeGameFinalityDelaySeconds, 1, type(uint256).max); + // Ensure superchainConfigImpl is not 
zero address + vm.assume(_superchainConfigImpl != address(0)); // Must configure the ProxyAdmin contract. + superchainProxyAdmin = IProxyAdmin( DeployUtils.create1({ _name: "ProxyAdmin", @@ -112,6 +126,22 @@ contract DeployImplementations_Test is Test { vm.prank(address(superchainProxyAdmin)); IProxy(payable(address(superchainConfigProxy))).upgradeTo(address(superchainConfigImpl)); + _faultGameV2MaxGameDepth = bound(_faultGameV2MaxGameDepth, 4, 125); + _faultGameV2SplitDepth = + bound(_faultGameV2SplitDepth, 2, _faultGameV2MaxGameDepth > 3 ? _faultGameV2MaxGameDepth - 2 : 2); + _faultGameV2ClockExtension = bound(_faultGameV2ClockExtension, 1, 7 days); + _faultGameV2MaxClockDuration = bound(_faultGameV2MaxClockDuration, _faultGameV2ClockExtension * 2, 30 days); + + // When V2 is not enabled, set V2 params to 0 to match script expectations + // Otherwise ensure they remain within bounds already set + bool isV2Enabled = DevFeatures.isDevFeatureEnabled(_devFeatureBitmap, DevFeatures.DEPLOY_V2_DISPUTE_GAMES); + if (!isV2Enabled) { + _faultGameV2MaxGameDepth = 0; + _faultGameV2SplitDepth = 0; + _faultGameV2ClockExtension = 0; + _faultGameV2MaxClockDuration = 0; + } + DeployImplementations.Input memory input = DeployImplementations.Input( _withdrawalDelaySeconds, _minProposalSizeBytes, @@ -119,11 +149,15 @@ contract DeployImplementations_Test is Test { _proofMaturityDelaySeconds, _disputeGameFinalityDelaySeconds, StandardConstants.MIPS_VERSION, // mipsVersion - _l1ContractsRelease, + _devFeatureBitmap, // devFeatureBitmap (fuzzed) + _faultGameV2MaxGameDepth, // faultGameV2MaxGameDepth (bounded) + _faultGameV2SplitDepth, // faultGameV2SplitDepth (bounded) + _faultGameV2ClockExtension, // faultGameV2ClockExtension (bounded) + _faultGameV2MaxClockDuration, // faultGameV2MaxClockDuration (bounded) superchainConfigProxy, protocolVersionsProxy, superchainProxyAdmin, - upgradeController, + l1ProxyAdminOwner, challenger ); @@ -143,6 +177,45 @@ contract DeployImplementations_Test 
is Test { assertNotEq(address(output.opcmDeployer), address(0), "1000"); assertNotEq(address(output.opcmGameTypeAdder), address(0), "1100"); + // Check V2 contracts based on feature flag + bool v2Enabled = DevFeatures.isDevFeatureEnabled(_devFeatureBitmap, DevFeatures.DEPLOY_V2_DISPUTE_GAMES); + if (v2Enabled) { + assertNotEq(address(output.faultDisputeGameV2Impl), address(0), "V2 should be deployed when enabled"); + assertNotEq(address(output.permissionedDisputeGameV2Impl), address(0), "V2 should be deployed when enabled"); + + // Verify V2 constructor parameters match fuzz inputs + assertEq(output.faultDisputeGameV2Impl.maxGameDepth(), _faultGameV2MaxGameDepth, "FDGv2 maxGameDepth"); + assertEq(output.faultDisputeGameV2Impl.splitDepth(), _faultGameV2SplitDepth, "FDGv2 splitDepth"); + assertEq( + output.faultDisputeGameV2Impl.clockExtension().raw(), + uint64(_faultGameV2ClockExtension), + "FDGv2 clockExtension" + ); + assertEq( + output.faultDisputeGameV2Impl.maxClockDuration().raw(), + uint64(_faultGameV2MaxClockDuration), + "FDGv2 maxClockDuration" + ); + + assertEq( + output.permissionedDisputeGameV2Impl.maxGameDepth(), _faultGameV2MaxGameDepth, "PDGv2 maxGameDepth" + ); + assertEq(output.permissionedDisputeGameV2Impl.splitDepth(), _faultGameV2SplitDepth, "PDGv2 splitDepth"); + assertEq( + output.permissionedDisputeGameV2Impl.clockExtension().raw(), + uint64(_faultGameV2ClockExtension), + "PDGv2 clockExtension" + ); + assertEq( + output.permissionedDisputeGameV2Impl.maxClockDuration().raw(), + uint64(_faultGameV2MaxClockDuration), + "PDGv2 maxClockDuration" + ); + } else { + assertEq(address(output.faultDisputeGameV2Impl), address(0), "V2 should be null when disabled"); + assertEq(address(output.permissionedDisputeGameV2Impl), address(0), "V2 should be null when disabled"); + } + // Address contents assertions bytes memory empty; @@ -159,6 +232,17 @@ contract DeployImplementations_Test is Test { assertNotEq(address(output.opcmDeployer).code, empty, "2200"); 
assertNotEq(address(output.opcmGameTypeAdder).code, empty, "2300"); + // V2 contracts code existence based on feature flag + if (v2Enabled) { + assertNotEq(address(output.faultDisputeGameV2Impl).code, empty, "V2 FDG should have code when enabled"); + assertNotEq( + address(output.permissionedDisputeGameV2Impl).code, empty, "V2 PDG should have code when enabled" + ); + } else { + assertEq(address(output.faultDisputeGameV2Impl).code, empty, "V2 FDG should be empty when disabled"); + assertEq(address(output.permissionedDisputeGameV2Impl).code, empty, "V2 PDG should be empty when disabled"); + } + // Architecture assertions. assertEq(address(output.mipsSingleton.oracle()), address(output.preimageOracleSingleton), "600"); } @@ -219,11 +303,6 @@ contract DeployImplementations_Test is Test { vm.expectRevert("DeployImplementations: mipsVersion not set"); deployImplementations.run(input); - input = defaultInput(); - input.l1ContractsRelease = ""; - vm.expectRevert("DeployImplementations: l1ContractsRelease not set"); - deployImplementations.run(input); - input = defaultInput(); input.superchainConfigProxy = ISuperchainConfig(address(0)); vm.expectRevert("DeployImplementations: superchainConfigProxy not set"); @@ -240,11 +319,228 @@ contract DeployImplementations_Test is Test { deployImplementations.run(input); input = defaultInput(); - input.upgradeController = address(0); - vm.expectRevert("DeployImplementations: upgradeController not set"); + input.l1ProxyAdminOwner = address(0); + vm.expectRevert("DeployImplementations: L1ProxyAdminOwner not set"); + deployImplementations.run(input); + } + + function test_deployImplementation_withV2Enabled_succeeds() public { + DeployImplementations.Input memory input = defaultInput(); + input.devFeatureBitmap = DevFeatures.DEPLOY_V2_DISPUTE_GAMES; + DeployImplementations.Output memory output = deployImplementations.run(input); + + assertNotEq(address(output.faultDisputeGameV2Impl), address(0), "FaultDisputeGameV2 should be deployed"); + 
assertNotEq( + address(output.permissionedDisputeGameV2Impl), address(0), "PermissionedDisputeGameV2 should be deployed" + ); + + // Validate constructor args for FaultDisputeGameV2 + assertEq( + uint256(output.faultDisputeGameV2Impl.gameType().raw()), + uint256(GameTypes.CANNON.raw()), + "FaultDisputeGameV2 gameType incorrect" + ); + assertEq(output.faultDisputeGameV2Impl.maxGameDepth(), 73, "FaultDisputeGameV2 maxGameDepth incorrect"); + assertEq(output.faultDisputeGameV2Impl.splitDepth(), 30, "FaultDisputeGameV2 splitDepth incorrect"); + assertEq( + output.faultDisputeGameV2Impl.clockExtension().raw(), 10800, "FaultDisputeGameV2 clockExtension incorrect" + ); + assertEq( + output.faultDisputeGameV2Impl.maxClockDuration().raw(), + 302400, + "FaultDisputeGameV2 maxClockDuration incorrect" + ); + + // Validate constructor args for PermissionedDisputeGameV2 + assertEq( + uint256(output.permissionedDisputeGameV2Impl.gameType().raw()), + uint256(GameTypes.PERMISSIONED_CANNON.raw()), + "PermissionedDisputeGameV2 gameType incorrect" + ); + assertEq( + output.permissionedDisputeGameV2Impl.maxGameDepth(), 73, "PermissionedDisputeGameV2 maxGameDepth incorrect" + ); + assertEq( + output.permissionedDisputeGameV2Impl.splitDepth(), 30, "PermissionedDisputeGameV2 splitDepth incorrect" + ); + assertEq( + output.permissionedDisputeGameV2Impl.clockExtension().raw(), + 10800, + "PermissionedDisputeGameV2 clockExtension incorrect" + ); + assertEq( + output.permissionedDisputeGameV2Impl.maxClockDuration().raw(), + 302400, + "PermissionedDisputeGameV2 maxClockDuration incorrect" + ); + } + + function test_v2ParamsValidation_withFlagDisabled_succeeds() public { + // When V2 flag is disabled, V2 params should be 0 or within safe bounds + DeployImplementations.Input memory input = defaultInput(); + input.devFeatureBitmap = bytes32(0); // V2 disabled + + // Test that zero values are accepted + input.faultGameV2MaxGameDepth = 0; + input.faultGameV2SplitDepth = 0; + 
input.faultGameV2ClockExtension = 0; + input.faultGameV2MaxClockDuration = 0; + + DeployImplementations.Output memory output = deployImplementations.run(input); + assertEq(address(output.faultDisputeGameV2Impl), address(0), "V2 FDG should be null when disabled"); + assertEq(address(output.permissionedDisputeGameV2Impl), address(0), "V2 PDG should be null when disabled"); + } + + function test_v2ParamsValidation_withHugeValues_reverts() public { + // When V2 flag is enabled, huge V2 params should be rejected + DeployImplementations.Input memory input = defaultInput(); + input.devFeatureBitmap = DevFeatures.DEPLOY_V2_DISPUTE_GAMES; // V2 enabled + + // Test that huge clock extension is rejected + input.faultGameV2ClockExtension = type(uint256).max; + vm.expectRevert("DeployImplementations: faultGameV2ClockExtension too large for uint64"); + deployImplementations.run(input); + + // Reset and test huge max clock duration + input.faultGameV2ClockExtension = 100; + input.faultGameV2MaxClockDuration = type(uint256).max; + vm.expectRevert("DeployImplementations: faultGameV2MaxClockDuration too large for uint64"); + deployImplementations.run(input); + + // Reset and test huge max game depth + input.faultGameV2MaxClockDuration = 200; + input.faultGameV2MaxGameDepth = 300; // > 200 + vm.expectRevert("DeployImplementations: faultGameV2MaxGameDepth out of valid range (1-125)"); + deployImplementations.run(input); + + // Reset and test invalid split depth (too large, >= maxGameDepth) + input.faultGameV2MaxGameDepth = 50; + input.faultGameV2SplitDepth = 50; // splitDepth + 1 must be < maxGameDepth + vm.expectRevert("DeployImplementations: faultGameV2SplitDepth must be >= 2 and splitDepth + 1 < maxGameDepth"); + deployImplementations.run(input); + + // Reset and test invalid split depth (too small, < 2) + input.faultGameV2MaxGameDepth = 50; + input.faultGameV2SplitDepth = 1; // < 2 + vm.expectRevert("DeployImplementations: faultGameV2SplitDepth must be >= 2 and splitDepth + 1 < 
maxGameDepth"); + deployImplementations.run(input); + + // Reset and test clock extension = 0 (must be > 0 when V2 enabled) + input.faultGameV2SplitDepth = 10; + input.faultGameV2ClockExtension = 0; + input.faultGameV2MaxClockDuration = 1000; + vm.expectRevert("DeployImplementations: faultGameV2ClockExtension must be > 0"); + deployImplementations.run(input); + + // Reset and test maxClockDuration < clockExtension + input.faultGameV2ClockExtension = 1000; + input.faultGameV2MaxClockDuration = 500; // < clockExtension + vm.expectRevert("DeployImplementations: maxClockDuration must be >= clockExtension"); deployImplementations.run(input); } + function test_deployImplementation_withV2Disabled_succeeds() public { + DeployImplementations.Input memory input = defaultInput(); + input.devFeatureBitmap = bytes32(0); + DeployImplementations.Output memory output = deployImplementations.run(input); + + assertEq(address(output.faultDisputeGameV2Impl), address(0), "FaultDisputeGameV2 should not be deployed"); + assertEq( + address(output.permissionedDisputeGameV2Impl), + address(0), + "PermissionedDisputeGameV2 should not be deployed" + ); + + // Ensure other contracts are still deployed + assertNotEq(address(output.systemConfigImpl), address(0), "SystemConfig should still be deployed"); + assertNotEq(address(output.disputeGameFactoryImpl), address(0), "DisputeGameFactory should still be deployed"); + } + + function test_reuseImplementation_withV2Flags_succeeds() public { + DeployImplementations.Input memory inputEnabled = defaultInput(); + inputEnabled.devFeatureBitmap = DevFeatures.DEPLOY_V2_DISPUTE_GAMES; + DeployImplementations.Output memory output1 = deployImplementations.run(inputEnabled); + + DeployImplementations.Input memory inputDisabled = defaultInput(); + inputDisabled.devFeatureBitmap = bytes32(0); + DeployImplementations.Output memory output2 = deployImplementations.run(inputDisabled); + + // V2 contracts should be different between enabled and disabled + 
assertTrue( + address(output1.faultDisputeGameV2Impl) != address(output2.faultDisputeGameV2Impl), + "V2 addresses should differ between enabled/disabled" + ); + assertTrue( + address(output1.permissionedDisputeGameV2Impl) != address(output2.permissionedDisputeGameV2Impl), + "V2 addresses should differ between enabled/disabled" + ); + + // Validate constructor args for FaultDisputeGameV2 + assertEq( + uint256(output1.faultDisputeGameV2Impl.gameType().raw()), + uint256(GameTypes.CANNON.raw()), + "FaultDisputeGameV2 gameType incorrect" + ); + assertEq(output1.faultDisputeGameV2Impl.maxGameDepth(), 73, "FaultDisputeGameV2 maxGameDepth incorrect"); + assertEq(output1.faultDisputeGameV2Impl.splitDepth(), 30, "FaultDisputeGameV2 splitDepth incorrect"); + assertEq( + output1.faultDisputeGameV2Impl.clockExtension().raw(), 10800, "FaultDisputeGameV2 clockExtension incorrect" + ); + assertEq( + output1.faultDisputeGameV2Impl.maxClockDuration().raw(), + 302400, + "FaultDisputeGameV2 maxClockDuration incorrect" + ); + + // Validate constructor args for PermissionedDisputeGameV2 + assertEq( + uint256(output1.permissionedDisputeGameV2Impl.gameType().raw()), + uint256(GameTypes.PERMISSIONED_CANNON.raw()), + "PermissionedDisputeGameV2 gameType incorrect" + ); + assertEq( + output1.permissionedDisputeGameV2Impl.maxGameDepth(), 73, "PermissionedDisputeGameV2 maxGameDepth incorrect" + ); + assertEq( + output1.permissionedDisputeGameV2Impl.splitDepth(), 30, "PermissionedDisputeGameV2 splitDepth incorrect" + ); + assertEq( + output1.permissionedDisputeGameV2Impl.clockExtension().raw(), + 10800, + "PermissionedDisputeGameV2 clockExtension incorrect" + ); + assertEq( + output1.permissionedDisputeGameV2Impl.maxClockDuration().raw(), + 302400, + "PermissionedDisputeGameV2 maxClockDuration incorrect" + ); + + // Other contracts should remain the same + assertEq( + address(output1.systemConfigImpl), + address(output2.systemConfigImpl), + "SystemConfig addresses should be the same" + ); + 
assertEq( + address(output1.disputeGameFactoryImpl), + address(output2.disputeGameFactoryImpl), + "DisputeGameFactory addresses should be the same" + ); + + // Running with same flags should produce same results + DeployImplementations.Output memory output3 = deployImplementations.run(inputEnabled); + assertEq( + address(output1.faultDisputeGameV2Impl), + address(output3.faultDisputeGameV2Impl), + "V2 enabled addresses should be deterministic" + ); + assertEq( + address(output1.permissionedDisputeGameV2Impl), + address(output3.permissionedDisputeGameV2Impl), + "V2 enabled addresses should be deterministic" + ); + } + function defaultInput() private view returns (DeployImplementations.Input memory input_) { input_ = DeployImplementations.Input( withdrawalDelaySeconds, @@ -253,11 +549,15 @@ contract DeployImplementations_Test is Test { proofMaturityDelaySeconds, disputeGameFinalityDelaySeconds, StandardConstants.MIPS_VERSION, // mipsVersion - "dev-release", // l1ContractsRelease + bytes32(0), // devFeatureBitmap + 73, // faultGameV2MaxGameDepth + 30, // faultGameV2SplitDepth + 10800, // faultGameV2ClockExtension + 302400, // faultGameV2MaxClockDuration superchainConfigProxy, protocolVersionsProxy, superchainProxyAdmin, - upgradeController, + l1ProxyAdminOwner, challenger ); } diff --git a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol index 9683181069941..7b3753dc8f7fa 100644 --- a/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol +++ b/packages/contracts-bedrock/test/opcm/DeployOPChain.t.sol @@ -5,361 +5,75 @@ import { Test } from "forge-std/Test.sol"; import { DeploySuperchain } from "scripts/deploy/DeploySuperchain.s.sol"; import { DeployImplementations } from "scripts/deploy/DeployImplementations.s.sol"; -import { DeployOPChainInput, DeployOPChain, DeployOPChainOutput } from "scripts/deploy/DeployOPChain.s.sol"; -import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; +import { 
DeployOPChain } from "scripts/deploy/DeployOPChain.s.sol"; import { StandardConstants } from "scripts/deploy/StandardConstants.sol"; +import { Types } from "scripts/libraries/Types.sol"; -import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; - -import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; -import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; -import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; -import { IPermissionedDisputeGame } from "interfaces/dispute/IPermissionedDisputeGame.sol"; -import { IL1ChugSplashProxy } from "interfaces/legacy/IL1ChugSplashProxy.sol"; -import { IResolvedDelegateProxy } from "interfaces/legacy/IResolvedDelegateProxy.sol"; - -import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; -import { IProtocolVersions, ProtocolVersion } from "interfaces/L1/IProtocolVersions.sol"; import { IOPContractsManager } from "interfaces/L1/IOPContractsManager.sol"; -import { IProxy } from "interfaces/universal/IProxy.sol"; - -import { Claim, Duration, GameType, GameTypes, Hash, Proposal } from "src/dispute/lib/Types.sol"; - -contract DeployOPChainInput_Test is Test { - DeployOPChainInput doi; - - // Define defaults. 
- address opChainProxyAdminOwner = makeAddr("opChainProxyAdminOwner"); - address systemConfigOwner = makeAddr("systemConfigOwner"); - address batcher = makeAddr("batcher"); - address unsafeBlockSigner = makeAddr("unsafeBlockSigner"); - address proposer = makeAddr("proposer"); - address challenger = makeAddr("challenger"); - address opcm = makeAddr("opcm"); - uint32 basefeeScalar = 100; - uint32 blobBaseFeeScalar = 200; - uint256 l2ChainId = 300; - string saltMixer = "saltMixer"; - - function setUp() public { - doi = new DeployOPChainInput(); - } - - function test_set_succeeds() public { - doi.set(doi.opChainProxyAdminOwner.selector, opChainProxyAdminOwner); - doi.set(doi.systemConfigOwner.selector, systemConfigOwner); - doi.set(doi.batcher.selector, batcher); - doi.set(doi.unsafeBlockSigner.selector, unsafeBlockSigner); - doi.set(doi.proposer.selector, proposer); - doi.set(doi.challenger.selector, challenger); - doi.set(doi.basefeeScalar.selector, basefeeScalar); - doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); - doi.set(doi.l2ChainId.selector, l2ChainId); - doi.set(doi.allowCustomDisputeParameters.selector, true); - doi.set(doi.opcm.selector, opcm); - vm.etch(opcm, hex"01"); - - // Compare the default inputs to the getter methods. 
- assertEq(opChainProxyAdminOwner, doi.opChainProxyAdminOwner(), "200"); - assertEq(systemConfigOwner, doi.systemConfigOwner(), "300"); - assertEq(batcher, doi.batcher(), "400"); - assertEq(unsafeBlockSigner, doi.unsafeBlockSigner(), "500"); - assertEq(proposer, doi.proposer(), "600"); - assertEq(challenger, doi.challenger(), "700"); - assertEq(basefeeScalar, doi.basefeeScalar(), "800"); - assertEq(blobBaseFeeScalar, doi.blobBaseFeeScalar(), "900"); - assertEq(l2ChainId, doi.l2ChainId(), "1000"); - assertEq(opcm, address(doi.opcm()), "1100"); - assertEq(true, doi.allowCustomDisputeParameters(), "1200"); - } - - function test_getters_whenNotSet_reverts() public { - bytes memory expectedErr = "DeployOPChainInput: not set"; - - vm.expectRevert(expectedErr); - doi.opChainProxyAdminOwner(); - - vm.expectRevert(expectedErr); - doi.systemConfigOwner(); - - vm.expectRevert(expectedErr); - doi.batcher(); - - vm.expectRevert(expectedErr); - doi.unsafeBlockSigner(); - - vm.expectRevert(expectedErr); - doi.proposer(); - - vm.expectRevert(expectedErr); - doi.challenger(); - - vm.expectRevert(expectedErr); - doi.basefeeScalar(); - - vm.expectRevert(expectedErr); - doi.blobBaseFeeScalar(); - - vm.expectRevert(expectedErr); - doi.l2ChainId(); - } -} - -contract DeployOPChainOutput_Test is Test { - DeployOPChainOutput doo; - - // We set the non proxy contracts in storage because doing it locally in 'test_set_succeeds' function results in - // stack too deep. 
- IAddressManager addressManager = DeployUtils.buildAddressManager(); - IProxyAdmin opChainProxyAdmin = IProxyAdmin(makeAddr("opChainProxyAdmin")); - IAnchorStateRegistry anchorStateRegistryImpl = IAnchorStateRegistry(makeAddr("anchorStateRegistryImpl")); - IFaultDisputeGame faultDisputeGame = IFaultDisputeGame(makeAddr("faultDisputeGame")); - IPermissionedDisputeGame permissionedDisputeGame = IPermissionedDisputeGame(makeAddr("permissionedDisputeGame")); - - function setUp() public { - doo = new DeployOPChainOutput(); - } - - function test_set_succeeds() public { - vm.etch(address(opChainProxyAdmin), hex"01"); - (IProxy l1ERC721BridgeProxy) = DeployUtils.buildERC1967ProxyWithImpl("l1ERC721BridgeProxy"); - (IProxy systemConfigProxy) = DeployUtils.buildERC1967ProxyWithImpl("systemConfigProxy"); - (IProxy optimismMintableERC20FactoryProxy) = - DeployUtils.buildERC1967ProxyWithImpl("optimismMintableERC20FactoryProxy"); - (IL1ChugSplashProxy l1StandardBridgeProxy) = DeployUtils.buildL1ChugSplashProxyWithImpl("l1StandardBridgeProxy"); - (IResolvedDelegateProxy l1CrossDomainMessengerProxy) = - DeployUtils.buildResolvedDelegateProxyWithImpl(addressManager, "OVM_L1CrossDomainMessenger"); - (IProxy optimismPortalProxy) = DeployUtils.buildERC1967ProxyWithImpl("OptimismPortalProxy"); - (IProxy disputeGameFactoryProxy) = DeployUtils.buildERC1967ProxyWithImpl("disputeGameFactoryProxy"); - (IProxy anchorStateRegistryProxy) = DeployUtils.buildERC1967ProxyWithImpl("anchorStateRegistryProxy"); - vm.etch(address(anchorStateRegistryImpl), hex"01"); - vm.etch(address(faultDisputeGame), hex"01"); - vm.etch(address(permissionedDisputeGame), hex"01"); - // TODO: Eventually switch from Permissioned to Permissionless. 
- // (IProxy delayedWETHPermissionlessGameProxy) = - // DeployUtils.buildERC1967ProxyWithImpl("delayedWETHPermissionlessGameProxy"); - (IProxy delayedWETHPermissionedGameProxy) = - DeployUtils.buildERC1967ProxyWithImpl("delayedWETHPermissionedGameProxy"); - - doo.set(doo.opChainProxyAdmin.selector, address(opChainProxyAdmin)); - doo.set(doo.addressManager.selector, address(addressManager)); - doo.set(doo.l1ERC721BridgeProxy.selector, address(l1ERC721BridgeProxy)); - doo.set(doo.systemConfigProxy.selector, address(systemConfigProxy)); - doo.set(doo.optimismMintableERC20FactoryProxy.selector, address(optimismMintableERC20FactoryProxy)); - doo.set(doo.l1StandardBridgeProxy.selector, address(l1StandardBridgeProxy)); - doo.set(doo.l1CrossDomainMessengerProxy.selector, address(l1CrossDomainMessengerProxy)); - doo.set(doo.optimismPortalProxy.selector, address(optimismPortalProxy)); - doo.set(doo.disputeGameFactoryProxy.selector, address(disputeGameFactoryProxy)); - doo.set(doo.anchorStateRegistryProxy.selector, address(anchorStateRegistryProxy)); - doo.set(doo.faultDisputeGame.selector, address(faultDisputeGame)); - doo.set(doo.permissionedDisputeGame.selector, address(permissionedDisputeGame)); - doo.set(doo.delayedWETHPermissionedGameProxy.selector, address(delayedWETHPermissionedGameProxy)); - // TODO: Eventually switch from Permissioned to Permissionless. 
- // doo.set(doo.delayedWETHPermissionlessGameProxy.selector, address(delayedWETHPermissionlessGameProxy)); - - assertEq(address(opChainProxyAdmin), address(doo.opChainProxyAdmin()), "100"); - assertEq(address(addressManager), address(doo.addressManager()), "200"); - assertEq(address(l1ERC721BridgeProxy), address(doo.l1ERC721BridgeProxy()), "300"); - assertEq(address(systemConfigProxy), address(doo.systemConfigProxy()), "400"); - assertEq(address(optimismMintableERC20FactoryProxy), address(doo.optimismMintableERC20FactoryProxy()), "500"); - assertEq(address(l1StandardBridgeProxy), address(doo.l1StandardBridgeProxy()), "600"); - assertEq(address(l1CrossDomainMessengerProxy), address(doo.l1CrossDomainMessengerProxy()), "700"); - assertEq(address(optimismPortalProxy), address(doo.optimismPortalProxy()), "800"); - assertEq(address(disputeGameFactoryProxy), address(doo.disputeGameFactoryProxy()), "900"); - assertEq(address(anchorStateRegistryProxy), address(doo.anchorStateRegistryProxy()), "1100"); - assertEq(address(faultDisputeGame), address(doo.faultDisputeGame()), "1300"); - assertEq(address(permissionedDisputeGame), address(doo.permissionedDisputeGame()), "1400"); - assertEq(address(delayedWETHPermissionedGameProxy), address(doo.delayedWETHPermissionedGameProxy()), "1500"); - // TODO: Eventually switch from Permissioned to Permissionless. 
- // assertEq(address(delayedWETHPermissionlessGameProxy), address(doo.delayedWETHPermissionlessGameProxy()), - // "1600"); - } - - function test_getters_whenNotSet_reverts() public { - bytes memory expectedErr = "DeployUtils: zero address"; - - vm.expectRevert(expectedErr); - doo.opChainProxyAdmin(); - - vm.expectRevert(expectedErr); - doo.addressManager(); - - vm.expectRevert(expectedErr); - doo.l1ERC721BridgeProxy(); - - vm.expectRevert(expectedErr); - doo.systemConfigProxy(); - - vm.expectRevert(expectedErr); - doo.optimismMintableERC20FactoryProxy(); - - vm.expectRevert(expectedErr); - doo.l1StandardBridgeProxy(); - - vm.expectRevert(expectedErr); - doo.l1CrossDomainMessengerProxy(); - - vm.expectRevert(expectedErr); - doo.optimismPortalProxy(); - - vm.expectRevert(expectedErr); - doo.disputeGameFactoryProxy(); - - vm.expectRevert(expectedErr); - doo.anchorStateRegistryProxy(); - - vm.expectRevert(expectedErr); - doo.faultDisputeGame(); +import { Claim, Duration, GameType, GameTypes } from "src/dispute/lib/Types.sol"; - vm.expectRevert(expectedErr); - doo.permissionedDisputeGame(); - - vm.expectRevert(expectedErr); - doo.delayedWETHPermissionedGameProxy(); - - // TODO: Eventually switch from Permissioned to Permissionless. 
- // vm.expectRevert(expectedErr); - // doo.delayedWETHPermissionlessGameProxy(); - } - - function test_getters_whenAddrHasNoCode_reverts() public { - address emptyAddr = makeAddr("emptyAddr"); - bytes memory expectedErr = bytes(string.concat("DeployUtils: no code at ", vm.toString(emptyAddr))); - - doo.set(doo.opChainProxyAdmin.selector, emptyAddr); - vm.expectRevert(expectedErr); - doo.opChainProxyAdmin(); - - doo.set(doo.addressManager.selector, emptyAddr); - vm.expectRevert(expectedErr); - doo.addressManager(); - - doo.set(doo.l1ERC721BridgeProxy.selector, emptyAddr); - vm.expectRevert(expectedErr); - doo.l1ERC721BridgeProxy(); - - doo.set(doo.systemConfigProxy.selector, emptyAddr); - vm.expectRevert(expectedErr); - doo.systemConfigProxy(); - - doo.set(doo.optimismMintableERC20FactoryProxy.selector, emptyAddr); - vm.expectRevert(expectedErr); - doo.optimismMintableERC20FactoryProxy(); - - doo.set(doo.l1StandardBridgeProxy.selector, emptyAddr); - vm.expectRevert(expectedErr); - doo.l1StandardBridgeProxy(); - - doo.set(doo.l1CrossDomainMessengerProxy.selector, emptyAddr); - vm.expectRevert(expectedErr); - doo.l1CrossDomainMessengerProxy(); - - doo.set(doo.optimismPortalProxy.selector, emptyAddr); - vm.expectRevert(expectedErr); - doo.optimismPortalProxy(); - - doo.set(doo.disputeGameFactoryProxy.selector, emptyAddr); - vm.expectRevert(expectedErr); - doo.disputeGameFactoryProxy(); - - doo.set(doo.anchorStateRegistryProxy.selector, emptyAddr); - vm.expectRevert(expectedErr); - doo.anchorStateRegistryProxy(); - - doo.set(doo.faultDisputeGame.selector, emptyAddr); - vm.expectRevert(expectedErr); - doo.faultDisputeGame(); - - doo.set(doo.permissionedDisputeGame.selector, emptyAddr); - vm.expectRevert(expectedErr); - doo.permissionedDisputeGame(); - - doo.set(doo.delayedWETHPermissionedGameProxy.selector, emptyAddr); - vm.expectRevert(expectedErr); - doo.delayedWETHPermissionedGameProxy(); - - // TODO: Eventually switch from Permissioned to Permissionless. 
- // doo.set(doo.delayedWETHPermissionlessGameProxy.selector, emptyAddr); - // vm.expectRevert(expectedErr); - // doo.delayedWETHPermissionlessGameProxy(); - } -} - -// To mimic a production environment, we default to integration tests here that actually run the -// DeploySuperchain and DeployImplementations scripts. contract DeployOPChain_TestBase is Test { + DeploySuperchain deploySuperchain; + DeployImplementations deployImplementations; DeployOPChain deployOPChain; - DeployOPChainInput doi; - DeployOPChainOutput doo; + Types.DeployOPChainInput deployOPChainInput; - // Define default inputs for DeploySuperchain. - address superchainProxyAdminOwner = makeAddr("defaultSuperchainProxyAdminOwner"); - address protocolVersionsOwner = makeAddr("defaultProtocolVersionsOwner"); - address guardian = makeAddr("defaultGuardian"); + // DeploySuperchain default inputs. + address superchainProxyAdminOwner = makeAddr("superchainProxyAdminOwner"); + address protocolVersionsOwner = makeAddr("protocolVersionsOwner"); + address guardian = makeAddr("guardian"); bool paused = false; - ProtocolVersion requiredProtocolVersion = ProtocolVersion.wrap(1); - ProtocolVersion recommendedProtocolVersion = ProtocolVersion.wrap(2); + bytes32 requiredProtocolVersion = bytes32(uint256(1)); + bytes32 recommendedProtocolVersion = bytes32(uint256(2)); - // Define default inputs for DeployImplementations. - // `superchainConfigProxy` and `protocolVersionsProxy` are set during `setUp` since they are - // outputs of the previous step. + // DeployImplementations default inputs. + // - superchainConfigProxy and protocolVersionsProxy are set during `setUp` since they are + // outputs of DeploySuperchain. 
uint256 withdrawalDelaySeconds = 100; uint256 minProposalSizeBytes = 200; uint256 challengePeriodSeconds = 300; uint256 proofMaturityDelaySeconds = 400; uint256 disputeGameFinalityDelaySeconds = 500; - string release = "dev-release"; // this means implementation contracts will be deployed - ISuperchainConfig superchainConfigProxy; - IProtocolVersions protocolVersionsProxy; - IProxyAdmin superchainProxyAdmin; - address upgradeController; - // Define default inputs for DeployOPChain. - // `opcm` is set during `setUp` since it is an output of the previous step. - address opChainProxyAdminOwner = makeAddr("defaultOPChainProxyAdminOwner"); - address systemConfigOwner = makeAddr("defaultSystemConfigOwner"); - address batcher = makeAddr("defaultBatcher"); - address unsafeBlockSigner = makeAddr("defaultUnsafeBlockSigner"); - address proposer = makeAddr("defaultProposer"); - address challenger = makeAddr("defaultChallenger"); + + // DeployOPChain default inputs. + // - opcm is set during `setUp` since it is an output of DeployImplementations. + address opChainProxyAdminOwner = makeAddr("opChainProxyAdminOwner"); + address systemConfigOwner = makeAddr("systemConfigOwner"); + address batcher = makeAddr("batcher"); + address unsafeBlockSigner = makeAddr("unsafeBlockSigner"); + address proposer = makeAddr("proposer"); + address challenger = makeAddr("challenger"); uint32 basefeeScalar = 100; uint32 blobBaseFeeScalar = 200; uint256 l2ChainId = 300; - Proposal startingAnchorRoot = Proposal({ root: Hash.wrap(keccak256("defaultOutputRoot")), l2SequenceNumber: 400 }); - IOPContractsManager opcm = IOPContractsManager(address(0)); - string saltMixer = "defaultSaltMixer"; + string saltMixer = "saltMixer"; uint64 gasLimit = 60_000_000; - // Configurable dispute game parameters. 
- uint32 disputeGameType = GameType.unwrap(GameTypes.PERMISSIONED_CANNON); - bytes32 disputeAbsolutePrestate = hex"038512e02c4c3f7bdaec27d00edf55b7155e0905301e1a88083e4e0a6764d54c"; + GameType disputeGameType = GameTypes.PERMISSIONED_CANNON; + Claim disputeAbsolutePrestate = Claim.wrap(0x038512e02c4c3f7bdaec27d00edf55b7155e0905301e1a88083e4e0a6764d54c); uint256 disputeMaxGameDepth = 73; uint256 disputeSplitDepth = 30; - uint64 disputeClockExtension = Duration.unwrap(Duration.wrap(3 hours)); - uint64 disputeMaxClockDuration = Duration.unwrap(Duration.wrap(3.5 days)); + Duration disputeClockExtension = Duration.wrap(3 hours); + Duration disputeMaxClockDuration = Duration.wrap(3.5 days); + IOPContractsManager opcm; function setUp() public virtual { - // Configure and deploy Superchain contracts - DeploySuperchain deploySuperchain = new DeploySuperchain(); + deploySuperchain = new DeploySuperchain(); + deployImplementations = new DeployImplementations(); + deployOPChain = new DeployOPChain(); + // 1) DeploySuperchain DeploySuperchain.Output memory dso = deploySuperchain.run( DeploySuperchain.Input({ superchainProxyAdminOwner: superchainProxyAdminOwner, protocolVersionsOwner: protocolVersionsOwner, guardian: guardian, paused: paused, - requiredProtocolVersion: bytes32(ProtocolVersion.unwrap(requiredProtocolVersion)), - recommendedProtocolVersion: bytes32(ProtocolVersion.unwrap(recommendedProtocolVersion)) + requiredProtocolVersion: requiredProtocolVersion, + recommendedProtocolVersion: recommendedProtocolVersion }) ); - // Populate the inputs for DeployImplementations based on the output of DeploySuperchain. 
- superchainConfigProxy = dso.superchainConfigProxy; - protocolVersionsProxy = dso.protocolVersionsProxy; - superchainProxyAdmin = dso.superchainProxyAdmin; - upgradeController = superchainProxyAdmin.owner(); - - // Configure and deploy Implementation contracts - DeployImplementations deployImplementations = new DeployImplementations(); - + // 2) DeployImplementations (produces OPCM) DeployImplementations.Output memory dio = deployImplementations.run( DeployImplementations.Input({ withdrawalDelaySeconds: withdrawalDelaySeconds, @@ -367,22 +81,46 @@ contract DeployOPChain_TestBase is Test { challengePeriodSeconds: challengePeriodSeconds, proofMaturityDelaySeconds: proofMaturityDelaySeconds, disputeGameFinalityDelaySeconds: disputeGameFinalityDelaySeconds, - l1ContractsRelease: release, mipsVersion: StandardConstants.MIPS_VERSION, - superchainConfigProxy: superchainConfigProxy, - protocolVersionsProxy: protocolVersionsProxy, - superchainProxyAdmin: superchainProxyAdmin, - upgradeController: upgradeController, - challenger: challenger + faultGameV2MaxGameDepth: 73, + faultGameV2SplitDepth: 30, + faultGameV2ClockExtension: 10800, + faultGameV2MaxClockDuration: 302400, + superchainConfigProxy: dso.superchainConfigProxy, + protocolVersionsProxy: dso.protocolVersionsProxy, + superchainProxyAdmin: dso.superchainProxyAdmin, + l1ProxyAdminOwner: dso.superchainProxyAdmin.owner(), + challenger: challenger, + devFeatureBitmap: bytes32(0) }) ); - - // Set the OPContractsManager input for DeployOPChain. opcm = dio.opcm; - - // Deploy DeployOpChain, but defer populating the input values to the test suites inheriting this contract. 
- deployOPChain = new DeployOPChain(); - (doi, doo) = deployOPChain.etchIOContracts(); + vm.label(address(opcm), "opcm"); + + // 3) Build DeployOPChainInput struct + deployOPChainInput = Types.DeployOPChainInput({ + opChainProxyAdminOwner: opChainProxyAdminOwner, + systemConfigOwner: systemConfigOwner, + batcher: batcher, + unsafeBlockSigner: unsafeBlockSigner, + proposer: proposer, + challenger: challenger, + basefeeScalar: basefeeScalar, + blobBaseFeeScalar: blobBaseFeeScalar, + l2ChainId: l2ChainId, + opcm: address(opcm), + saltMixer: saltMixer, + gasLimit: gasLimit, + disputeGameType: disputeGameType, + disputeAbsolutePrestate: disputeAbsolutePrestate, + disputeMaxGameDepth: disputeMaxGameDepth, + disputeSplitDepth: disputeSplitDepth, + disputeClockExtension: disputeClockExtension, + disputeMaxClockDuration: disputeMaxClockDuration, + allowCustomDisputeParameters: false, + operatorFeeScalar: 0, + operatorFeeConstant: 0 + }); } } @@ -391,120 +129,65 @@ contract DeployOPChain_Test is DeployOPChain_TestBase { return keccak256(abi.encode(_seed, _i)); } - function testFuzz_run_memory_succeeds(bytes32 _seed) public { - opChainProxyAdminOwner = address(uint160(uint256(hash(_seed, 0)))); - systemConfigOwner = address(uint160(uint256(hash(_seed, 1)))); - batcher = address(uint160(uint256(hash(_seed, 2)))); - unsafeBlockSigner = address(uint160(uint256(hash(_seed, 3)))); - proposer = address(uint160(uint256(hash(_seed, 4)))); - challenger = address(uint160(uint256(hash(_seed, 5)))); - basefeeScalar = uint32(uint256(hash(_seed, 6))); - blobBaseFeeScalar = uint32(uint256(hash(_seed, 7))); - l2ChainId = uint256(hash(_seed, 8)); - - doi.set(doi.opChainProxyAdminOwner.selector, opChainProxyAdminOwner); - doi.set(doi.systemConfigOwner.selector, systemConfigOwner); - doi.set(doi.batcher.selector, batcher); - doi.set(doi.unsafeBlockSigner.selector, unsafeBlockSigner); - doi.set(doi.proposer.selector, proposer); - doi.set(doi.challenger.selector, challenger); - 
doi.set(doi.basefeeScalar.selector, basefeeScalar); - doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); - doi.set(doi.l2ChainId.selector, l2ChainId); - doi.set(doi.opcm.selector, address(opcm)); - doi.set(doi.saltMixer.selector, saltMixer); - doi.set(doi.gasLimit.selector, gasLimit); - doi.set(doi.disputeGameType.selector, disputeGameType); - doi.set(doi.disputeAbsolutePrestate.selector, disputeAbsolutePrestate); - doi.set(doi.disputeMaxGameDepth.selector, disputeMaxGameDepth); - doi.set(doi.disputeSplitDepth.selector, disputeSplitDepth); - doi.set(doi.disputeClockExtension.selector, disputeClockExtension); - doi.set(doi.disputeMaxClockDuration.selector, disputeMaxClockDuration); - - deployOPChain.run(doi, doo); - - // TODO Add fault proof contract assertions below once OPCM fully supports them. + function test_run_succeeds() public { + DeployOPChain.Output memory doo = deployOPChain.run(deployOPChainInput); + // Basic non-zero and code checks are covered inside run->checkOutput. + // Additonal targeted assertions added below. - // Assert that individual input fields were properly set based on the inputs. 
- assertEq(opChainProxyAdminOwner, doi.opChainProxyAdminOwner(), "100"); - assertEq(systemConfigOwner, doi.systemConfigOwner(), "200"); - assertEq(batcher, doi.batcher(), "300"); - assertEq(unsafeBlockSigner, doi.unsafeBlockSigner(), "400"); - assertEq(proposer, doi.proposer(), "500"); - assertEq(challenger, doi.challenger(), "600"); - assertEq(basefeeScalar, doi.basefeeScalar(), "700"); - assertEq(blobBaseFeeScalar, doi.blobBaseFeeScalar(), "800"); - assertEq(l2ChainId, doi.l2ChainId(), "900"); - assertEq(saltMixer, doi.saltMixer(), "1000"); - assertEq(gasLimit, doi.gasLimit(), "1100"); - assertEq(disputeGameType, GameType.unwrap(doi.disputeGameType()), "1200"); - assertEq(disputeAbsolutePrestate, Claim.unwrap(doi.disputeAbsolutePrestate()), "1300"); - assertEq(disputeMaxGameDepth, doi.disputeMaxGameDepth(), "1400"); - assertEq(disputeSplitDepth, doi.disputeSplitDepth(), "1500"); - assertEq(disputeClockExtension, Duration.unwrap(doi.disputeClockExtension()), "1600"); - assertEq(disputeMaxClockDuration, Duration.unwrap(doi.disputeMaxClockDuration()), "1700"); - - // Assert inputs were properly passed through to the contract initializers. 
- assertEq(address(doo.opChainProxyAdmin().owner()), opChainProxyAdminOwner, "2100"); - assertEq(address(doo.systemConfigProxy().owner()), systemConfigOwner, "2200"); - address batcherActual = address(uint160(uint256(doo.systemConfigProxy().batcherHash()))); - assertEq(batcherActual, batcher, "2300"); - assertEq(address(doo.systemConfigProxy().unsafeBlockSigner()), unsafeBlockSigner, "2400"); - assertEq(address(doo.permissionedDisputeGame().proposer()), proposer, "2500"); - assertEq(address(doo.permissionedDisputeGame().challenger()), challenger, "2600"); + assertEq(address(doo.permissionedDisputeGame.proposer()), proposer, "PDG proposer"); + assertEq(address(doo.permissionedDisputeGame.challenger()), challenger, "PDG challenger"); + assertEq(doo.permissionedDisputeGame.splitDepth(), disputeSplitDepth, "PDG splitDepth"); + assertEq(doo.permissionedDisputeGame.maxGameDepth(), disputeMaxGameDepth, "PDG maxGameDepth"); + assertEq( + Duration.unwrap(doo.permissionedDisputeGame.clockExtension()), + Duration.unwrap(disputeClockExtension), + "PDG clockExtension" + ); + assertEq( + Duration.unwrap(doo.permissionedDisputeGame.maxClockDuration()), + Duration.unwrap(disputeMaxClockDuration), + "PDG maxClockDuration" + ); + assertEq( + Claim.unwrap(doo.permissionedDisputeGame.absolutePrestate()), + Claim.unwrap(disputeAbsolutePrestate), + "PDG absolutePrestate" + ); + } - // TODO once we deploy the Permissionless Dispute Game - // assertEq(address(doo.faultDisputeGame().proposer()), proposer, "2610"); - // assertEq(address(doo.faultDisputeGame().challenger()), challenger, "2620"); + function testFuzz_run_memory_succeeds(bytes32 _seed) public { + deployOPChainInput.opChainProxyAdminOwner = address(uint160(uint256(hash(_seed, 0)))); + deployOPChainInput.systemConfigOwner = address(uint160(uint256(hash(_seed, 1)))); + deployOPChainInput.batcher = address(uint160(uint256(hash(_seed, 2)))); + deployOPChainInput.unsafeBlockSigner = address(uint160(uint256(hash(_seed, 3)))); + 
deployOPChainInput.proposer = address(uint160(uint256(hash(_seed, 4)))); + deployOPChainInput.challenger = address(uint160(uint256(hash(_seed, 5)))); + deployOPChainInput.basefeeScalar = uint32(uint256(hash(_seed, 6))); + deployOPChainInput.blobBaseFeeScalar = uint32(uint256(hash(_seed, 7))); + deployOPChainInput.l2ChainId = uint256(hash(_seed, 8)); + + DeployOPChain.Output memory doo = deployOPChain.run(deployOPChainInput); // Verify that the initial bonds are zero. - assertEq(doo.disputeGameFactoryProxy().initBonds(GameTypes.CANNON), 0, "2700"); - assertEq(doo.disputeGameFactoryProxy().initBonds(GameTypes.PERMISSIONED_CANNON), 0, "2800"); + assertEq(doo.disputeGameFactoryProxy.initBonds(GameTypes.CANNON), 0, "2700"); + assertEq(doo.disputeGameFactoryProxy.initBonds(GameTypes.PERMISSIONED_CANNON), 0, "2800"); - (Hash actualRoot,) = doo.anchorStateRegistryProxy().anchors(GameTypes.PERMISSIONED_CANNON); - assertEq(Hash.unwrap(actualRoot), 0xdead000000000000000000000000000000000000000000000000000000000000, "2900"); - assertEq(doo.permissionedDisputeGame().l2BlockNumber(), 0, "3000"); + assertEq(doo.permissionedDisputeGame.l2BlockNumber(), 0, "3000"); assertEq( - Claim.unwrap(doo.permissionedDisputeGame().absolutePrestate()), + Claim.unwrap(doo.permissionedDisputeGame.absolutePrestate()), 0x038512e02c4c3f7bdaec27d00edf55b7155e0905301e1a88083e4e0a6764d54c, "3100" ); - assertEq(Duration.unwrap(doo.permissionedDisputeGame().clockExtension()), 10800, "3200"); - assertEq(Duration.unwrap(doo.permissionedDisputeGame().maxClockDuration()), 302400, "3300"); - assertEq(doo.permissionedDisputeGame().splitDepth(), 30, "3400"); - assertEq(doo.permissionedDisputeGame().maxGameDepth(), 73, "3500"); - - assertEq(address(doo.opChainProxyAdmin().addressManager().owner()), address(doo.opChainProxyAdmin()), "3600"); - assertEq(address(doo.opChainProxyAdmin().addressManager()), address(doo.addressManager()), "3700"); - assertEq(address(doo.opChainProxyAdmin().owner()), 
opChainProxyAdminOwner, "3800"); + assertEq(Duration.unwrap(doo.permissionedDisputeGame.clockExtension()), 10800, "3200"); + assertEq(Duration.unwrap(doo.permissionedDisputeGame.maxClockDuration()), 302400, "3300"); + assertEq(doo.permissionedDisputeGame.splitDepth(), 30, "3400"); + assertEq(doo.permissionedDisputeGame.maxGameDepth(), 73, "3500"); } function test_customDisputeGame_customEnabled_succeeds() public { - setDOI(); - doi.set(doi.allowCustomDisputeParameters.selector, true); - doi.set(doi.disputeSplitDepth.selector, disputeSplitDepth + 1); - deployOPChain.run(doi, doo); - assertEq(doo.permissionedDisputeGame().splitDepth(), disputeSplitDepth + 1); - } - - function setDOI() internal { - doi.set(doi.opChainProxyAdminOwner.selector, opChainProxyAdminOwner); - doi.set(doi.systemConfigOwner.selector, systemConfigOwner); - doi.set(doi.batcher.selector, batcher); - doi.set(doi.unsafeBlockSigner.selector, unsafeBlockSigner); - doi.set(doi.proposer.selector, proposer); - doi.set(doi.challenger.selector, challenger); - doi.set(doi.basefeeScalar.selector, basefeeScalar); - doi.set(doi.blobBaseFeeScalar.selector, blobBaseFeeScalar); - doi.set(doi.l2ChainId.selector, l2ChainId); - doi.set(doi.opcm.selector, address(opcm)); - doi.set(doi.saltMixer.selector, saltMixer); - doi.set(doi.gasLimit.selector, gasLimit); - doi.set(doi.disputeGameType.selector, disputeGameType); - doi.set(doi.disputeAbsolutePrestate.selector, disputeAbsolutePrestate); - doi.set(doi.disputeMaxGameDepth.selector, disputeMaxGameDepth); - doi.set(doi.disputeSplitDepth.selector, disputeSplitDepth); - doi.set(doi.disputeClockExtension.selector, disputeClockExtension); - doi.set(doi.disputeMaxClockDuration.selector, disputeMaxClockDuration); + deployOPChainInput.allowCustomDisputeParameters = true; + deployOPChainInput.disputeSplitDepth = disputeSplitDepth + 1; + DeployOPChain.Output memory doo = deployOPChain.run(deployOPChainInput); + assertEq(doo.permissionedDisputeGame.splitDepth(), 
disputeSplitDepth + 1); } } diff --git a/packages/contracts-bedrock/test/periphery/TransferOnion.t.sol b/packages/contracts-bedrock/test/periphery/TransferOnion.t.sol index 52b886eae6a56..694a5b4bbc478 100644 --- a/packages/contracts-bedrock/test/periphery/TransferOnion.t.sol +++ b/packages/contracts-bedrock/test/periphery/TransferOnion.t.sol @@ -67,10 +67,10 @@ contract TransferOnion_Constructor_Test is TransferOnion_TestInit { } } -/// @title TransferOnion_Unclassified_Test +/// @title TransferOnion_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `TransferOnion` /// contract or are testing multiple functions at once. -contract TransferOnion_Unclassified_Test is TransferOnion_TestInit { +contract TransferOnion_Uncategorized_Test is TransferOnion_TestInit { /// @notice Tests unwrapping the onion. function test_unwrap_succeeds() external { // Commit to transferring tiny amounts of tokens diff --git a/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckBalanceLow.t.sol b/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckBalanceLow.t.sol index c6ff5f1642757..c891c44f1bfa0 100644 --- a/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckBalanceLow.t.sol +++ b/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckBalanceLow.t.sol @@ -45,10 +45,10 @@ contract CheckBalanceLow_Check_Test is CheckBalanceLow_TestInit { } } -/// @title CheckBalanceLow_Unclassified_Test +/// @title CheckBalanceLow_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `CheckBalanceLow` /// contract or are testing multiple functions at once. -contract CheckBalanceLow_Unclassified_Test is CheckBalanceLow_TestInit { +contract CheckBalanceLow_Uncategorized_Test is CheckBalanceLow_TestInit { /// @notice Test that the `name` function returns the correct value. 
function test_name_succeeds() external view { assertEq(c.name(), "CheckBalanceLow"); diff --git a/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckSecrets.t.sol b/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckSecrets.t.sol index 2e95ea3ad183b..c83fab0e054df 100644 --- a/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckSecrets.t.sol +++ b/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckSecrets.t.sol @@ -191,10 +191,10 @@ contract CheckSecrets_Reveal_Test is CheckSecrets_TestInit { } } -/// @title CheckSecrets_Unclassified_Test +/// @title CheckSecrets_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `CheckSecrets` contract /// or are testing multiple functions at once. -contract CheckSecrets_Unclassified_Test is CheckSecrets_TestInit { +contract CheckSecrets_Uncategorized_Test is CheckSecrets_TestInit { /// @notice Test that the `name` function returns the correct value. function test_name_succeeds() external view { assertEq(c.name(), "CheckSecrets"); diff --git a/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckTrue.t.sol b/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckTrue.t.sol index f04ccceaa0d96..c787158c631c8 100644 --- a/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckTrue.t.sol +++ b/packages/contracts-bedrock/test/periphery/drippie/dripchecks/CheckTrue.t.sol @@ -25,10 +25,10 @@ contract CheckTrue_Check_Test is CheckTrue_TestInit { } } -/// @title CheckTrue_Unclassified_Test +/// @title CheckTrue_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `CheckTrue` contract or /// are testing multiple functions at once. -contract CheckTrue_Unclassified_Test is CheckTrue_TestInit { +contract CheckTrue_Uncategorized_Test is CheckTrue_TestInit { /// @notice Test that the `name` function returns the correct value. 
function test_name_succeeds() external view { assertEq(c.name(), "CheckTrue"); diff --git a/packages/contracts-bedrock/test/safe/DeputyPauseModule.t.sol b/packages/contracts-bedrock/test/safe/DeputyPauseModule.t.sol index ab651861063b3..996502234f27e 100644 --- a/packages/contracts-bedrock/test/safe/DeputyPauseModule.t.sol +++ b/packages/contracts-bedrock/test/safe/DeputyPauseModule.t.sol @@ -17,8 +17,8 @@ import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; contract DeputyPauseModule_TestInit is CommonTest, SafeTestTools { using SafeTestLib for SafeInstance; - event ExecutionFromModuleSuccess(address indexed); - event DeputySet(address indexed); + event ExecutionFromModuleSuccess(address indexed module); + event DeputySet(address indexed deputy); event PauseTriggered(address indexed deputy, bytes32 nonce, address identifier); IDeputyPauseModule deputyPauseModule; @@ -617,10 +617,10 @@ contract DeputyPauseModule_Pause_Test is DeputyPauseModule_TestInit { } } -/// @title L1CrossDomainMessenger_Unclassified_Test +/// @title DeputyPauseModule_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `DeputyPauseModule` /// contract or are testing multiple functions at once. -contract DeputyPauseModule_Unclassified_Test is DeputyPauseModule_TestInit { +contract DeputyPauseModule_Uncategorized_Test is DeputyPauseModule_TestInit { /// @notice Tests that the getters work. 
function test_getters_works() external view { assertEq(address(deputyPauseModule.guardianSafe()), address(guardianSafeInstance.safe)); diff --git a/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol b/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol index 266800904f952..00596d59ac768 100644 --- a/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol +++ b/packages/contracts-bedrock/test/safe/LivenessGuard.t.sol @@ -162,10 +162,10 @@ contract LivenessGuard_ShowLiveness_Test is LivenessGuard_TestInit { } } -/// @title LivenessGuard_Unclassified_Test +/// @title LivenessGuard_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `LivenessGuard` /// contract or are testing multiple functions at once. -contract LivenessGuard_Unclassified_Test is StdCheats, StdUtils, LivenessGuard_TestInit { +contract LivenessGuard_Uncategorized_Test is StdCheats, StdUtils, LivenessGuard_TestInit { using SafeTestLib for SafeInstance; /// @notice Enumerates the possible owner management operations diff --git a/packages/contracts-bedrock/test/safe/LivenessModule.t.sol b/packages/contracts-bedrock/test/safe/LivenessModule.t.sol index b48960e6b279a..9e878c70b898c 100644 --- a/packages/contracts-bedrock/test/safe/LivenessModule.t.sol +++ b/packages/contracts-bedrock/test/safe/LivenessModule.t.sol @@ -603,10 +603,10 @@ contract LivenessModule_RemoveOwners_Test is LivenessModule_TestInit { } } -/// @title LivenessModule_Unclassified_Test +/// @title LivenessModule_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `LivenessModule` /// contract or are testing multiple functions at once. 
-contract LivenessModule_Unclassified_Test is LivenessModule_TestInit { +contract LivenessModule_Uncategorized_Test is LivenessModule_TestInit { /// @notice Tests if the getters work correctly function test_getters_works() external view { assertEq(address(livenessModule.safe()), address(safeInstance.safe)); diff --git a/packages/contracts-bedrock/test/safe/LivenessModule2.t.sol b/packages/contracts-bedrock/test/safe/LivenessModule2.t.sol new file mode 100644 index 0000000000000..e552e6a00f23c --- /dev/null +++ b/packages/contracts-bedrock/test/safe/LivenessModule2.t.sol @@ -0,0 +1,585 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +import { Test } from "forge-std/Test.sol"; +import { Enum } from "safe-contracts/common/Enum.sol"; +import "test/safe-tools/SafeTestTools.sol"; + +import { LivenessModule2 } from "src/safe/LivenessModule2.sol"; + +/// @title LivenessModule2_TestInit +/// @notice Reusable test initialization for `LivenessModule2` tests. +contract LivenessModule2_TestInit is Test, SafeTestTools { + using SafeTestLib for SafeInstance; + + // Events + event ModuleConfigured(address indexed safe, uint256 livenessResponsePeriod, address fallbackOwner); + event ModuleCleared(address indexed safe); + event ChallengeStarted(address indexed safe, uint256 challengeStartTime); + event ChallengeCancelled(address indexed safe); + event ChallengeSucceeded(address indexed safe, address fallbackOwner); + + uint256 constant INIT_TIME = 10; + uint256 constant CHALLENGE_PERIOD = 7 days; + uint256 constant NUM_OWNERS = 5; + uint256 constant THRESHOLD = 3; + + LivenessModule2 livenessModule2; + SafeInstance safeInstance; + address fallbackOwner; + address[] owners; + uint256[] ownerPKs; + + function setUp() public virtual { + vm.warp(INIT_TIME); + + // Deploy the singleton LivenessModule2 + livenessModule2 = new LivenessModule2(); + + // Create Safe owners + (address[] memory _owners, uint256[] memory _keys) = SafeTestLib.makeAddrsAndKeys("owners", 
NUM_OWNERS); + owners = _owners; + ownerPKs = _keys; + + // Set up Safe with owners + safeInstance = _setupSafe(ownerPKs, THRESHOLD); + + // Set fallback owner + fallbackOwner = makeAddr("fallbackOwner"); + + // Enable the module on the Safe + SafeTestLib.enableModule(safeInstance, address(livenessModule2)); + } + + /// @notice Helper to enable the LivenessModule2 for a Safe + function _enableModule(SafeInstance memory _safe, uint256 _period, address _fallback) internal { + LivenessModule2.ModuleConfig memory config = + LivenessModule2.ModuleConfig({ livenessResponsePeriod: _period, fallbackOwner: _fallback }); + SafeTestLib.execTransaction( + _safe, + address(livenessModule2), + 0, + abi.encodeCall(LivenessModule2.configureLivenessModule, (config)), + Enum.Operation.Call + ); + } + + /// @notice Helper to disable the LivenessModule2 for a Safe + function _disableModule(SafeInstance memory _safe) internal { + // First disable the module at the Safe level + SafeTestLib.execTransaction( + _safe, + address(_safe.safe), + 0, + abi.encodeCall(ModuleManager.disableModule, (address(0x1), address(livenessModule2))), + Enum.Operation.Call + ); + + // Then clear the module configuration + SafeTestLib.execTransaction( + _safe, + address(livenessModule2), + 0, + abi.encodeCall(LivenessModule2.clearLivenessModule, ()), + Enum.Operation.Call + ); + } + + /// @notice Helper to respond to a challenge from a Safe + function _respondToChallenge(SafeInstance memory _safe) internal { + SafeTestLib.execTransaction( + _safe, address(livenessModule2), 0, abi.encodeCall(LivenessModule2.respond, ()), Enum.Operation.Call + ); + } +} + +/// @title LivenessModule2_Configure_Test +/// @notice Tests configuring and clearing the module +contract LivenessModule2_ConfigureLivenessModule_Test is LivenessModule2_TestInit { + function test_configureLivenessModule_succeeds() external { + vm.expectEmit(true, true, true, true); + emit ModuleConfigured(address(safeInstance.safe), CHALLENGE_PERIOD, 
fallbackOwner); + + _enableModule(safeInstance, CHALLENGE_PERIOD, fallbackOwner); + + (uint256 period, address fbOwner) = livenessModule2.livenessSafeConfiguration(address(safeInstance.safe)); + assertEq(period, CHALLENGE_PERIOD); + assertEq(fbOwner, fallbackOwner); + assertEq(livenessModule2.challengeStartTime(address(safeInstance.safe)), 0); + } + + function test_configureLivenessModule_multipleSafes_succeeds() external { + // Test that multiple independent safes can configure the module + (, uint256[] memory keys1) = SafeTestLib.makeAddrsAndKeys("safe1", NUM_OWNERS); + SafeInstance memory safe1 = _setupSafe(keys1, THRESHOLD); + SafeTestLib.enableModule(safe1, address(livenessModule2)); + + (, uint256[] memory keys2) = SafeTestLib.makeAddrsAndKeys("safe2", NUM_OWNERS); + SafeInstance memory safe2 = _setupSafe(keys2, THRESHOLD); + SafeTestLib.enableModule(safe2, address(livenessModule2)); + + (, uint256[] memory keys3) = SafeTestLib.makeAddrsAndKeys("safe3", NUM_OWNERS); + SafeInstance memory safe3 = _setupSafe(keys3, THRESHOLD); + SafeTestLib.enableModule(safe3, address(livenessModule2)); + + address fallback1 = makeAddr("fallback1"); + address fallback2 = makeAddr("fallback2"); + address fallback3 = makeAddr("fallback3"); + + // Configure module for each safe + _enableModule(safe1, 1 days, fallback1); + _enableModule(safe2, 2 days, fallback2); + _enableModule(safe3, 3 days, fallback3); + + // Verify each safe has independent configuration + (uint256 period1, address fb1) = livenessModule2.livenessSafeConfiguration(address(safe1.safe)); + assertEq(period1, 1 days); + assertEq(fb1, fallback1); + + (uint256 period2, address fb2) = livenessModule2.livenessSafeConfiguration(address(safe2.safe)); + assertEq(period2, 2 days); + assertEq(fb2, fallback2); + + (uint256 period3, address fb3) = livenessModule2.livenessSafeConfiguration(address(safe3.safe)); + assertEq(period3, 3 days); + assertEq(fb3, fallback3); + } + + function 
test_configureLivenessModule_requiresSafeModuleInstallation_reverts() external { + // Create a safe that has NOT installed the module at the Safe level + (, uint256[] memory newKeys) = SafeTestLib.makeAddrsAndKeys("newSafe", NUM_OWNERS); + SafeInstance memory newSafe = _setupSafe(newKeys, THRESHOLD); + // Note: we don't call SafeTestLib.enableModule here + + // Now configure should revert because the module is not enabled at the Safe level + vm.expectRevert(LivenessModule2.LivenessModule2_ModuleNotEnabled.selector); + vm.prank(address(newSafe.safe)); + livenessModule2.configureLivenessModule( + LivenessModule2.ModuleConfig({ livenessResponsePeriod: CHALLENGE_PERIOD, fallbackOwner: fallbackOwner }) + ); + } + + function test_configureLivenessModule_invalidResponsePeriod_reverts() external { + // Test with zero period + vm.expectRevert(LivenessModule2.LivenessModule2_InvalidResponsePeriod.selector); + vm.prank(address(safeInstance.safe)); + livenessModule2.configureLivenessModule( + LivenessModule2.ModuleConfig({ livenessResponsePeriod: 0, fallbackOwner: fallbackOwner }) + ); + } + + function test_configureLivenessModule_invalidFallbackOwner_reverts() external { + // Test with zero address + vm.expectRevert(LivenessModule2.LivenessModule2_InvalidFallbackOwner.selector); + vm.prank(address(safeInstance.safe)); + livenessModule2.configureLivenessModule( + LivenessModule2.ModuleConfig({ livenessResponsePeriod: CHALLENGE_PERIOD, fallbackOwner: address(0) }) + ); + } + + function test_configureLivenessModule_cancelsExistingChallenge_succeeds() external { + // First configure the module + _enableModule(safeInstance, CHALLENGE_PERIOD, fallbackOwner); + + // Start a challenge + vm.prank(fallbackOwner); + livenessModule2.challenge(address(safeInstance.safe)); + + // Verify challenge exists + uint256 challengeEndTime = livenessModule2.getChallengePeriodEnd(address(safeInstance.safe)); + assertGt(challengeEndTime, 0); + + // Reconfigure the module, which should cancel the 
challenge and emit ChallengeCancelled + vm.expectEmit(true, true, true, true); + emit ChallengeCancelled(address(safeInstance.safe)); + vm.expectEmit(true, true, true, true); + emit ModuleConfigured(address(safeInstance.safe), CHALLENGE_PERIOD * 2, fallbackOwner); + + vm.prank(address(safeInstance.safe)); + livenessModule2.configureLivenessModule( + LivenessModule2.ModuleConfig({ livenessResponsePeriod: CHALLENGE_PERIOD * 2, fallbackOwner: fallbackOwner }) + ); + + // Verify challenge was cancelled + challengeEndTime = livenessModule2.getChallengePeriodEnd(address(safeInstance.safe)); + assertEq(challengeEndTime, 0); + } + + function test_clear_succeeds() external { + _enableModule(safeInstance, CHALLENGE_PERIOD, fallbackOwner); + + // First disable the module at the Safe level + SafeTestLib.execTransaction( + safeInstance, + address(safeInstance.safe), + 0, + abi.encodeCall(ModuleManager.disableModule, (address(0x1), address(livenessModule2))), + Enum.Operation.Call + ); + + vm.expectEmit(true, true, true, true); + emit ModuleCleared(address(safeInstance.safe)); + + // Now clear the configuration + SafeTestLib.execTransaction( + safeInstance, + address(livenessModule2), + 0, + abi.encodeCall(LivenessModule2.clearLivenessModule, ()), + Enum.Operation.Call + ); + + (uint256 period, address fbOwner) = livenessModule2.livenessSafeConfiguration(address(safeInstance.safe)); + assertEq(period, 0); + assertEq(fbOwner, address(0)); + } + + function test_clear_notEnabled_reverts() external { + vm.expectRevert(LivenessModule2.LivenessModule2_ModuleNotConfigured.selector); + vm.prank(address(safeInstance.safe)); + livenessModule2.clearLivenessModule(); + } + + function test_clear_moduleStillEnabled_reverts() external { + _enableModule(safeInstance, CHALLENGE_PERIOD, fallbackOwner); + + // Try to clear while module is still enabled (should revert) + vm.expectRevert(LivenessModule2.LivenessModule2_ModuleStillEnabled.selector); + vm.prank(address(safeInstance.safe)); + 
livenessModule2.clearLivenessModule(); + } +} + +/// @title LivenessModule2_Challenge_Test +/// @notice Tests the challenge mechanism +contract LivenessModule2_Challenge_Test is LivenessModule2_TestInit { + function setUp() public override { + super.setUp(); + _enableModule(safeInstance, CHALLENGE_PERIOD, fallbackOwner); + } + + function test_challenge_succeeds() external { + vm.expectEmit(true, true, true, true); + emit ChallengeStarted(address(safeInstance.safe), block.timestamp); + + vm.prank(fallbackOwner); + livenessModule2.challenge(address(safeInstance.safe)); + + uint256 challengeEndTime = livenessModule2.getChallengePeriodEnd(address(safeInstance.safe)); + assertEq(challengeEndTime, block.timestamp + CHALLENGE_PERIOD); + } + + function test_challenge_notFallbackOwner_reverts() external { + address notFallback = makeAddr("notFallback"); + + vm.expectRevert(LivenessModule2.LivenessModule2_UnauthorizedCaller.selector); + vm.prank(notFallback); + livenessModule2.challenge(address(safeInstance.safe)); + } + + function test_challenge_moduleNotEnabled_reverts() external { + address newSafe = makeAddr("newSafe"); + + vm.expectRevert(LivenessModule2.LivenessModule2_ModuleNotConfigured.selector); + vm.prank(fallbackOwner); + livenessModule2.challenge(newSafe); + } + + function test_challenge_alreadyExists_reverts() external { + vm.prank(fallbackOwner); + livenessModule2.challenge(address(safeInstance.safe)); + + vm.expectRevert(LivenessModule2.LivenessModule2_ChallengeAlreadyExists.selector); + vm.prank(fallbackOwner); + livenessModule2.challenge(address(safeInstance.safe)); + } + + function test_challenge_moduleDisabledAtSafeLevel_reverts() external { + // Create a Safe, configure it, then disable the module at Safe level + (, uint256[] memory newKeys) = SafeTestLib.makeAddrsAndKeys("disabledSafe", NUM_OWNERS); + SafeInstance memory disabledSafe = _setupSafe(newKeys, THRESHOLD); + + // First enable module at Safe level + SafeTestLib.enableModule(disabledSafe, 
address(livenessModule2)); + + // Then configure + _enableModule(disabledSafe, CHALLENGE_PERIOD, fallbackOwner); + + // Now disable the module at Safe level (but keep config) + SafeTestLib.execTransaction( + disabledSafe, + address(disabledSafe.safe), + 0, + abi.encodeCall(ModuleManager.disableModule, (address(0x1), address(livenessModule2))), + Enum.Operation.Call + ); + + // Try to challenge - should revert because module is disabled at Safe level + vm.expectRevert(LivenessModule2.LivenessModule2_ModuleNotEnabled.selector); + vm.prank(fallbackOwner); + livenessModule2.challenge(address(disabledSafe.safe)); + } + + function test_respond_succeeds() external { + // Start a challenge + vm.prank(fallbackOwner); + livenessModule2.challenge(address(safeInstance.safe)); + + // Cancel it + vm.expectEmit(true, true, true, true); + emit ChallengeCancelled(address(safeInstance.safe)); + + _respondToChallenge(safeInstance); + + // Verify challenge is cancelled + uint256 challengeEndTime = livenessModule2.getChallengePeriodEnd(address(safeInstance.safe)); + assertEq(challengeEndTime, 0); + } + + function test_respond_noChallenge_reverts() external { + // Module is already enabled in setUp, no challenge exists + + // Try to cancel when no challenge exists - this should fail + // We need to use a transaction that would work if there was a challenge + // Use safeTxGas > 0 to allow the Safe to handle the revert gracefully + bytes memory data = abi.encodeCall(LivenessModule2.respond, ()); + bool success = SafeTestLib.execTransaction( + safeInstance, + address(livenessModule2), + 0, + data, + Enum.Operation.Call, + 100000, // safeTxGas > 0 allows transaction to fail without reverting + 0, + 0, + address(0), + address(0), + "" + ); + assertFalse(success, "Should fail to cancel non-existent challenge"); + } + + function test_respond_afterResponsePeriod_succeeds() external { + // Start a challenge + vm.prank(fallbackOwner); + livenessModule2.challenge(address(safeInstance.safe)); + + 
// Warp past challenge period + vm.warp(block.timestamp + CHALLENGE_PERIOD + 1); + + // Should be able to respond even after response period (per new specs) + vm.expectEmit(true, true, true, true); + emit ChallengeCancelled(address(safeInstance.safe)); + vm.prank(address(safeInstance.safe)); + livenessModule2.respond(); + + // Verify challenge was cancelled + assertEq(livenessModule2.challengeStartTime(address(safeInstance.safe)), 0); + } + + function test_respond_moduleNotConfigured_reverts() external { + // Create a Safe that hasn't enabled the module + (, uint256[] memory newKeys) = SafeTestLib.makeAddrsAndKeys("safeThatDidntEnable", NUM_OWNERS); + SafeInstance memory safeThatDidntEnable = _setupSafe(newKeys, THRESHOLD); + // Note: we don't call SafeTestLib.enableModule here + + vm.expectRevert(LivenessModule2.LivenessModule2_ModuleNotConfigured.selector); + vm.prank(address(safeThatDidntEnable.safe)); + livenessModule2.respond(); + } + + function test_respond_moduleNotEnabled_reverts() external { + // Create a Safe, enable and configure the module, then disable it + (, uint256[] memory newKeys) = SafeTestLib.makeAddrsAndKeys("configuredButDisabled", NUM_OWNERS); + SafeInstance memory configuredSafe = _setupSafe(newKeys, THRESHOLD); + + // First enable module at Safe level + SafeTestLib.enableModule(configuredSafe, address(livenessModule2)); + + // Configure the module (this sets the configuration) + _enableModule(configuredSafe, CHALLENGE_PERIOD, fallbackOwner); + + // Now disable the module at Safe level (but keep config) + SafeTestLib.execTransaction( + configuredSafe, + address(configuredSafe.safe), + 0, + abi.encodeCall(ModuleManager.disableModule, (address(0x1), address(livenessModule2))), + Enum.Operation.Call + ); + + // Verify the Safe still has configuration but module is not enabled + (uint256 period, address fbOwner) = livenessModule2.livenessSafeConfiguration(address(configuredSafe.safe)); + assertTrue(period > 0); // Configuration exists + 
assertTrue(fbOwner != address(0)); // Configuration exists + assertFalse(configuredSafe.safe.isModuleEnabled(address(livenessModule2))); // Module not enabled + + // Now respond() should revert because module is not enabled + vm.expectRevert(LivenessModule2.LivenessModule2_ModuleNotEnabled.selector); + vm.prank(address(configuredSafe.safe)); + livenessModule2.respond(); + } +} + +/// @title LivenessModule2_ChangeOwnershipToFallback_Test +/// @notice Tests the ownership transfer after successful challenge +contract LivenessModule2_ChangeOwnershipToFallback_Test is LivenessModule2_TestInit { + function setUp() public override { + super.setUp(); + _enableModule(safeInstance, CHALLENGE_PERIOD, fallbackOwner); + } + + function test_changeOwnershipToFallback_succeeds() external { + // Start a challenge + vm.prank(fallbackOwner); + livenessModule2.challenge(address(safeInstance.safe)); + + // Warp past challenge period + vm.warp(block.timestamp + CHALLENGE_PERIOD + 1); + + // Execute ownership transfer + vm.expectEmit(true, true, true, true); + emit ChallengeSucceeded(address(safeInstance.safe), fallbackOwner); + + vm.prank(fallbackOwner); + livenessModule2.changeOwnershipToFallback(address(safeInstance.safe)); + + // Verify ownership changed + address[] memory newOwners = safeInstance.safe.getOwners(); + assertEq(newOwners.length, 1); + assertEq(newOwners[0], fallbackOwner); + assertEq(safeInstance.safe.getThreshold(), 1); + + // Verify challenge is reset + uint256 challengeEndTime = livenessModule2.getChallengePeriodEnd(address(safeInstance.safe)); + assertEq(challengeEndTime, 0); + } + + function test_changeOwnershipToFallback_moduleNotEnabled_reverts() external { + address newSafe = makeAddr("newSafe"); + + vm.prank(fallbackOwner); + vm.expectRevert(LivenessModule2.LivenessModule2_ModuleNotConfigured.selector); + livenessModule2.changeOwnershipToFallback(newSafe); + } + + function test_changeOwnershipToFallback_noChallenge_reverts() external { + 
vm.prank(fallbackOwner); + vm.expectRevert(LivenessModule2.LivenessModule2_ChallengeDoesNotExist.selector); + livenessModule2.changeOwnershipToFallback(address(safeInstance.safe)); + } + + function test_changeOwnershipToFallback_beforeResponsePeriod_reverts() external { + // Start a challenge + vm.prank(fallbackOwner); + livenessModule2.challenge(address(safeInstance.safe)); + + // Try to execute before response period expires + vm.prank(fallbackOwner); + vm.expectRevert(LivenessModule2.LivenessModule2_ResponsePeriodActive.selector); + livenessModule2.changeOwnershipToFallback(address(safeInstance.safe)); + } + + function test_changeOwnershipToFallback_moduleDisabledAtSafeLevel_reverts() external { + // Start a challenge + vm.prank(fallbackOwner); + livenessModule2.challenge(address(safeInstance.safe)); + + // Warp past challenge period + vm.warp(block.timestamp + CHALLENGE_PERIOD + 1); + + // Disable the module at Safe level + SafeTestLib.execTransaction( + safeInstance, + address(safeInstance.safe), + 0, + abi.encodeCall(ModuleManager.disableModule, (address(0x1), address(livenessModule2))), + Enum.Operation.Call + ); + + // Try to execute ownership transfer - should revert because module is disabled at Safe level + vm.prank(fallbackOwner); + vm.expectRevert(LivenessModule2.LivenessModule2_ModuleNotEnabled.selector); + livenessModule2.changeOwnershipToFallback(address(safeInstance.safe)); + } + + function test_changeOwnershipToFallback_onlyFallbackOwner_succeeds() external { + // Start a challenge + vm.prank(fallbackOwner); + livenessModule2.challenge(address(safeInstance.safe)); + + // Warp past challenge period + vm.warp(block.timestamp + CHALLENGE_PERIOD + 1); + + // Try from random address - should fail + address randomCaller = makeAddr("randomCaller"); + vm.prank(randomCaller); + vm.expectRevert(LivenessModule2.LivenessModule2_UnauthorizedCaller.selector); + livenessModule2.changeOwnershipToFallback(address(safeInstance.safe)); + + // Execute from fallback 
owner - should succeed + vm.prank(fallbackOwner); + livenessModule2.changeOwnershipToFallback(address(safeInstance.safe)); + + // Verify ownership changed + address[] memory newOwners = safeInstance.safe.getOwners(); + assertEq(newOwners.length, 1); + assertEq(newOwners[0], fallbackOwner); + } + + function test_changeOwnershipToFallback_canRechallenge_succeeds() external { + // Start and execute first challenge + vm.prank(fallbackOwner); + livenessModule2.challenge(address(safeInstance.safe)); + + vm.warp(block.timestamp + CHALLENGE_PERIOD + 1); + vm.prank(fallbackOwner); + livenessModule2.changeOwnershipToFallback(address(safeInstance.safe)); + + // Start a new challenge (as fallback owner) + vm.prank(fallbackOwner); + livenessModule2.challenge(address(safeInstance.safe)); + + uint256 challengeEndTime = livenessModule2.getChallengePeriodEnd(address(safeInstance.safe)); + assertGt(challengeEndTime, 0); + } +} + +/// @title LivenessModule2_GetChallengePeriodEnd_Test +/// @notice Tests the getChallengePeriodEnd function and related view functionality +contract LivenessModule2_GetChallengePeriodEnd_Test is LivenessModule2_TestInit { + function test_safeConfigs_succeeds() external { + // Before enabling + (uint256 period1, address fbOwner1) = livenessModule2.livenessSafeConfiguration(address(safeInstance.safe)); + assertEq(period1, 0); + assertEq(fbOwner1, address(0)); + assertEq(livenessModule2.challengeStartTime(address(safeInstance.safe)), 0); + + // After enabling + _enableModule(safeInstance, CHALLENGE_PERIOD, fallbackOwner); + (uint256 period2, address fbOwner2) = livenessModule2.livenessSafeConfiguration(address(safeInstance.safe)); + assertEq(period2, CHALLENGE_PERIOD); + assertEq(fbOwner2, fallbackOwner); + assertEq(livenessModule2.challengeStartTime(address(safeInstance.safe)), 0); + } + + function test_getChallengePeriodEnd_succeeds() external { + _enableModule(safeInstance, CHALLENGE_PERIOD, fallbackOwner); + + // No challenge + 
assertEq(livenessModule2.getChallengePeriodEnd(address(safeInstance.safe)), 0); + + // With challenge + vm.prank(fallbackOwner); + livenessModule2.challenge(address(safeInstance.safe)); + assertEq(livenessModule2.getChallengePeriodEnd(address(safeInstance.safe)), block.timestamp + CHALLENGE_PERIOD); + + // After cancellation + _respondToChallenge(safeInstance); + assertEq(livenessModule2.getChallengePeriodEnd(address(safeInstance.safe)), 0); + } + + function test_version_succeeds() external view { + assertTrue(bytes(livenessModule2.version()).length > 0); + } +} diff --git a/packages/contracts-bedrock/test/safe/TimelockGuard.t.sol b/packages/contracts-bedrock/test/safe/TimelockGuard.t.sol new file mode 100644 index 0000000000000..88e12657997d4 --- /dev/null +++ b/packages/contracts-bedrock/test/safe/TimelockGuard.t.sol @@ -0,0 +1,879 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +import { Test } from "forge-std/Test.sol"; +import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol"; +import { GuardManager } from "safe-contracts/base/GuardManager.sol"; +import "test/safe-tools/SafeTestTools.sol"; + +import { TimelockGuard } from "src/safe/TimelockGuard.sol"; + +using TransactionBuilder for TransactionBuilder.Transaction; + +/// @title TransactionBuilder +/// @notice Facilitates the construction of transactions and signatures, and provides helper methods +/// for scheduling, executing, and cancelling transactions. +library TransactionBuilder { + // A struct type used to construct a transaction for scheduling and execution + struct Transaction { + SafeInstance safeInstance; + TimelockGuard.ExecTransactionParams params; + uint256 nonce; + bytes32 hash; + bytes signatures; + } + + address internal constant VM_ADDR = 0x7109709ECfa91a80626fF3989D68f67F5b1DD12D; + + /// @notice Sets a nonce value on the provided transaction struct. 
+ function setNonce(Transaction memory _tx, uint256 _nonce) internal pure { + _tx.nonce = _nonce; + } + + /// @notice Computes and stores the Safe transaction hash for the struct. + function setHash(Transaction memory _tx) internal view { + _tx.hash = _tx.safeInstance.safe.getTransactionHash({ + to: _tx.params.to, + value: _tx.params.value, + data: _tx.params.data, + operation: _tx.params.operation, + safeTxGas: _tx.params.safeTxGas, + baseGas: _tx.params.baseGas, + gasPrice: _tx.params.gasPrice, + gasToken: _tx.params.gasToken, + refundReceiver: _tx.params.refundReceiver, + _nonce: _tx.nonce + }); + } + + /// @notice Collects signatures from the first `_num` owners for the transaction. + function setSignatures(Transaction memory _tx, uint256 _num) internal pure { + bytes memory signatures = new bytes(0); + for (uint256 i; i < _num; ++i) { + (uint8 v, bytes32 r, bytes32 s) = Vm(VM_ADDR).sign(_tx.safeInstance.ownerPKs[i], _tx.hash); + + // The signature format is a compact form of: {bytes32 r}{bytes32 s}{uint8 v} + signatures = bytes.concat(signatures, abi.encodePacked(r, s, v)); + } + _tx.signatures = signatures; + } + + /// @notice Collects enough signatures to meet the Safe threshold. + function setSignatures(Transaction memory _tx) internal view { + uint256 num = _tx.safeInstance.safe.getThreshold(); + setSignatures(_tx, num); + } + + /// @notice Updates the hash and signatures for a specific approval count. + function updateTransaction(Transaction memory _tx, uint256 _num) internal view { + _tx.setHash(); + _tx.setSignatures(_num); + } + + /// @notice Updates the hash and threshold-based signatures on the transaction. + function updateTransaction(Transaction memory _tx) internal view { + _tx.setHash(); + _tx.setSignatures(); + } + + /// @notice Schedules the transaction with the supplied TimelockGuard instance. 
+ function scheduleTransaction(Transaction memory _tx, TimelockGuard _timelockGuard) internal { + _timelockGuard.scheduleTransaction(_tx.safeInstance.safe, _tx.nonce, _tx.params, _tx.signatures); + } + + /// @notice Executes the transaction via the underlying Safe contract. + function executeTransaction(Transaction memory _tx) internal { + _tx.safeInstance.safe.execTransaction( + _tx.params.to, + _tx.params.value, + _tx.params.data, + _tx.params.operation, + _tx.params.safeTxGas, + _tx.params.baseGas, + _tx.params.gasPrice, + _tx.params.gasToken, + _tx.params.refundReceiver, + _tx.signatures + ); + } + + /// @notice Returns a fresh transaction struct copy with identical fields. + function deepCopy(Transaction memory _tx) internal pure returns (Transaction memory) { + return Transaction({ + safeInstance: _tx.safeInstance, + nonce: _tx.nonce, + params: _tx.params, + signatures: _tx.signatures, + hash: _tx.hash + }); + } + + /// @notice Builds the corresponding cancellation transaction for the provided data. 
+ function makeCancellationTransaction( + Transaction memory _tx, + TimelockGuard _timelockGuard + ) + internal + view + returns (Transaction memory) + { + // Deep copy the transaction + Transaction memory cancellation = Transaction({ + safeInstance: _tx.safeInstance, + nonce: _tx.nonce, + params: _tx.params, + signatures: _tx.signatures, + hash: _tx.hash + }); + + // Empty out the params, then set based on the cancellation transaction format + delete cancellation.params; + cancellation.params.to = address(_timelockGuard); + cancellation.params.data = abi.encodeCall(TimelockGuard.signCancellation, (_tx.hash)); + + // Get only the number of signatures required for the cancellation transaction + uint256 cancellationThreshold = _timelockGuard.cancellationThreshold(_tx.safeInstance.safe); + + cancellation.updateTransaction(cancellationThreshold); + return cancellation; + } +} + +/// @title TimelockGuard_TestInit +/// @notice Reusable test initialization for `TimelockGuard` tests. +contract TimelockGuard_TestInit is Test, SafeTestTools { + // Events + event GuardConfigured(Safe indexed safe, uint256 timelockDelay); + event TransactionScheduled(Safe indexed safe, bytes32 indexed txId, uint256 when); + event TransactionCancelled(Safe indexed safe, bytes32 indexed txId); + event CancellationThresholdUpdated(Safe indexed safe, uint256 oldThreshold, uint256 newThreshold); + event TransactionExecuted(Safe indexed safe, bytes32 txHash); + event Message(string message); + + uint256 constant INIT_TIME = 10; + uint256 constant TIMELOCK_DELAY = 7 days; + uint256 constant NUM_OWNERS = 5; + uint256 constant THRESHOLD = 3; + uint256 constant ONE_YEAR = 365 days; + + TimelockGuard timelockGuard; + + // The Safe address will be the same as SafeInstance.safe, but it has the Safe type. + // This is useful for testing functions that take a Safe as an argument. 
+ Safe safe; + SafeInstance safeInstance; + + SafeInstance unguardedSafe; + + /// @notice Deploys test fixtures and configures default Safe instances. + function setUp() public virtual { + vm.warp(INIT_TIME); + + // Deploy the singleton TimelockGuard + timelockGuard = new TimelockGuard(); + // Set up Safe with owners + safeInstance = _deploySafe("owners", NUM_OWNERS, THRESHOLD); + safe = Safe(payable(safeInstance.safe)); + + // Safe without guard enabled + unguardedSafe = _deploySafe("owners-unguarded", NUM_OWNERS, THRESHOLD); + + // Enable the guard on the Safe + _enableGuard(safeInstance); + } + + /// @notice Deploys a Safe with the given owners and threshold + function _deploySafe( + string memory _prefix, + uint256 _numOwners, + uint256 _threshold + ) + internal + returns (SafeInstance memory) + { + (, uint256[] memory keys) = SafeTestLib.makeAddrsAndKeys(_prefix, _numOwners); + return _setupSafe(keys, _threshold); + } + + /// @notice Builds an empty transaction wrapper for a Safe instance. + function _createEmptyTransaction(SafeInstance memory _safeInstance) + internal + view + returns (TransactionBuilder.Transaction memory) + { + TransactionBuilder.Transaction memory transaction; + // transaction.params will have null values + transaction.safeInstance = _safeInstance; + transaction.nonce = _safeInstance.safe.nonce(); + transaction.updateTransaction(); + return transaction; + } + + /// @notice Creates a dummy transaction populated with placeholder call data. 
+ function _createDummyTransaction(SafeInstance memory _safeInstance) + internal + view + returns (TransactionBuilder.Transaction memory) + { + TransactionBuilder.Transaction memory transaction = _createEmptyTransaction(_safeInstance); + transaction.params.to = address(0xabba); + transaction.params.data = hex"acdc"; + transaction.updateTransaction(); + return transaction; + } + + /// @notice Helper to configure the TimelockGuard for a Safe + function _configureGuard(SafeInstance memory _safe, uint256 _delay) internal { + SafeTestLib.execTransaction( + _safe, address(timelockGuard), 0, abi.encodeCall(TimelockGuard.configureTimelockGuard, (_delay)) + ); + } + + /// @notice Helper to enable guard on a Safe + function _enableGuard(SafeInstance memory _safe) internal { + SafeTestLib.execTransaction( + _safe, address(_safe.safe), 0, abi.encodeCall(GuardManager.setGuard, (address(timelockGuard))) + ); + } +} + +/// @title TimelockGuard_TimelockConfiguration_Test +/// @notice Tests for timelockConfiguration function +contract TimelockGuard_TimelockConfiguration_Test is TimelockGuard_TestInit { + /// @notice Ensures an unconfigured Safe reports a zero timelock delay. + function test_timelockConfiguration_returnsZeroForUnconfiguredSafe_succeeds() external view { + uint256 delay = timelockGuard.timelockConfiguration(safeInstance.safe); + assertEq(delay, 0); + // configured is now determined by timelockDelay == 0 + assertEq(delay == 0, true); + } + + /// @notice Validates the configuration view reflects the stored timelock delay. 
+ function test_timelockConfiguration_returnsConfigurationForConfiguredSafe_succeeds() external { + _configureGuard(safeInstance, TIMELOCK_DELAY); + uint256 delay = timelockGuard.timelockConfiguration(safeInstance.safe); + assertEq(delay, TIMELOCK_DELAY); + // configured is now determined by timelockDelay != 0 + assertEq(delay != 0, true); + } +} + +/// @title TimelockGuard_ConfigureTimelockGuard_Test +/// @notice Tests for configureTimelockGuard function +contract TimelockGuard_ConfigureTimelockGuard_Test is TimelockGuard_TestInit { + /// @notice Verifies the guard can be configured with a standard delay. + function test_configureTimelockGuard_succeeds() external { + vm.expectEmit(true, true, true, true); + emit GuardConfigured(safe, TIMELOCK_DELAY); + + _configureGuard(safeInstance, TIMELOCK_DELAY); + + uint256 delay = timelockGuard.timelockConfiguration(safe); + assertEq(delay, TIMELOCK_DELAY); + // configured is now determined by timelockDelay != 0 + assertEq(delay != 0, true); + } + + /// @notice Confirms delays above the maximum revert during configuration. + function test_configureTimelockGuard_revertsIfDelayTooLong_reverts() external { + uint256 tooLongDelay = ONE_YEAR + 1; + + vm.expectRevert(TimelockGuard.TimelockGuard_InvalidTimelockDelay.selector); + vm.prank(address(safeInstance.safe)); + timelockGuard.configureTimelockGuard(tooLongDelay); + } + + /// @notice Checks configuration reverts when the contract is too old. + function test_configureTimelockGuard_revertsIfVersionTooOld_reverts() external { + // nosemgrep: sol-style-use-abi-encodecall + vm.mockCall(address(safeInstance.safe), abi.encodeWithSignature("VERSION()"), abi.encode("1.2.0")); + vm.expectRevert(TimelockGuard.TimelockGuard_InvalidVersion.selector, address(timelockGuard)); + vm.prank(address(safeInstance.safe)); + timelockGuard.configureTimelockGuard(TIMELOCK_DELAY); + } + + /// @notice Asserts the maximum valid delay configures successfully. 
+ function test_configureTimelockGuard_acceptsMaxValidDelay_succeeds() external { + vm.expectEmit(true, true, true, true); + emit GuardConfigured(safe, ONE_YEAR); + + _configureGuard(safeInstance, ONE_YEAR); + + uint256 delay = timelockGuard.timelockConfiguration(safe); + assertEq(delay, ONE_YEAR); + // configured is now determined by timelockDelay != 0 + assertEq(delay != 0, true); + } + + /// @notice Demonstrates the guard can be reconfigured to a new delay. + function test_configureTimelockGuard_allowsReconfiguration_succeeds() external { + // Initial configuration + _configureGuard(safeInstance, TIMELOCK_DELAY); + assertEq(timelockGuard.timelockConfiguration(safe), TIMELOCK_DELAY); + + uint256 newDelay = TIMELOCK_DELAY + 1; + + // Setup and schedule the reconfiguration transaction + TransactionBuilder.Transaction memory reconfigureGuardTx = _createEmptyTransaction(safeInstance); + reconfigureGuardTx.params.to = address(timelockGuard); + reconfigureGuardTx.params.data = abi.encodeCall(TimelockGuard.configureTimelockGuard, (newDelay)); + reconfigureGuardTx.updateTransaction(); + reconfigureGuardTx.scheduleTransaction(timelockGuard); + + vm.warp(block.timestamp + TIMELOCK_DELAY); + + // Reconfigure with different delay + vm.expectEmit(true, true, true, true); + emit GuardConfigured(safe, newDelay); + + _configureGuard(safeInstance, newDelay); + assertEq(timelockGuard.timelockConfiguration(safe), newDelay); + } + + /// @notice Ensures setting delay to zero clears the configuration. 
+ function test_configureTimelockGuard_clearConfiguration_succeeds() external { + // First configure the guard + _configureGuard(safeInstance, TIMELOCK_DELAY); + assertEq(timelockGuard.timelockConfiguration(safe), TIMELOCK_DELAY); + + // Configure timelock delay to 0 should succeed and emit event + vm.expectEmit(true, true, true, true); + emit GuardConfigured(safe, 0); + vm.prank(address(safeInstance.safe)); + timelockGuard.configureTimelockGuard(0); + + // Timelock delay should be set to 0 + assertEq(timelockGuard.timelockConfiguration(safe), 0); + } + + /// @notice Checks clearing succeeds even if the guard was never configured. + function test_configureTimelockGuard_notConfigured_succeeds() external { + // Try to clear - should succeed even if not yet configured + vm.expectEmit(true, true, true, true); + emit GuardConfigured(safe, 0); + vm.prank(address(safeInstance.safe)); + timelockGuard.configureTimelockGuard(0); + } +} + +/// @title TimelockGuard_CancellationThreshold_Test +/// @notice Tests for cancellationThreshold function +contract TimelockGuard_CancellationThreshold_Test is TimelockGuard_TestInit { + /// @notice Validates cancellation threshold is zero when the guard is disabled. + function test_cancellationThreshold_returnsZeroIfGuardNotEnabled_succeeds() external view { + uint256 threshold = timelockGuard.cancellationThreshold(Safe(payable(unguardedSafe.safe))); + assertEq(threshold, 0); + } + + /// @notice Ensures an enabled but unconfigured guard yields a zero threshold. + function test_cancellationThreshold_returnsZeroIfGuardNotConfigured_succeeds() external view { + // Safe with guard enabled but not configured should return 0 + uint256 threshold = timelockGuard.cancellationThreshold(safe); + assertEq(threshold, 0); + } + + /// @notice Confirms the default threshold becomes one after configuration. 
+ function test_cancellationThreshold_returnsOneAfterConfiguration_succeeds() external { + // Configure the guard + _configureGuard(safeInstance, TIMELOCK_DELAY); + + // Should default to 1 after configuration + uint256 threshold = timelockGuard.cancellationThreshold(safe); + assertEq(threshold, 1); + } + + // Note: Testing increment/decrement behavior will require scheduleTransaction, + // cancelTransaction and execution functions to be implemented first +} + +/// @title TimelockGuard_ScheduleTransaction_Test +/// @notice Tests for scheduleTransaction function +contract TimelockGuard_ScheduleTransaction_Test is TimelockGuard_TestInit { + /// @notice Configures the guard before each scheduleTransaction test. + function setUp() public override { + super.setUp(); + _configureGuard(safeInstance, TIMELOCK_DELAY); + } + + /// @notice Ensures scheduling emits the expected event and stores state. + function test_scheduleTransaction_succeeds() public { + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + + vm.expectEmit(true, true, true, true); + emit TransactionScheduled(safe, dummyTx.hash, INIT_TIME + TIMELOCK_DELAY); + dummyTx.scheduleTransaction(timelockGuard); + } + + // A test which demonstrates that if the guard is enabled but not explicitly configured, + // the timelock delay is set to 0. + /// @notice Checks scheduling reverts if the guard lacks configuration. + function test_scheduleTransaction_guardNotConfigured_reverts() external { + // Enable the guard on the unguarded Safe, but don't configure it + _enableGuard(unguardedSafe); + assertEq(timelockGuard.timelockConfiguration(unguardedSafe.safe), 0); + + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(unguardedSafe); + vm.expectRevert(TimelockGuard.TimelockGuard_GuardNotConfigured.selector); + dummyTx.scheduleTransaction(timelockGuard); + } + + /// @notice Verifies rescheduling an identical pending transaction reverts. 
+ function test_scheduleTransaction_reschedulingIdenticalTransaction_reverts() external { + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + + timelockGuard.scheduleTransaction(safeInstance.safe, dummyTx.nonce, dummyTx.params, dummyTx.signatures); + + vm.expectRevert(TimelockGuard.TimelockGuard_TransactionAlreadyScheduled.selector); + timelockGuard.scheduleTransaction(dummyTx.safeInstance.safe, dummyTx.nonce, dummyTx.params, dummyTx.signatures); + } + + /// @notice Confirms scheduling fails when the guard has not been enabled. + function test_scheduleTransaction_guardNotEnabled_reverts() external { + // Attempt to schedule a transaction with a Safe that has enabled the guard but + // has not configured it. + _enableGuard(unguardedSafe); + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(unguardedSafe); + + vm.expectRevert(TimelockGuard.TimelockGuard_GuardNotConfigured.selector); + dummyTx.scheduleTransaction(timelockGuard); + } + + /// @notice Demonstrates identical payloads can be scheduled with distinct nonces. 
+ function test_scheduleTransaction_canScheduleIdenticalWithDifferentNonce_succeeds() external { + // Schedule a transaction with a specific nonce + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + dummyTx.scheduleTransaction(timelockGuard); + + // Schedule an identical transaction with a different nonce (salt) + TransactionBuilder.Transaction memory newTx = dummyTx.deepCopy(); + newTx.nonce = dummyTx.nonce + 1; + newTx.updateTransaction(); + + vm.expectEmit(true, true, true, true); + emit TransactionScheduled(safe, newTx.hash, INIT_TIME + TIMELOCK_DELAY); + timelockGuard.scheduleTransaction(safeInstance.safe, newTx.nonce, newTx.params, newTx.signatures); + } +} + +/// @title TimelockGuard_ScheduledTransaction_Test +/// @notice Tests for scheduledTransaction function +contract TimelockGuard_ScheduledTransaction_Test is TimelockGuard_TestInit { + /// @notice Configures the guard before each scheduleTransaction test. + function setUp() public override { + super.setUp(); + _configureGuard(safeInstance, TIMELOCK_DELAY); + } + + function test_scheduledTransaction_succeeds() external { + // schedule a transaction + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + dummyTx.scheduleTransaction(timelockGuard); + + TimelockGuard.ScheduledTransaction memory scheduledTransaction = + timelockGuard.scheduledTransaction(safe, dummyTx.hash); + assertEq(scheduledTransaction.executionTime, INIT_TIME + TIMELOCK_DELAY); + assert(scheduledTransaction.state == TimelockGuard.TransactionState.Pending); + assertEq(keccak256(abi.encode(scheduledTransaction.params)), keccak256(abi.encode(dummyTx.params))); + } +} + +/// @title TimelockGuard_PendingTransactions_Test +/// @notice Tests for pendingTransactions function +contract TimelockGuard_PendingTransactions_Test is TimelockGuard_TestInit { + function setUp() public override { + super.setUp(); + _configureGuard(safeInstance, TIMELOCK_DELAY); + } + + function 
test_pendingTransactions_succeeds() external { + // schedule a transaction + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + dummyTx.scheduleTransaction(timelockGuard); + + TimelockGuard.ScheduledTransaction[] memory pendingTransactions = timelockGuard.pendingTransactions(safe); + assertEq(pendingTransactions.length, 1); + // ensure the hash of the transaction params are the same + assertEq(pendingTransactions[0].params.to, dummyTx.params.to); + assertEq(keccak256(abi.encode(pendingTransactions[0].params)), keccak256(abi.encode(dummyTx.params))); + } + + function test_pendingTransactions_removeTransactionAfterCancellation_succeeds() external { + // schedule a transaction + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + dummyTx.scheduleTransaction(timelockGuard); + + // cancel the transaction + TransactionBuilder.Transaction memory cancellationTx = dummyTx.makeCancellationTransaction(timelockGuard); + timelockGuard.cancelTransaction(safeInstance.safe, dummyTx.hash, dummyTx.nonce, cancellationTx.signatures); + + // get the pending transactions + TimelockGuard.ScheduledTransaction[] memory pendingTransactions = timelockGuard.pendingTransactions(safe); + assertEq(pendingTransactions.length, 0); + } + + function test_pendingTransactions_removeTransactionAfterExecution_succeeds() external { + // schedule a transaction + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + dummyTx.scheduleTransaction(timelockGuard); + + vm.warp(block.timestamp + TIMELOCK_DELAY); + + // execute the transaction + dummyTx.executeTransaction(); + + // get the pending transactions + TimelockGuard.ScheduledTransaction[] memory pendingTransactions = timelockGuard.pendingTransactions(safe); + assertEq(pendingTransactions.length, 0); + } +} + +/// @title TimelockGuard_signCancellation_Test +/// @notice Tests for signCancellation function +contract 
TimelockGuard_signCancellation_Test is TimelockGuard_TestInit { + function test_signCancellation_succeeds() external { + vm.expectEmit(true, true, true, true); + emit Message("This function is not meant to be called, did you mean to call cancelTransaction?"); + timelockGuard.signCancellation(bytes32(0)); + } +} + +contract TimelockGuard_CancelTransaction_Test is TimelockGuard_TestInit { + /// @notice Prepares a configured guard before cancellation tests run. + function setUp() public override { + super.setUp(); + + // Configure the guard and schedule a transaction + _configureGuard(safeInstance, TIMELOCK_DELAY); + } + + /// @notice Ensures cancellations succeed using owner signatures. + function test_cancelTransaction_withPrivKeySignature_succeeds() external { + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + dummyTx.scheduleTransaction(timelockGuard); + + // Get the cancellation transaction + TransactionBuilder.Transaction memory cancellationTx = dummyTx.makeCancellationTransaction(timelockGuard); + uint256 cancellationThreshold = timelockGuard.cancellationThreshold(dummyTx.safeInstance.safe); + + // Cancel the transaction + vm.expectEmit(true, true, true, true); + emit CancellationThresholdUpdated(safeInstance.safe, cancellationThreshold, cancellationThreshold + 1); + vm.expectEmit(true, true, true, true); + emit TransactionCancelled(safeInstance.safe, dummyTx.hash); + timelockGuard.cancelTransaction(safeInstance.safe, dummyTx.hash, dummyTx.nonce, cancellationTx.signatures); + + assert( + timelockGuard.scheduledTransaction(safeInstance.safe, dummyTx.hash).state + == TimelockGuard.TransactionState.Cancelled + ); + } + + /// @notice Confirms pre-approved hashes can authorise cancellations. 
+ function test_cancelTransaction_withApproveHash_succeeds() external { + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + dummyTx.scheduleTransaction(timelockGuard); + + // Get the cancellation transaction hash + TransactionBuilder.Transaction memory cancellationTx = dummyTx.makeCancellationTransaction(timelockGuard); + + // Get the owner + address owner = dummyTx.safeInstance.safe.getOwners()[0]; + + // Approve the cancellation transaction hash + vm.prank(owner); + safeInstance.safe.approveHash(cancellationTx.hash); + + // Encode the prevalidated cancellation signature + bytes memory cancellationSignatures = abi.encodePacked(bytes32(uint256(uint160(owner))), bytes32(0), uint8(1)); + + // Get the cancellation threshold + uint256 cancellationThreshold = timelockGuard.cancellationThreshold(dummyTx.safeInstance.safe); + + // Cancel the transaction + vm.expectEmit(true, true, true, true); + emit CancellationThresholdUpdated(dummyTx.safeInstance.safe, cancellationThreshold, cancellationThreshold + 1); + vm.expectEmit(true, true, true, true); + emit TransactionCancelled(dummyTx.safeInstance.safe, dummyTx.hash); + timelockGuard.cancelTransaction(dummyTx.safeInstance.safe, dummyTx.hash, dummyTx.nonce, cancellationSignatures); + + // Confirm that the transaction is cancelled + TimelockGuard.ScheduledTransaction memory scheduledTransaction = + timelockGuard.scheduledTransaction(dummyTx.safeInstance.safe, dummyTx.hash); + assert(scheduledTransaction.state == TimelockGuard.TransactionState.Cancelled); + } + + /// @notice Verifies cancelling an unscheduled transaction reverts. 
+ function test_cancelTransaction_revertsIfTransactionNotScheduled_reverts() external { + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + TransactionBuilder.Transaction memory cancellationTx = dummyTx.makeCancellationTransaction(timelockGuard); + + // Attempt to cancel the transaction + vm.expectRevert(TimelockGuard.TimelockGuard_TransactionNotScheduled.selector); + timelockGuard.cancelTransaction(safeInstance.safe, dummyTx.hash, dummyTx.nonce, cancellationTx.signatures); + } +} + +/// @title TimelockGuard_CheckTransaction_Test +/// @notice Tests for checkTransaction function +contract TimelockGuard_CheckTransaction_Test is TimelockGuard_TestInit { + /// @notice Establishes the configured guard before checkTransaction tests. + function setUp() public override { + super.setUp(); + _configureGuard(safeInstance, TIMELOCK_DELAY); + } + + /// @notice Test that checkTransaction reverts when scheduled transaction delay hasn't passed + function test_checkTransaction_scheduledTransactionNotReady_reverts() external { + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + + // Schedule the transaction but do not advance time past the timelock delay + dummyTx.scheduleTransaction(timelockGuard); + + // Increment the nonce, as would normally happen when the transaction is executed + vm.store(address(safeInstance.safe), bytes32(uint256(5)), bytes32(uint256(safeInstance.safe.nonce() + 1))); + + vm.expectRevert(TimelockGuard.TimelockGuard_TransactionNotReady.selector); + vm.prank(address(safeInstance.safe)); + timelockGuard.checkTransaction( + dummyTx.params.to, + dummyTx.params.value, + dummyTx.params.data, + dummyTx.params.operation, + dummyTx.params.safeTxGas, + dummyTx.params.baseGas, + dummyTx.params.gasPrice, + dummyTx.params.gasToken, + dummyTx.params.refundReceiver, + "", + address(0) + ); + } + + /// @notice Test that checkTransaction reverts when scheduled transaction was cancelled + function 
test_checkTransaction_scheduledTransactionCancelled_reverts() external { + // Schedule a transaction + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + dummyTx.scheduleTransaction(timelockGuard); + + // Cancel the transaction + TransactionBuilder.Transaction memory cancellationTx = dummyTx.makeCancellationTransaction(timelockGuard); + timelockGuard.cancelTransaction(safeInstance.safe, dummyTx.hash, dummyTx.nonce, cancellationTx.signatures); + + // Fast forward past the timelock delay + vm.warp(block.timestamp + TIMELOCK_DELAY); + // Increment the nonce, as would normally happen when the transaction is executed + vm.store(address(safeInstance.safe), bytes32(uint256(5)), bytes32(uint256(safeInstance.safe.nonce() + 1))); + + // Should revert because transaction was cancelled + vm.expectRevert(TimelockGuard.TimelockGuard_TransactionAlreadyCancelled.selector); + vm.prank(address(safeInstance.safe)); + timelockGuard.checkTransaction( + dummyTx.params.to, + dummyTx.params.value, + dummyTx.params.data, + dummyTx.params.operation, + dummyTx.params.safeTxGas, + dummyTx.params.baseGas, + dummyTx.params.gasPrice, + dummyTx.params.gasToken, + dummyTx.params.refundReceiver, + "", + address(0) + ); + } + + /// @notice Test that checkTransaction reverts when a transaction has not been scheduled + function test_checkTransaction_transactionNotScheduled_reverts() external { + // Get transaction parameters but don't schedule the transaction + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + + // Should revert because transaction was not scheduled + vm.expectRevert(TimelockGuard.TimelockGuard_TransactionNotScheduled.selector); + vm.prank(address(safeInstance.safe)); + timelockGuard.checkTransaction( + dummyTx.params.to, + dummyTx.params.value, + dummyTx.params.data, + dummyTx.params.operation, + dummyTx.params.safeTxGas, + dummyTx.params.baseGas, + dummyTx.params.gasPrice, + dummyTx.params.gasToken, + 
dummyTx.params.refundReceiver, + "", + address(0) + ); + } +} + +/// @title TimelockGuard_MaxCancellationThreshold_Test +/// @notice Tests for the maxCancellationThreshold function in TimelockGuard +contract TimelockGuard_MaxCancellationThreshold_Test is TimelockGuard_TestInit { + function setUp() public override { + super.setUp(); + _configureGuard(safeInstance, TIMELOCK_DELAY); + } + + /// @notice Test that maxCancellationThreshold returns the correct value + function test_maxCancellationThreshold_maxThresholdIsBlockingThreshold_succeeds() external { + // create a new Safe with 7 owners and quorum of 5 (blocking threshold is 3) + SafeInstance memory newSafeInstance = _deploySafe("owners", 7, 5); + _enableGuard(newSafeInstance); + _configureGuard(newSafeInstance, TIMELOCK_DELAY); + + // Set up a dummy transaction + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(newSafeInstance); + dummyTx.scheduleTransaction(timelockGuard); + + // Calculate expected max cancellation threshold + uint256 blockingThreshold = newSafeInstance.safe.getOwners().length - newSafeInstance.safe.getThreshold() + 1; + uint256 quorum = newSafeInstance.safe.getThreshold(); + + // Ensure that the minimum is set by the blocking threshold + assertGt(quorum, blockingThreshold); + + // Assert that the maxCancellationThreshold function returns the expected value + assertEq(timelockGuard.maxCancellationThreshold(newSafeInstance.safe), blockingThreshold); + } + + /// @notice Test that maxCancellationThreshold returns the correct value + function test_maxCancellationThreshold_maxThresholdIsQuorum_succeeds() external { + // create a new Safe with 7 owners and quorum of 3 (blocking threshold is 5) + SafeInstance memory newSafeInstance = _deploySafe("owners", 7, 3); + _enableGuard(newSafeInstance); + _configureGuard(newSafeInstance, TIMELOCK_DELAY); + + // Set up a dummy transaction + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(newSafeInstance); + 
dummyTx.scheduleTransaction(timelockGuard); + + // Calculate expected max cancellation threshold + uint256 blockingThreshold = newSafeInstance.safe.getOwners().length - newSafeInstance.safe.getThreshold() + 1; + uint256 quorum = newSafeInstance.safe.getThreshold(); + + // Ensure that the minimum is set by quorum + assertGt(blockingThreshold, quorum); + + // Assert that the maxCancellationThreshold function returns the expected value + assertEq(timelockGuard.maxCancellationThreshold(newSafeInstance.safe), quorum); + } +} + +/// @title TimelockGuard_Integration_Test +/// @notice Tests for integration between TimelockGuard and Safe +contract TimelockGuard_Integration_Test is TimelockGuard_TestInit { + using stdStorage for StdStorage; + + function setUp() public override { + super.setUp(); + _configureGuard(safeInstance, TIMELOCK_DELAY); + } + + /// @notice Test that scheduling a transaction and then executing it succeeds + function test_integration_scheduleThenExecute_succeeds() external { + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + dummyTx.scheduleTransaction(timelockGuard); + + vm.warp(block.timestamp + TIMELOCK_DELAY); + + // increment the cancellation threshold so that we can test that it is reset + uint256 slot = stdstore.target(address(timelockGuard)).sig("cancellationThreshold(address)").with_key( + address(safeInstance.safe) + ).find(); + vm.store( + address(timelockGuard), + bytes32(slot), + bytes32(uint256(timelockGuard.cancellationThreshold(safeInstance.safe) + 1)) + ); + + vm.expectEmit(true, true, true, true); + emit TransactionExecuted(safeInstance.safe, dummyTx.hash); + dummyTx.executeTransaction(); + + // Confirm that the transaction is executed + TimelockGuard.ScheduledTransaction memory scheduledTransaction = + timelockGuard.scheduledTransaction(safeInstance.safe, dummyTx.hash); + assert(scheduledTransaction.state == TimelockGuard.TransactionState.Executed); + + // Confirm that the cancellation threshold 
is reset + assertEq(timelockGuard.cancellationThreshold(safeInstance.safe), 1); + } + + /// @notice Test that scheduling a transaction and then executing it twice reverts + function test_integration_scheduleThenExecuteTwice_reverts() external { + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + dummyTx.scheduleTransaction(timelockGuard); + + vm.warp(block.timestamp + TIMELOCK_DELAY); + dummyTx.executeTransaction(); + + vm.expectRevert("GS026"); + dummyTx.executeTransaction(); + } + + function test_integration_scheduleThenExecuteThenCancel_reverts() external { + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + dummyTx.scheduleTransaction(timelockGuard); + + vm.warp(block.timestamp + TIMELOCK_DELAY); + dummyTx.executeTransaction(); + + TransactionBuilder.Transaction memory cancellationTx = dummyTx.makeCancellationTransaction(timelockGuard); + vm.expectRevert(TimelockGuard.TimelockGuard_TransactionAlreadyExecuted.selector); + timelockGuard.cancelTransaction(safeInstance.safe, dummyTx.hash, dummyTx.nonce, cancellationTx.signatures); + } + + /// @notice Test that rescheduling an identical previously cancelled transaction reverts + function test_integration_scheduleTransactionIdenticalToPreviouslyCancelled_reverts() external { + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + dummyTx.scheduleTransaction(timelockGuard); + + TransactionBuilder.Transaction memory cancellationTx = dummyTx.makeCancellationTransaction(timelockGuard); + timelockGuard.cancelTransaction(safeInstance.safe, dummyTx.hash, dummyTx.nonce, cancellationTx.signatures); + + vm.expectRevert(TimelockGuard.TimelockGuard_TransactionAlreadyScheduled.selector); + dummyTx.scheduleTransaction(timelockGuard); + } + + /// @notice Test that the guard can be reset while still enabled, and then can be disabled + function test_integration_resetThenDisableGuard_succeeds() external { + 
TransactionBuilder.Transaction memory resetGuardTx = _createEmptyTransaction(safeInstance); + resetGuardTx.params.to = address(timelockGuard); + resetGuardTx.params.data = abi.encodeCall(TimelockGuard.configureTimelockGuard, (0)); + resetGuardTx.updateTransaction(); + resetGuardTx.scheduleTransaction(timelockGuard); + + vm.warp(block.timestamp + TIMELOCK_DELAY); + resetGuardTx.executeTransaction(); + + TransactionBuilder.Transaction memory disableGuardTx = _createEmptyTransaction(safeInstance); + disableGuardTx.params.to = address(safeInstance.safe); + disableGuardTx.params.data = abi.encodeCall(GuardManager.setGuard, (address(0))); + disableGuardTx.updateTransaction(); + + vm.warp(block.timestamp + TIMELOCK_DELAY); + disableGuardTx.executeTransaction(); + } + + /// @notice Test that the max cancellation threshold is not exceeded + function test_integration_maxCancellationThresholdNotExceeded_succeeds() external { + uint256 maxThreshold = timelockGuard.maxCancellationThreshold(safeInstance.safe); + + // Schedule a transaction + TransactionBuilder.Transaction memory dummyTx = _createDummyTransaction(safeInstance); + + // schedule and cancel the transaction maxThreshold + 1 times + for (uint256 i = 0; i < maxThreshold + 1; i++) { + // modify the calldata slightly to make the txHash different + dummyTx.params.data = bytes.concat(dummyTx.params.data, abi.encodePacked(i)); + dummyTx.updateTransaction(); + dummyTx.scheduleTransaction(timelockGuard); + + // Cancel the transaction + TransactionBuilder.Transaction memory cancellationTx = dummyTx.makeCancellationTransaction(timelockGuard); + timelockGuard.cancelTransaction(safeInstance.safe, dummyTx.hash, dummyTx.nonce, cancellationTx.signatures); + } + + assertEq(timelockGuard.cancellationThreshold(safeInstance.safe), maxThreshold); + } +} diff --git a/packages/contracts-bedrock/test/scripts/DeployOwnership.t.sol b/packages/contracts-bedrock/test/scripts/DeployOwnership.t.sol index ff1506d1c557a..ff175d3f6b2e3 100644 --- 
a/packages/contracts-bedrock/test/scripts/DeployOwnership.t.sol +++ b/packages/contracts-bedrock/test/scripts/DeployOwnership.t.sol @@ -12,8 +12,7 @@ import { Test } from "forge-std/Test.sol"; import { GnosisSafe as Safe } from "safe-contracts/GnosisSafe.sol"; import { ModuleManager } from "safe-contracts/base/ModuleManager.sol"; -import { LivenessGuard } from "src/safe/LivenessGuard.sol"; -import { LivenessModule } from "src/safe/LivenessModule.sol"; +import { LivenessModule2 } from "src/safe/LivenessModule2.sol"; contract DeployOwnershipTest is Test, DeployOwnership { address internal constant SENTINEL_MODULES = address(0x1); @@ -60,35 +59,22 @@ contract DeployOwnershipTest is Test, DeployOwnership { _checkSafeConfig(exampleSecurityCouncilConfig.safeConfig, securityCouncilSafe); - // Guard Checks - address livenessGuard = artifacts.mustGetAddress("LivenessGuard"); - - // The Safe's getGuard method is internal, so we read directly from storage - // https://github.com/safe-global/safe-contracts/blob/v1.4.0/contracts/base/GuardManager.sol#L66-L72 - assertEq(vm.load(address(securityCouncilSafe), GUARD_STORAGE_SLOT), bytes32(uint256(uint160(livenessGuard)))); - - // check that all the owners have a lastLive time in the Guard - address[] memory owners = exampleSecurityCouncilConfig.safeConfig.owners; - for (uint256 i = 0; i < owners.length; i++) { - assertEq(LivenessGuard(livenessGuard).lastLive(owners[i]), block.timestamp); - } - // Module Checks - address livenessModule = artifacts.mustGetAddress("LivenessModule"); + address livenessModule = artifacts.mustGetAddress("LivenessModule2"); (address[] memory modules, address nextModule) = ModuleManager(securityCouncilSafe).getModulesPaginated(SENTINEL_MODULES, 2); assertEq(modules.length, 1); assertEq(modules[0], livenessModule); assertEq(nextModule, SENTINEL_MODULES); // ensures there are no more modules in the list - // LivenessModule checks + // LivenessModule2 checks LivenessModuleConfig memory lmConfig = 
exampleSecurityCouncilConfig.livenessModuleConfig; - assertEq(address(LivenessModule(livenessModule).livenessGuard()), livenessGuard); - assertEq(LivenessModule(livenessModule).livenessInterval(), lmConfig.livenessInterval); - assertEq(LivenessModule(livenessModule).thresholdPercentage(), lmConfig.thresholdPercentage); - assertEq(LivenessModule(livenessModule).minOwners(), lmConfig.minOwners); + (uint256 configuredPeriod, address configuredFallback) = + LivenessModule2(livenessModule).livenessSafeConfiguration(address(securityCouncilSafe)); + assertEq(configuredPeriod, lmConfig.livenessInterval); + assertEq(configuredFallback, lmConfig.fallbackOwner); - // Ensure the threshold on the safe agrees with the LivenessModule's required threshold - assertEq(securityCouncilSafe.getThreshold(), LivenessModule(livenessModule).getRequiredThreshold(owners.length)); + // Verify no active challenge exists initially + assertEq(LivenessModule2(livenessModule).getChallengePeriodEnd(address(securityCouncilSafe)), 0); } } diff --git a/packages/contracts-bedrock/test/scripts/VerifyOPCM.t.sol b/packages/contracts-bedrock/test/scripts/VerifyOPCM.t.sol index 05a6c4ad115a1..0c355899ddf10 100644 --- a/packages/contracts-bedrock/test/scripts/VerifyOPCM.t.sol +++ b/packages/contracts-bedrock/test/scripts/VerifyOPCM.t.sol @@ -1,8 +1,8 @@ // SPDX-License-Identifier: MIT pragma solidity 0.8.15; -// Foundry -import { VmSafe } from "forge-std/Vm.sol"; +// Libraries +import { LibString } from "@solady/utils/LibString.sol"; // Tests import { OPContractsManager_TestInit } from "test/L1/OPContractsManager.t.sol"; @@ -11,7 +11,7 @@ import { OPContractsManager_TestInit } from "test/L1/OPContractsManager.t.sol"; import { VerifyOPCM } from "scripts/deploy/VerifyOPCM.s.sol"; // Interfaces -import { IOPContractsManager } from "interfaces/L1/IOPContractsManager.sol"; +import { IOPContractsManager, IOPContractsManagerUpgrader } from "interfaces/L1/IOPContractsManager.sol"; contract VerifyOPCM_Harness is 
VerifyOPCM { function loadArtifactInfo(string memory _artifactPath) public view returns (ArtifactInfo memory) { @@ -36,6 +36,26 @@ contract VerifyOPCM_Harness is VerifyOPCM { function buildArtifactPath(string memory _contractName) public view returns (string memory) { return _buildArtifactPath(_contractName); } + + function verifyContractsContainerConsistency(OpcmContractRef[] memory _propRefs) public view { + return _verifyContractsContainerConsistency(_propRefs); + } + + function verifyOpcmImmutableVariables(IOPContractsManager _opcm) public returns (bool) { + return _verifyOpcmImmutableVariables(_opcm); + } + + function validateAllGettersAccounted() public { + return _validateAllGettersAccounted(); + } + + function setExpectedGetter(string memory _getter, string memory _verificationMethod) public { + expectedGetters[_getter] = _verificationMethod; + } + + function removeExpectedGetter(string memory _getter) public { + expectedGetters[_getter] = ""; + } } /// @title VerifyOPCM_TestInit @@ -43,23 +63,21 @@ contract VerifyOPCM_Harness is VerifyOPCM { contract VerifyOPCM_TestInit is OPContractsManager_TestInit { VerifyOPCM_Harness internal harness; - function setUp() public override { + function setUp() public virtual override { super.setUp(); harness = new VerifyOPCM_Harness(); harness.setUp(); } - - /// @notice Skips if running in coverage mode. - function skipIfCoverage() public { - if (vm.isContext(VmSafe.ForgeContext.Coverage)) { - vm.skip(true); - } - } } /// @title VerifyOPCM_Run_Test /// @notice Tests the `run` function of the `VerifyOPCM` script. contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { + function setUp() public override { + super.setUp(); + setupEnvVars(); + } + /// @notice Tests that the script succeeds when no changes are introduced. function test_run_succeeds() public { // Coverage changes bytecode and causes failures, skip. 
@@ -69,6 +87,29 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { harness.run(address(opcm), true); } + function test_run_bitmapNotEmptyOnMainnet_reverts(bytes32 _devFeatureBitmap) public { + // Coverage changes bytecode and causes failures, skip. + skipIfCoverage(); + + // Anything but zero! + _devFeatureBitmap = bytes32(bound(uint256(_devFeatureBitmap), 1, type(uint256).max)); + + // Mock opcm to return a non-zero dev feature bitmap. + vm.mockCall( + address(opcm), abi.encodeCall(IOPContractsManager.devFeatureBitmap, ()), abi.encode(_devFeatureBitmap) + ); + + // Set the chain ID to 1. + vm.chainId(1); + + // Disable testing environment. + vm.etch(address(0xbeefcafe), bytes("")); + + // Run the script. + vm.expectRevert(VerifyOPCM.VerifyOPCM_DevFeatureBitmapNotEmpty.selector); + harness.run(address(opcm), true); + } + /// @notice Tests that the script succeeds when differences are introduced into the immutable /// variables of implementation contracts. Fuzzing is too slow here, randomness is good /// enough. @@ -223,4 +264,192 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { vm.expectRevert(VerifyOPCM.VerifyOPCM_Failed.selector); harness.run(address(opcm), true); } + + /// @notice Tests that the script verifies all component contracts have the same contractsContainer address. + function test_verifyContractsContainerConsistency_succeeds() public { + // Coverage changes bytecode and causes failures, skip. + skipIfCoverage(); + + // Get the property references (which include the component addresses) + VerifyOPCM.OpcmContractRef[] memory propRefs = harness.getOpcmPropertyRefs(opcm); + + // This should succeed with the current setup where all contracts have the same containerAddress. + harness.verifyContractsContainerConsistency(propRefs); + } + + /// @notice Tests that the script reverts when contracts have different contractsContainer addresses. 
+ function test_verifyContractsContainerConsistency_mismatch_reverts() public { + // Coverage changes bytecode and causes failures, skip. + skipIfCoverage(); + + // Get the property references (which include the component addresses) + VerifyOPCM.OpcmContractRef[] memory propRefs = harness.getOpcmPropertyRefs(opcm); + + // Create a different address to simulate a mismatch. + address differentContainer = address(0x9999999999999999999999999999999999999999); + + // Mock the first OPCM component found to return a different contractsContainer address + _mockFirstOpcmComponent(propRefs, differentContainer); + + // Now the consistency check should fail. + vm.expectRevert(VerifyOPCM.VerifyOPCM_ContractsContainerMismatch.selector); + harness.verifyContractsContainerConsistency(propRefs); + } + + /// @notice Tests that each OPCM component can be individually tested for container mismatch. + function test_verifyContractsContainerConsistency_eachComponent_reverts() public { + // Coverage changes bytecode and causes failures, skip. 
+ skipIfCoverage(); + + // Get the property references (which include the component addresses) + VerifyOPCM.OpcmContractRef[] memory propRefs = harness.getOpcmPropertyRefs(opcm); + + // Test each OPCM component individually (only those that actually have contractsContainer()) + address differentContainer = address(0x9999999999999999999999999999999999999999); + + uint256 componentsWithContainerTested = 0; + for (uint256 i = 0; i < propRefs.length; i++) { + string memory field = propRefs[i].field; + if (_hasContractsContainer(field)) { + // Mock this specific component to return a different address + vm.mockCall( + propRefs[i].addr, + abi.encodeCall(IOPContractsManagerUpgrader.contractsContainer, ()), + abi.encode(differentContainer) + ); + + // The consistency check should fail + vm.expectRevert(VerifyOPCM.VerifyOPCM_ContractsContainerMismatch.selector); + harness.verifyContractsContainerConsistency(propRefs); + + // Clear the mock for next iteration + vm.clearMockedCalls(); + componentsWithContainerTested++; + } + } + + // Ensure we actually tested some components (currently: deployer, gameTypeAdder, upgrader, interopMigrator) + assertGt(componentsWithContainerTested, 0, "Should have tested at least one component"); + } + + /// @notice Utility function to mock the first OPCM component's contractsContainer address. + /// @param _propRefs Array of property references to search through. + /// @param _mockAddress The address to mock the contractsContainer call to return. 
+ function _mockFirstOpcmComponent(VerifyOPCM.OpcmContractRef[] memory _propRefs, address _mockAddress) internal { + for (uint256 i = 0; i < _propRefs.length; i++) { + string memory field = _propRefs[i].field; + // Check if this is an OPCM component that has contractsContainer() + if (_hasContractsContainer(field)) { + vm.mockCall( + _propRefs[i].addr, + abi.encodeCall(IOPContractsManagerUpgrader.contractsContainer, ()), + abi.encode(_mockAddress) + ); + return; + } + } + } + + /// @notice Helper function to check if a field represents an OPCM component. + /// @param _field The field name to check. + /// @return True if the field represents an OPCM component (starts with "opcm"), false otherwise. + function _isOpcmComponent(string memory _field) internal pure returns (bool) { + return LibString.startsWith(_field, "opcm"); + } + + /// @notice Helper function to check if a field represents an OPCM component that has contractsContainer(). + /// @param _field The field name to check. + /// @return True if the field represents an OPCM component with contractsContainer(), false otherwise. + function _hasContractsContainer(string memory _field) internal pure returns (bool) { + // Check if it starts with "opcm" + if (!LibString.startsWith(_field, "opcm")) { + return false; + } + + // Components that start with "opcm" but don't extend OPContractsManagerBase (and thus don't have + // contractsContainer()) + string[] memory exclusions = new string[](1); + exclusions[0] = "opcmStandardValidator"; + + // Check if the field is in the exclusion list + for (uint256 i = 0; i < exclusions.length; i++) { + if (LibString.eq(_field, exclusions[i])) { + return false; + } + } + + return true; + } + + /// @notice Tests that immutable variables are correctly verified in the OPCM contract. + function test_verifyOpcmImmutableVariables_succeeds() public { + // Coverage changes bytecode and causes failures, skip. 
+ skipIfCoverage(); + + // Ensure environment variables are set correctly (in case other tests modified them) + setupEnvVars(); + + // Test that the immutable variables are correctly verified. + // Environment variables are set in setUp() to match the actual OPCM addresses. + bool result = harness.verifyOpcmImmutableVariables(opcm); + assertTrue(result, "OPCM immutable variables should be valid"); + } + + /// @notice Mocks a call to the OPCM contract and verifies validation fails. + /// @param _selector The function selector for the OPCM contract method to mock. + function _assertOnOpcmGetter(bytes4 _selector) internal { + bytes memory callData = abi.encodePacked(_selector); + vm.mockCall(address(opcm), callData, abi.encode(address(0x8888))); + + // Verify that immutable variables fail validation + bool result = harness.verifyOpcmImmutableVariables(opcm); + assertFalse(result, "OPCM with invalid immutable variables should fail verification"); + + // Clear mock calls and restore original environment variables to avoid test isolation issues + vm.clearMockedCalls(); + } + + /// @notice Tests that the script fails when OPCM immutable variables are invalid. + /// We test this by setting expected addresses and mocking OPCM methods to return different addresses. + function test_verifyOpcmImmutableVariables_mismatch_fails() public { + // Coverage changes bytecode and causes failures, skip. 
+ skipIfCoverage(); + + // Set expected addresses via environment variables + address expectedSuperchainConfig = address(0x1111); + address expectedProtocolVersions = address(0x2222); + address expectedSuperchainProxyAdmin = address(0x3333); + + vm.setEnv("EXPECTED_SUPERCHAIN_CONFIG", vm.toString(expectedSuperchainConfig)); + vm.setEnv("EXPECTED_PROTOCOL_VERSIONS", vm.toString(expectedProtocolVersions)); + vm.setEnv("EXPECTED_SUPERCHAIN_PROXY_ADMIN", vm.toString(expectedSuperchainProxyAdmin)); + + // Test that mocking each individual getter causes verification to fail + _assertOnOpcmGetter(IOPContractsManager.superchainConfig.selector); + _assertOnOpcmGetter(IOPContractsManager.protocolVersions.selector); + _assertOnOpcmGetter(IOPContractsManager.superchainProxyAdmin.selector); + + // Reset environment variables to correct values (as set in setUp()) + setupEnvVars(); + } + + /// @notice Tests that the ABI getter validation succeeds when all getters are accounted for. + function test_validateAllGettersAccounted_succeeds() public { + // This should succeed as setUp() configures all expected getters + harness.validateAllGettersAccounted(); + } + + /// @notice Tests that the ABI getter validation fails when there are unaccounted getters. + /// We test this by removing an expected getter from the mapping. 
+ function test_validateAllGettersAccounted_unaccountedGetters_reverts() public { + // Remove one of the expected getters to simulate an unaccounted getter + harness.removeExpectedGetter("blueprints"); + + // This should revert with VerifyOPCM_UnaccountedGetters error + // The error includes the array of unaccounted getters as a parameter + string[] memory expectedUnaccounted = new string[](1); + expectedUnaccounted[0] = "blueprints"; + vm.expectRevert(abi.encodeWithSelector(VerifyOPCM.VerifyOPCM_UnaccountedGetters.selector, expectedUnaccounted)); + harness.validateAllGettersAccounted(); + } } diff --git a/packages/contracts-bedrock/test/setup/CommonTest.sol b/packages/contracts-bedrock/test/setup/CommonTest.sol index 964437674c4b8..4cfbf06b5693c 100644 --- a/packages/contracts-bedrock/test/setup/CommonTest.sol +++ b/packages/contracts-bedrock/test/setup/CommonTest.sol @@ -56,6 +56,11 @@ contract CommonTest is Test, Setup, Events { // changes will not be persisted into the new network. Setup.setUp(); + // Set the code for 0xbeefcafe to a single non-zero byte. We use this address as a signal + // that something is running in the testing environment and not production, useful for + // forked tests. 
+ vm.etch(address(0xbeefcafe), bytes(hex"01")); + alice = makeAddr("alice"); bob = makeAddr("bob"); vm.deal(alice, 10000 ether); diff --git a/packages/contracts-bedrock/test/setup/FeatureFlags.sol b/packages/contracts-bedrock/test/setup/FeatureFlags.sol new file mode 100644 index 0000000000000..7ab1e36853909 --- /dev/null +++ b/packages/contracts-bedrock/test/setup/FeatureFlags.sol @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Testing +import { console2 as console } from "forge-std/console2.sol"; +import { Vm } from "forge-std/Vm.sol"; + +// Libraries +import { DevFeatures } from "src/libraries/DevFeatures.sol"; +import { Config } from "scripts/libraries/Config.sol"; + +// Interfaces +import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; + +/// @notice FeatureFlags manages the feature bitmap by either direct user input or via environment +/// variables. +contract FeatureFlags { + /// @notice The address of the foundry Vm contract. + Vm private constant vm = Vm(0x7109709ECfa91a80626fF3989D68f67F5b1DD12D); + + /// @notice The development feature bitmap. + bytes32 internal devFeatureBitmap; + + /// @notice The address of the SystemConfig contract. + ISystemConfig internal sysCfg; + + /// @notice Sets the address of the SystemConfig contract. + /// @param _sysCfg The address of the SystemConfig contract. + function setSystemConfig(ISystemConfig _sysCfg) public { + sysCfg = _sysCfg; + } + + /// @notice Resolves the development feature bitmap. 
+ function resolveFeaturesFromEnv() public { + if (Config.devFeatureInterop()) { + console.log("Setup: DEV_FEATURE__OPTIMISM_PORTAL_INTEROP is enabled"); + devFeatureBitmap |= DevFeatures.OPTIMISM_PORTAL_INTEROP; + } + if (Config.devFeatureCannonKona()) { + console.log("Setup: DEV_FEATURE__CANNON_KONA is enabled"); + devFeatureBitmap |= DevFeatures.CANNON_KONA; + } + if (Config.devFeatureDeployV2DisputeGames()) { + console.log("Setup: DEV_FEATURE__DEPLOY_V2_DISPUTE_GAMES is enabled"); + devFeatureBitmap |= DevFeatures.DEPLOY_V2_DISPUTE_GAMES; + } + } + + /// @notice Enables a feature. + /// @param _feature The feature to set. + function setDevFeatureEnabled(bytes32 _feature) public { + devFeatureBitmap |= _feature; + } + + /// @notice Disables a feature. + /// @param _feature The feature to set. + function setDevFeatureDisabled(bytes32 _feature) public { + devFeatureBitmap &= ~_feature; + } + + /// @notice Checks if a system feature is enabled. + /// @param _feature The feature to check. + /// @return True if the feature is enabled, false otherwise. + function isSysFeatureEnabled(bytes32 _feature) public view returns (bool) { + return sysCfg.isFeatureEnabled(_feature); + } + + /// @notice Checks if a development feature is enabled. + /// @param _feature The feature to check. + /// @return True if the feature is enabled, false otherwise. + function isDevFeatureEnabled(bytes32 _feature) public view returns (bool) { + return DevFeatures.isDevFeatureEnabled(devFeatureBitmap, _feature); + } + + /// @notice Skips tests when the provided system feature is enabled. + /// @param _feature The feature to check. + function skipIfSysFeatureEnabled(bytes32 _feature) public { + if (isSysFeatureEnabled(_feature)) { + vm.skip(true); + } + } + + /// @notice Skips tests when the provided system feature is disabled. + /// @param _feature The feature to check. 
+ function skipIfSysFeatureDisabled(bytes32 _feature) public { + if (!isSysFeatureEnabled(_feature)) { + vm.skip(true); + } + } + + /// @notice Skips tests when the provided development feature is enabled. + /// @param _feature The feature to check. + function skipIfDevFeatureEnabled(bytes32 _feature) public { + if (isDevFeatureEnabled(_feature)) { + vm.skip(true); + } + } + + /// @notice Skips tests when the provided development feature is disabled. + /// @param _feature The feature to check. + function skipIfDevFeatureDisabled(bytes32 _feature) public { + if (!isDevFeatureEnabled(_feature)) { + vm.skip(true); + } + } +} diff --git a/packages/contracts-bedrock/test/setup/ForkLive.s.sol b/packages/contracts-bedrock/test/setup/ForkLive.s.sol index 96c2752835cb6..cb80df50bd08e 100644 --- a/packages/contracts-bedrock/test/setup/ForkLive.s.sol +++ b/packages/contracts-bedrock/test/setup/ForkLive.s.sol @@ -2,6 +2,7 @@ pragma solidity ^0.8.0; import { console2 as console } from "forge-std/console2.sol"; +import { StdAssertions } from "forge-std/StdAssertions.sol"; // Testing import { stdToml } from "forge-std/StdToml.sol"; @@ -15,21 +16,22 @@ import { Config } from "scripts/libraries/Config.sol"; // Libraries import { GameTypes, Claim } from "src/dispute/lib/Types.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; -import { LibString } from "solady/src/utils/LibString.sol"; +import { LibString } from "@solady/utils/LibString.sol"; // Interfaces +import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import { IFaultDisputeGame } from "interfaces/dispute/IFaultDisputeGame.sol"; import { IPermissionedDisputeGame } from "interfaces/dispute/IPermissionedDisputeGame.sol"; import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; import { ISystemConfig } from 
"interfaces/L1/ISystemConfig.sol"; -import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; import { IOPContractsManager } from "interfaces/L1/IOPContractsManager.sol"; import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; import { IETHLockbox } from "interfaces/L1/IETHLockbox.sol"; import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { IOPContractsManagerUpgrader } from "interfaces/L1/IOPContractsManager.sol"; /// @title ForkLive /// @notice This script is called by Setup.sol as a preparation step for the foundry test suite, and is run as an @@ -41,12 +43,15 @@ import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; /// superchain-registry. /// This contract must not have constructor logic because it is set into state using `etch`. -contract ForkLive is Deployer { +contract ForkLive is Deployer, StdAssertions { using stdToml for string; using LibString for string; bool public useOpsRepo; + /// @notice Thrown when testing with an unsupported chain ID. + error UnsupportedChainId(); + /// @notice Returns the base chain name to use for forking /// @return The base chain name as a string function baseChain() internal view returns (string memory) { @@ -188,16 +193,13 @@ contract ForkLive is Deployer { deploy.deployImplementations({ _isInterop: false }); } - /// @notice Upgrades the contracts using the OPCM. - function _upgrade() internal { - IOPContractsManager opcm = IOPContractsManager(artifacts.mustGetAddress("OPContractsManager")); - + /// @notice Performs a single OPCM upgrade. + /// @param _opcm The OPCM contract to upgrade. + /// @param _delegateCaller The address of the upgrader to use for the upgrade. 
+ function _doUpgrade(IOPContractsManager _opcm, address _delegateCaller) internal { ISystemConfig systemConfig = ISystemConfig(artifacts.mustGetAddress("SystemConfigProxy")); IProxyAdmin proxyAdmin = IProxyAdmin(EIP1967Helper.getAdmin(address(systemConfig))); - address upgrader = proxyAdmin.owner(); - vm.label(upgrader, "ProxyAdmin Owner"); - IOPContractsManager.OpChainConfig[] memory opChains = new IOPContractsManager.OpChainConfig[](1); opChains[0] = IOPContractsManager.OpChainConfig({ systemConfigProxy: systemConfig, @@ -205,47 +207,71 @@ contract ForkLive is Deployer { absolutePrestate: Claim.wrap(bytes32(keccak256("absolutePrestate"))) }); + // Turn the SuperchainPAO into a DelegateCaller so we can try to upgrade the + // SuperchainConfig contract. + ISuperchainConfig superchainConfig = ISuperchainConfig(artifacts.mustGetAddress("SuperchainConfigProxy")); + IProxyAdmin superchainProxyAdmin = IProxyAdmin(EIP1967Helper.getAdmin(address(superchainConfig))); + address superchainPAO = superchainProxyAdmin.owner(); + bytes memory superchainPAOCode = address(superchainPAO).code; + vm.etch(superchainPAO, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); + + // Always try to upgrade the SuperchainConfig. Not always necessary but easier to do it + // every time rather than adding or removing this code for each upgrade. + try DelegateCaller(superchainPAO).dcForward( + address(_opcm), + abi.encodeCall(IOPContractsManager.upgradeSuperchainConfig, (superchainConfig, superchainProxyAdmin)) + ) { + // Great, the upgrade succeeded. + } catch (bytes memory reason) { + // Only acceptable revert reason is the SuperchainConfig already being up to date. + assertTrue( + bytes4(reason) + == IOPContractsManagerUpgrader.OPContractsManagerUpgrader_SuperchainConfigAlreadyUpToDate.selector, + "Revert reason other than SuperchainConfigAlreadyUpToDate" + ); + } + + // Reset the superchainPAO to the original code. 
+ vm.etch(superchainPAO, superchainPAOCode); + // Temporarily replace the upgrader with a DelegateCaller so we can test the upgrade, // then reset its code to the original code. - bytes memory upgraderCode = address(upgrader).code; - vm.etch(upgrader, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); + bytes memory upgraderCode = address(_delegateCaller).code; + vm.etch(_delegateCaller, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); - // The 2.0.0 OPCM requires that the SuperchainConfig and ProtocolVersions contracts have - // been upgraded before it will upgrade other contracts. These contracts can only be - // upgraded by the Superchain ProxyAdmin owner. For simplicity, we always just call U13 - // once without any chain configs to trigger this upgrade. - ISuperchainConfig superchainConfig = ISuperchainConfig(artifacts.mustGetAddress("SuperchainConfigProxy")); - address superchainPAO = IProxyAdmin(EIP1967Helper.getAdmin(address(superchainConfig))).owner(); - vm.etch(superchainPAO, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); - DelegateCaller(superchainPAO).dcForward( - address(0x026b2F158255Beac46c1E7c6b8BbF29A4b6A7B76), - abi.encodeCall(IOPContractsManager.upgrade, (new IOPContractsManager.OpChainConfig[](0))) + // Upgrade the chain. + DelegateCaller(_delegateCaller).dcForward( + address(_opcm), abi.encodeCall(IOPContractsManager.upgrade, (opChains)) ); - // Start by doing Upgrade 13. - DelegateCaller(upgrader).dcForward( - address(0x026b2F158255Beac46c1E7c6b8BbF29A4b6A7B76), abi.encodeCall(IOPContractsManager.upgrade, (opChains)) - ); + // Reset the upgrader to the original code. + vm.etch(_delegateCaller, upgraderCode); + } - // Then do Upgrade 14. - DelegateCaller(upgrader).dcForward( - address(0x3A1f523a4bc09cd344A2745a108Bb0398288094F), abi.encodeCall(IOPContractsManager.upgrade, (opChains)) - ); + /// @notice Upgrades the contracts using the OPCM. 
+ function _upgrade() internal { + IOPContractsManager opcm = IOPContractsManager(artifacts.mustGetAddress("OPContractsManager")); - // Like with Upgrade 13, we need to first call U16 from the Superchain ProxyAdmin owner to - // trigger the upgrade of the SuperchainConfig contract. - vm.etch(superchainPAO, vm.getDeployedCode("test/mocks/Callers.sol:DelegateCaller")); - DelegateCaller(superchainPAO).dcForward( - address(opcm), abi.encodeCall(IOPContractsManager.upgrade, (new IOPContractsManager.OpChainConfig[](0))) - ); + ISystemConfig systemConfig = ISystemConfig(artifacts.mustGetAddress("SystemConfigProxy")); + IProxyAdmin proxyAdmin = IProxyAdmin(EIP1967Helper.getAdmin(address(systemConfig))); - // Then do the final upgrade. - DelegateCaller(upgrader).dcForward(address(opcm), abi.encodeCall(IOPContractsManager.upgrade, (opChains))); + address upgrader = proxyAdmin.owner(); + vm.label(upgrader, "ProxyAdmin Owner"); - // Reset the upgrader to the original code. - vm.etch(upgrader, upgraderCode); + // Run past upgrades depending on network. + if (block.chainid == 1) { + // Mainnet + // U16a. + _doUpgrade(IOPContractsManager(0x8123739C1368C2DEDc8C564255bc417FEEeBFF9D), upgrader); + } else { + revert UnsupportedChainId(); + } + + // Current upgrade. 
+ _doUpgrade(opcm, upgrader); console.log("ForkLive: Saving newly deployed contracts"); + // A new ASR and new dispute games were deployed, so we need to update them IDisputeGameFactory disputeGameFactory = IDisputeGameFactory(artifacts.mustGetAddress("DisputeGameFactoryProxy")); diff --git a/packages/contracts-bedrock/test/setup/Setup.sol b/packages/contracts-bedrock/test/setup/Setup.sol index caf9e2823f7a4..d82dfd9d27be8 100644 --- a/packages/contracts-bedrock/test/setup/Setup.sol +++ b/packages/contracts-bedrock/test/setup/Setup.sol @@ -4,6 +4,8 @@ pragma solidity 0.8.15; // Testing import { console2 as console } from "forge-std/console2.sol"; import { Vm, VmSafe } from "forge-std/Vm.sol"; +import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; +import { FeatureFlags } from "test/setup/FeatureFlags.sol"; // Scripts import { Deploy } from "scripts/deploy/Deploy.s.sol"; @@ -67,7 +69,7 @@ import { ICrossL2Inbox } from "interfaces/L2/ICrossL2Inbox.sol"; /// sets the L2 contracts directly at the predeploy addresses instead of setting them /// up behind proxies. In the future we will migrate to importing the genesis JSON /// file that is created to set up the L2 contracts instead of setting them up manually. -contract Setup { +contract Setup is FeatureFlags { using ForkUtils for Fork; /// @notice The address of the foundry Vm contract. 
@@ -104,6 +106,8 @@ contract Setup { // L1 contracts - core address proxyAdminOwner; IProxyAdmin proxyAdmin; + address superchainProxyAdminOwner; + IProxyAdmin superchainProxyAdmin; IOptimismPortal optimismPortal2; IETHLockbox ethLockbox; ISystemConfig systemConfig; @@ -181,6 +185,10 @@ contract Setup { deploy.setUp(); forkLive.setUp(); + + resolveFeaturesFromEnv(); + deploy.cfg().setDevFeatureBitmap(devFeatureBitmap); + console.log("Setup: L1 setup done!"); if (isForkTest()) { @@ -259,7 +267,10 @@ contract Setup { // Only skip ETHLockbox assignment if we're in a fork test with non-upgraded fork // TODO(#14691): Remove this check once Upgrade 15 is deployed on Mainnet. if (!isForkTest() || deploy.cfg().useUpgradedFork()) { - ethLockbox = IETHLockbox(artifacts.mustGetAddress("ETHLockboxProxy")); + // Here we use getAddress instead of mustGetAddress because some chains might not have + // the ETHLockbox proxy. Chains that don't have the ETHLockbox proxy will just return + // address(0) and cause a revert if we use mustGetAddress. + ethLockbox = IETHLockbox(artifacts.getAddress("ETHLockboxProxy")); } systemConfig = ISystemConfig(artifacts.mustGetAddress("SystemConfigProxy")); @@ -280,6 +291,8 @@ contract Setup { opcm = IOPContractsManager(artifacts.mustGetAddress("OPContractsManager")); proxyAdmin = IProxyAdmin(artifacts.mustGetAddress("ProxyAdmin")); proxyAdminOwner = proxyAdmin.owner(); + superchainProxyAdmin = IProxyAdmin(EIP1967Helper.getAdmin(address(superchainConfig))); + superchainProxyAdminOwner = superchainProxyAdmin.owner(); mips = IBigStepper(artifacts.mustGetAddress("MipsSingleton")); if (deploy.cfg().useAltDA()) { @@ -288,6 +301,9 @@ contract Setup { } console.log("Setup: registered L1 deployments"); + + // Update the SystemConfig address. + setSystemConfig(systemConfig); } /// @dev Sets up the L2 contracts. Depends on `L1()` being called first. 
diff --git a/packages/contracts-bedrock/test/universal/OptimismMintableERC20.t.sol b/packages/contracts-bedrock/test/universal/OptimismMintableERC20.t.sol index ad885e583c45f..017aa47f68e11 100644 --- a/packages/contracts-bedrock/test/universal/OptimismMintableERC20.t.sol +++ b/packages/contracts-bedrock/test/universal/OptimismMintableERC20.t.sol @@ -133,10 +133,10 @@ contract OptimismMintableERC20_Bridge_Test is OptimismMintableERC20_TestInit { } } -/// @title OptimismMintableERC20_Unclassified_Test +/// @title OptimismMintableERC20_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `OptimismMintableERC20` /// contract. -contract OptimismMintableERC20_Unclassified_Test is OptimismMintableERC20_TestInit { +contract OptimismMintableERC20_Uncategorized_Test is OptimismMintableERC20_TestInit { function test_legacy_succeeds() external view { // Getters for the remote token assertEq(L2Token.REMOTE_TOKEN(), address(L1Token)); diff --git a/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol b/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol index c1164b4d6387e..3772953c5404d 100644 --- a/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol +++ b/packages/contracts-bedrock/test/universal/OptimismMintableERC20Factory.t.sol @@ -218,10 +218,10 @@ contract OptimismMintableERC20Factory_CreateStandardL2Token_Test is OptimismMint } } -/// @title OptimismMintableERC20Factory_Unclassified_Test +/// @title OptimismMintableERC20Factory_Uncategorized_Test /// @notice General tests that are not testing any function directly of the /// `OptimismMintableERC20Factory` contract. -contract OptimismMintableERC20Factory_Unclassified_Test is OptimismMintableERC20Factory_TestInit { +contract OptimismMintableERC20Factory_Uncategorized_Test is OptimismMintableERC20Factory_TestInit { /// @notice Tests that the upgrade is successful. 
function test_upgrading_succeeds() external { IProxy proxy = IProxy(artifacts.mustGetAddress("OptimismMintableERC20FactoryProxy")); diff --git a/packages/contracts-bedrock/test/universal/Proxy.t.sol b/packages/contracts-bedrock/test/universal/Proxy.t.sol index 4c8727afc16d0..2ad098881c40f 100644 --- a/packages/contracts-bedrock/test/universal/Proxy.t.sol +++ b/packages/contracts-bedrock/test/universal/Proxy.t.sol @@ -257,9 +257,9 @@ contract Proxy_Implementation_Test is Proxy_TestInit { } } -/// @title Proxy_Unclassified_Test +/// @title Proxy_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `Proxy` contract. -contract Proxy_Unclassified_Test is Proxy_TestInit { +contract Proxy_Uncategorized_Test is Proxy_TestInit { function test_delegatesToImpl_succeeds() external { // Call the storage setter on the proxy Proxy_SimpleStorage_Harness(address(proxy)).set(1, 1); diff --git a/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol b/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol index 12bfea4699d52..160bb6ffefc8c 100644 --- a/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol +++ b/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol @@ -291,10 +291,10 @@ contract ProxyAdmin_UpgradeAndCall_Test is ProxyAdmin_TestInit { } } -/// @title ProxyAdmin_Unclassified_Test +/// @title ProxyAdmin_Uncategorized_Test /// @notice General tests that are not testing any function directly or that test multiple /// functions of the `ProxyAdmin` contract. 
-contract ProxyAdmin_Unclassified_Test is ProxyAdmin_TestInit { +contract ProxyAdmin_Uncategorized_Test is ProxyAdmin_TestInit { function test_owner_succeeds() external view { assertEq(admin.owner(), alice); } diff --git a/packages/contracts-bedrock/test/universal/WETH98.t.sol b/packages/contracts-bedrock/test/universal/WETH98.t.sol index 396b20ce5fdae..ca1158da307df 100644 --- a/packages/contracts-bedrock/test/universal/WETH98.t.sol +++ b/packages/contracts-bedrock/test/universal/WETH98.t.sol @@ -175,10 +175,10 @@ contract WETH98_TransferFrom_Test is WETH98_TestInit { } } -/// @title WETH98_Unclassified_Test +/// @title WETH98_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `WETH98` contract or /// are testing multiple functions at once. -contract WETH98_Unclassified_Test is WETH98_TestInit { +contract WETH98_Uncategorized_Test is WETH98_TestInit { function test_getName_succeeds() public view { assertEq(weth.name(), "Wrapped Ether"); assertEq(weth.symbol(), "WETH"); diff --git a/packages/contracts-bedrock/test/vendor/AddressAliasHelper.t.sol b/packages/contracts-bedrock/test/vendor/AddressAliasHelper.t.sol index e4978bd3e4916..5df2f4e0a255a 100644 --- a/packages/contracts-bedrock/test/vendor/AddressAliasHelper.t.sol +++ b/packages/contracts-bedrock/test/vendor/AddressAliasHelper.t.sol @@ -4,10 +4,10 @@ pragma solidity 0.8.15; import { Test } from "forge-std/Test.sol"; import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; -/// @title AddressAliasHelper_Unclassified_Test +/// @title AddressAliasHelper_Uncategorized_Test /// @notice General tests that are not testing any function directly of the `AddressAliasHelper` /// contract or are testing multiple functions at once. -contract AddressAliasHelper_Unclassified_Test is Test { +contract AddressAliasHelper_Uncategorized_Test is Test { /// @notice Tests that applying and then undoing an alias results in the original address. 
function testFuzz_applyAndUndo_succeeds(address _address) external pure { address aliased = AddressAliasHelper.applyL1ToL2Alias(_address); diff --git a/packages/contracts-bedrock/test/vendor/Initializable.t.sol b/packages/contracts-bedrock/test/vendor/Initializable.t.sol index c7e481136ae9a..0497aec028b52 100644 --- a/packages/contracts-bedrock/test/vendor/Initializable.t.sol +++ b/packages/contracts-bedrock/test/vendor/Initializable.t.sol @@ -12,6 +12,7 @@ import { Process } from "scripts/libraries/Process.sol"; import { LibString } from "@solady/utils/LibString.sol"; import { GameType, Hash, Proposal } from "src/dispute/lib/Types.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; +import { DevFeatures } from "src/libraries/DevFeatures.sol"; // Interfaces import { ISystemConfig } from "interfaces/L1/ISystemConfig.sol"; @@ -20,6 +21,7 @@ import { IResourceMetering } from "interfaces/L1/IResourceMetering.sol"; import { IDisputeGameFactory } from "interfaces/dispute/IDisputeGameFactory.sol"; import { ProtocolVersion } from "interfaces/L1/IProtocolVersions.sol"; import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; +import { IOptimismPortalInterop } from "interfaces/L1/IOptimismPortalInterop.sol"; /// @title Initializer_Test /// @dev Ensures that the `initialize()` function on contracts cannot be called more than @@ -119,22 +121,48 @@ contract Initializer_Test is CommonTest { initCalldata: abi.encodeCall(delayedWeth.initialize, (ISystemConfig(address(0)))) }) ); - // OptimismPortal2Impl - contracts.push( - InitializeableContract({ - name: "OptimismPortal2Impl", - target: EIP1967Helper.getImplementation(address(optimismPortal2)), - initCalldata: abi.encodeCall(optimismPortal2.initialize, (systemConfig, anchorStateRegistry, ethLockbox)) - }) - ); - // OptimismPortal2Proxy - contracts.push( - InitializeableContract({ - name: "OptimismPortal2Proxy", - target: address(optimismPortal2), - initCalldata: 
abi.encodeCall(optimismPortal2.initialize, (systemConfig, anchorStateRegistry, ethLockbox)) - }) - ); + + if (isDevFeatureEnabled(DevFeatures.OPTIMISM_PORTAL_INTEROP)) { + // OptimismPortal2Impl + contracts.push( + InitializeableContract({ + name: "OptimismPortal2Impl", + target: EIP1967Helper.getImplementation(address(optimismPortal2)), + initCalldata: abi.encodeCall( + IOptimismPortalInterop(payable(optimismPortal2)).initialize, + (systemConfig, anchorStateRegistry, ethLockbox) + ) + }) + ); + // OptimismPortal2Proxy + contracts.push( + InitializeableContract({ + name: "OptimismPortal2Proxy", + target: address(optimismPortal2), + initCalldata: abi.encodeCall( + IOptimismPortalInterop(payable(optimismPortal2)).initialize, + (systemConfig, anchorStateRegistry, ethLockbox) + ) + }) + ); + } else { + // OptimismPortal2Impl + contracts.push( + InitializeableContract({ + name: "OptimismPortal2Impl", + target: EIP1967Helper.getImplementation(address(optimismPortal2)), + initCalldata: abi.encodeCall(optimismPortal2.initialize, (systemConfig, anchorStateRegistry)) + }) + ); + // OptimismPortal2Proxy + contracts.push( + InitializeableContract({ + name: "OptimismPortal2Proxy", + target: address(optimismPortal2), + initCalldata: abi.encodeCall(optimismPortal2.initialize, (systemConfig, anchorStateRegistry)) + }) + ); + } // SystemConfigImpl contracts.push( @@ -367,7 +395,7 @@ contract Initializer_Test is CommonTest { function test_cannotReinitialize_succeeds() public { // Collect exclusions. uint256 j; - string[] memory excludes = new string[](8); + string[] memory excludes = new string[](11); // Contract is currently not being deployed as part of the standard deployment script. excludes[j++] = "src/L2/OptimismSuperchainERC20.sol"; // Periphery contracts don't get deployed as part of the standard deployment script. @@ -378,11 +406,15 @@ contract Initializer_Test is CommonTest { // contracts and instead simply deploys them anonymously. 
Means that functions like "getInitializedSlot" // don't work properly. Remove these exclusions once the deployment script is fixed. excludes[j++] = "src/dispute/FaultDisputeGame.sol"; + excludes[j++] = "src/dispute/v2/FaultDisputeGameV2.sol"; + excludes[j++] = "src/dispute/v2/PermissionedDisputeGameV2.sol"; excludes[j++] = "src/dispute/SuperFaultDisputeGame.sol"; excludes[j++] = "src/dispute/PermissionedDisputeGame.sol"; excludes[j++] = "src/dispute/SuperPermissionedDisputeGame.sol"; // TODO: Eventually remove this exclusion. Same reason as above dispute contracts. excludes[j++] = "src/L1/OPContractsManager.sol"; + // TODO: Eventually remove this exclusion. Same reason as above dispute contracts. + excludes[j++] = "src/L1/OptimismPortalInterop.sol"; // L2 contract initialization is tested in Predeploys.t.sol excludes[j++] = "src/L2/*";